
[RFC,bpf-next,v4,4/6] bpf: pin, translate, and unpin __uptr from syscalls.

Message ID 20240816191213.35573-5-thinker.li@gmail.com (mailing list archive)
State New

Commit Message

Kui-Feng Lee Aug. 16, 2024, 7:12 p.m. UTC
When a user program updates a map value, every uptr will be pinned and
translated to an address in the kernel. This process is initiated by
calling bpf_map_update_elem() from user programs.

To make uptrs accessible to BPF programs, the backing pages are pinned with
pin_user_pages_fast(), while the conversion to kernel addresses is done by
page_address(). The uptrs can be unpinned with unpin_user_pages().

Currently, the memory block pointed to by a uptr must reside in a single
memory page, as crossing multiple pages is not supported. uptr is only
supported by task storage maps and can only be set by user programs through
syscalls.

When the value of a uptr is overwritten or destroyed, the memory pointed
to by the old value must be unpinned. This is ensured by calling
bpf_obj_uptrcpy() and copy_map_uptr_locked() when updating a map value and by
bpf_obj_free_fields() when destroying a map value.

Cc: linux-mm@kvack.org
Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
---
 include/linux/bpf.h            |  30 ++++++
 kernel/bpf/bpf_local_storage.c |  23 ++++-
 kernel/bpf/helpers.c           |  20 ++++
 kernel/bpf/syscall.c           | 172 ++++++++++++++++++++++++++++++++-
 4 files changed, 237 insertions(+), 8 deletions(-)
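
For context, a minimal usage sketch implied by the description above (the __uptr tag definition, struct layout, map declaration and user-space key handling are illustrative assumptions drawn from the rest of this series, not part of this patch):

/* BPF side: a task storage map whose value embeds a user pointer.
 * The __uptr tag is assumed to be a BTF type tag introduced earlier
 * in this series.
 */
#define __uptr __attribute__((btf_type_tag("uptr")))

struct user_data {
	int a;
	int b;
};

struct value_type {
	struct user_data __uptr *udata;
};

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct value_type);
} datamap SEC(".maps");

/* User side: the only way to set a uptr is bpf_map_update_elem().
 * The kernel pins the page backing &data and rewrites value.udata to a
 * kernel address before storing the element; the struct must not cross
 * a page boundary.  map_fd is the datamap fd obtained via libbpf.
 */
struct user_data data = { .a = 1, .b = 2 };
struct value_type value = { .udata = &data };
int pid_fd = syscall(SYS_pidfd_open, getpid(), 0);
int err = bpf_map_update_elem(map_fd, &pid_fd, &value, BPF_ANY);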

Comments

Alexei Starovoitov Aug. 28, 2024, 11:24 p.m. UTC | #1
On Fri, Aug 16, 2024 at 12:12 PM Kui-Feng Lee <thinker.li@gmail.com> wrote:
>
> When a user program updates a map value, every uptr will be pinned and
> translated to an address in the kernel. This process is initiated by
> calling bpf_map_update_elem() from user programs.
>
> To access uptrs in BPF programs, they are pinned using
> pin_user_pages_fast(), but the conversion to kernel addresses is actually
> done by page_address(). The uptrs can be unpinned using unpin_user_pages().
>
> Currently, the memory block pointed to by a uptr must reside in a single
> memory page, as crossing multiple pages is not supported. uptr is only
> supported by task storage maps and can only be set by user programs through
> syscalls.
>
> When the value of an uptr is overwritten or destroyed, the memory pointed
> to by the old value must be unpinned. This is ensured by calling
> bpf_obj_uptrcpy() and copy_map_uptr_locked()

Doesn't look like there is a test for it, but more importantly
unpin shouldn't be called from bpf prog, since
we cannot guarantee that the execution context is safe enough to do unpin.
More on this below.

> when updating map value and by
> bpf_obj_free_fields() when destroying map value.
>
> Cc: linux-mm@kvack.org
> Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
> ---
>  include/linux/bpf.h            |  30 ++++++
>  kernel/bpf/bpf_local_storage.c |  23 ++++-
>  kernel/bpf/helpers.c           |  20 ++++
>  kernel/bpf/syscall.c           | 172 ++++++++++++++++++++++++++++++++-
>  4 files changed, 237 insertions(+), 8 deletions(-)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index 954e476b5605..886c818ff555 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -477,6 +477,8 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
>                 data_race(*ldst++ = *lsrc++);
>  }
>
> +void bpf_obj_unpin_uptr(const struct btf_field *field, void *addr);
> +
>  /* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
>  static inline void bpf_obj_memcpy(struct btf_record *rec,
>                                   void *dst, void *src, u32 size,
> @@ -503,6 +505,34 @@ static inline void bpf_obj_memcpy(struct btf_record *rec,
>         memcpy(dst + curr_off, src + curr_off, size - curr_off);
>  }
>
> +static inline void bpf_obj_uptrcpy(struct btf_record *rec,
> +                                  void *dst, void *src)
> +{
> +       int i;
> +
> +       if (IS_ERR_OR_NULL(rec))
> +               return;
> +
> +       for (i = 0; i < rec->cnt; i++) {
> +               u32 next_off = rec->fields[i].offset;
> +               void *addr;
> +
> +               if (rec->fields[i].type == BPF_UPTR) {
> +                       /* Unpin old address.
> +                        *
> +                        * Alignments are guaranteed by btf_find_field_one().
> +                        */
> +                       addr = *(void **)(dst + next_off);
> +                       if (addr)
> +                               bpf_obj_unpin_uptr(&rec->fields[i], addr);
> +
> +                       *(void **)(dst + next_off) = *(void **)(src + next_off);
> +               }
> +       }
> +}

The whole helper can be removed. See below.

> +
> +void copy_map_uptr_locked(struct bpf_map *map, void *dst, void *src, bool lock_src);
> +
>  static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
>  {
>         bpf_obj_memcpy(map->record, dst, src, map->value_size, false);
> diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
> index c938dea5ddbf..2fafad53b9d9 100644
> --- a/kernel/bpf/bpf_local_storage.c
> +++ b/kernel/bpf/bpf_local_storage.c
> @@ -99,8 +99,11 @@ bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
>         }
>
>         if (selem) {
> -               if (value)
> +               if (value) {
>                         copy_map_value(&smap->map, SDATA(selem)->data, value);
> +                       if (smap->map.map_type == BPF_MAP_TYPE_TASK_STORAGE)
> +                               bpf_obj_uptrcpy(smap->map.record, SDATA(selem)->data, value);

This part should be dropped.
bpf prog should not be able to call unpin on uptr.
It cannot supply new uptr in value anyway.
On the other hand, user space should be able to
bpf_map_update_elem() with one value->udata and
then call it again with a different value->udata.
Old one should be unpinned and the new udata pinned,
but that shouldn't be done from the guts of bpf_selem_alloc().
Instead, all of pin/unpin must be done while handling sys_bpf command.
More below.


> +               }
>                 /* No need to call check_and_init_map_value as memory is zero init */
>                 return selem;
>         }
> @@ -575,8 +578,13 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
>                 if (err)
>                         return ERR_PTR(err);
>                 if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
> -                       copy_map_value_locked(&smap->map, old_sdata->data,
> -                                             value, false);
> +                       if (smap->map.map_type == BPF_MAP_TYPE_TASK_STORAGE &&
> +                           btf_record_has_field(smap->map.record, BPF_UPTR))
> +                               copy_map_uptr_locked(&smap->map, old_sdata->data,
> +                                                    value, false);
> +                       else
> +                               copy_map_value_locked(&smap->map, old_sdata->data,
> +                                                     value, false);

Similar. unpin here is dangerous.
Since the combination of bpf_spin_lock and uptr in a map element
is causing this complexity, we should simply disable this combination
until an actual use case comes up.
Then the above hunk won't be needed.
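
If that route is taken, one possible shape of the check, sketched against the map_check_btf() hunk later in this patch (illustrative only; the exact placement and error code are the author's call):

		case BPF_UPTR:
			if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) {
				ret = -EOPNOTSUPP;
				goto free_map_tab;
			}
			/* unpin from the locked copy path is what causes the
			 * trouble above, so disallow bpf_spin_lock + uptr in
			 * the same map value for now
			 */
			if (btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
				ret = -EOPNOTSUPP;
				goto free_map_tab;
			}
			break;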

>                         return old_sdata;
>                 }
>         }
> @@ -607,8 +615,13 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
>                 goto unlock;
>
>         if (old_sdata && (map_flags & BPF_F_LOCK)) {
> -               copy_map_value_locked(&smap->map, old_sdata->data, value,
> -                                     false);
> +               if (smap->map.map_type == BPF_MAP_TYPE_TASK_STORAGE &&
> +                   btf_record_has_field(smap->map.record, BPF_UPTR))
> +                       copy_map_uptr_locked(&smap->map, old_sdata->data,
> +                                            value, false);
> +               else
> +                       copy_map_value_locked(&smap->map, old_sdata->data,
> +                                             value, false);

This one won't be needed either.

>                 selem = SELEM(old_sdata);
>                 goto unlock;
>         }
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index d02ae323996b..d588b52605b9 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -388,6 +388,26 @@ void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
>         preempt_enable();
>  }
>
> +/* Copy map value and uptr from src to dst, with lock_src indicating
> + * whether src or dst is locked.
> + */
> +void copy_map_uptr_locked(struct bpf_map *map, void *src, void *dst,
> +                         bool lock_src)
> +{
> +       struct bpf_spin_lock *lock;
> +
> +       if (lock_src)
> +               lock = src + map->record->spin_lock_off;
> +       else
> +               lock = dst + map->record->spin_lock_off;
> +       preempt_disable();
> +       __bpf_spin_lock_irqsave(lock);
> +       copy_map_value(map, dst, src);
> +       bpf_obj_uptrcpy(map->record, dst, src);
> +       __bpf_spin_unlock_irqrestore(lock);
> +       preempt_enable();
> +}

This one has to be removed too.
Just think of the consequences of the above.
It may do unpin with irqs disabled.
It's asking for trouble depending on where the udata pointer originates.

> +void bpf_obj_unpin_uptr(const struct btf_field *field, void *addr)
> +{
> +       struct page *pages[1];
> +       u32 size, type_id;
> +       int npages;
> +       void *ptr;
> +
> +       type_id = field->kptr.btf_id;
> +       btf_type_id_size(field->kptr.btf, &type_id, &size);
> +       if (size == 0)
> +               return;
> +
> +       ptr = (void *)((intptr_t)addr & PAGE_MASK);
> +
> +       npages = (((intptr_t)addr + size + ~PAGE_MASK) - (intptr_t)ptr) >> PAGE_SHIFT;
> +       if (WARN_ON_ONCE(npages > 1))
> +               return;

This check is unnecessary. We check that there is only one page
during the pin. No need to repeat during unpin.

> +
> +       pages[0] = virt_to_page(ptr);
> +       unpin_user_pages(pages, 1);

The whole helper can be just the above two lines.
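
I.e. roughly (a sketch of the suggested simplification; virt_to_page() already resolves the page containing the address, and the single-page property is guaranteed at pin time):

void bpf_obj_unpin_uptr(const struct btf_field *field, void *addr)
{
	unpin_user_page(virt_to_page(addr));
}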

> +}
> +
> +/* Unpin uptr fields in the record up to cnt */
> +static void bpf_obj_unpin_uptrs_cnt(struct btf_record *rec, int cnt, void *src)
> +{
> +       u32 next_off;
> +       void **kaddr_ptr;
> +       int i;
> +
> +       for (i = 0; i < cnt; i++) {
> +               if (rec->fields[i].type != BPF_UPTR)
> +                       continue;
> +
> +               next_off = rec->fields[i].offset;
> +               kaddr_ptr = src + next_off;
> +               if (*kaddr_ptr) {
> +                       bpf_obj_unpin_uptr(&rec->fields[i], *kaddr_ptr);
> +                       *kaddr_ptr = NULL;
> +               }
> +       }
> +}
> +
> +/* Find all BPF_UPTR fields in the record, pin the user memory, map it
> + * to kernel space, and update the addresses in the source memory.
> + *
> + * The map value passing from userspace may contain user kptrs pointing to
> + * user memory. This function pins the user memory and maps it to kernel
> + * memory so that BPF programs can access it.
> + */
> +static int bpf_obj_trans_pin_uptrs(struct btf_record *rec, void *src, u32 size)
> +{
> +       u32 type_id, tsz, npages, next_off;
> +       void *uaddr, *kaddr, **uaddr_ptr;
> +       const struct btf_type *t;
> +       struct page *pages[1];
> +       int i, err;
> +
> +       if (IS_ERR_OR_NULL(rec))
> +               return 0;
> +
> +       if (!btf_record_has_field(rec, BPF_UPTR))
> +               return 0;
> +
> +       for (i = 0; i < rec->cnt; i++) {
> +               if (rec->fields[i].type != BPF_UPTR)
> +                       continue;
> +
> +               next_off = rec->fields[i].offset;
> +               if (next_off + sizeof(void *) > size) {
> +                       err = -EFAULT;
> +                       goto rollback;
> +               }

The size argument and the above check are unnecessary.
The btf_record has to be correct at this point.

> +               uaddr_ptr = src + next_off;
> +               uaddr = *uaddr_ptr;
> +               if (!uaddr)
> +                       continue;
> +
> +               /* Make sure the user memory takes up at most one page */
> +               type_id = rec->fields[i].kptr.btf_id;
> +               t = btf_type_id_size(rec->fields[i].kptr.btf, &type_id, &tsz);
> +               if (!t) {
> +                       err = -EFAULT;
> +                       goto rollback;
> +               }
> +               if (tsz == 0) {
> +                       *uaddr_ptr = NULL;
> +                       continue;
> +               }

tsz == 0? Are you sure this can happen?
If so, there has to be a test for this.
And we should probably reject it earlier in the verifier;
a zero-sized struct as a uptr has no practical use case.
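
If it can happen at all, a load-time rejection might look roughly like this (a sketch; where exactly uptr fields get validated is an assumption, and the variable names just mirror the ones used in the hunk above):

	t = btf_type_id_size(rec->fields[i].kptr.btf, &type_id, &tsz);
	if (!t || tsz == 0)
		return -EINVAL;	/* zero-sized or unresolvable uptr type */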

> +               npages = (((intptr_t)uaddr + tsz + ~PAGE_MASK) -
> +                         ((intptr_t)uaddr & PAGE_MASK)) >> PAGE_SHIFT;
> +               if (npages > 1) {
> +                       /* Allow only one page */
> +                       err = -EFAULT;

E2BIG would be a better error in such a case.

> +                       goto rollback;
> +               }
> +
> +               /* Pin the user memory */
> +               err = pin_user_pages_fast((intptr_t)uaddr, 1, FOLL_LONGTERM | FOLL_WRITE, pages);
> +               if (err < 0)
> +                       goto rollback;

Since it's the "_fast" version, it can return 0 too.
That case needs a rollback as well.
It's better to change this check to if (err != 1),
which is the more canonical way.
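
I.e. something along these lines (sketch; zero pinned pages is treated as a failure and rolled back like any other error):

		err = pin_user_pages_fast((unsigned long)uaddr, 1,
					  FOLL_LONGTERM | FOLL_WRITE, pages);
		if (err != 1) {
			if (err >= 0)
				err = -EFAULT;
			goto rollback;
		}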

> +
> +               /* Map to kernel space */
> +               kaddr = page_address(pages[0]);
> +               if (unlikely(!kaddr)) {
> +                       WARN_ON_ONCE(1);

Since the page was pinned the above cannot fail.
No reason for this check.

> +                       unpin_user_pages(pages, 1);
> +                       err = -EFAULT;
> +                       goto rollback;
> +               }
> +               *uaddr_ptr = kaddr + ((intptr_t)uaddr & ~PAGE_MASK);
> +       }
> +
> +       return 0;
> +
> +rollback:
> +       /* Unpin the user memory of earlier fields */
> +       bpf_obj_unpin_uptrs_cnt(rec, i, src);
> +
> +       return err;
> +}
> +
> +static void bpf_obj_unpin_uptrs(struct btf_record *rec, void *src)
> +{
> +       if (IS_ERR_OR_NULL(rec))
> +               return;
> +
> +       if (!btf_record_has_field(rec, BPF_UPTR))
> +               return;
> +
> +       bpf_obj_unpin_uptrs_cnt(rec, rec->cnt, src);
> +}
> +
> +static int bpf_map_update_value_inner(struct bpf_map *map, struct file *map_file,
> +                                     void *key, void *value, __u64 flags)
>  {
>         int err;
>
> @@ -208,6 +340,29 @@ static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
>         return err;
>  }
>
> +static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
> +                               void *key, void *value, __u64 flags)
> +{
> +       int err;
> +
> +       if (map->map_type == BPF_MAP_TYPE_TASK_STORAGE) {
> +               /* Pin user memory can lead to context switch, so we need
> +                * to do it before potential RCU lock.
> +                */
> +               err = bpf_obj_trans_pin_uptrs(map->record, value,
> +                                             bpf_map_value_size(map));
> +               if (err)
> +                       return err;
> +       }
> +
> +       err = bpf_map_update_value_inner(map, map_file, key, value, flags);
> +
> +       if (err && map->map_type == BPF_MAP_TYPE_TASK_STORAGE)
> +               bpf_obj_unpin_uptrs(map->record, value);

Please don't rename bpf_map_update_value() to bpf_map_update_value_inner().
Instead, add an "if (map->map_type == BPF_MAP_TYPE_TASK_STORAGE)" case
inside bpf_map_update_value() and
do all of the unpin/pin calls from there.

> +
> +       return err;
> +}
> +
>  static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
>                               __u64 flags)
>  {
> @@ -714,6 +869,11 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
>                                 field->kptr.dtor(xchgd_field);
>                         }
>                         break;
> +               case BPF_UPTR:
> +                       if (*(void **)field_ptr)
> +                               bpf_obj_unpin_uptr(field, *(void **)field_ptr);
> +                       *(void **)field_ptr = NULL;

This one will be called from
 task_storage_delete->bpf_selem_free->bpf_obj_free_fields

and even if unpin was safe to do from that context
we cannot just do:
*(void **)field_ptr = NULL;

since bpf prog might be running in parallel,
it could have just read that addr and now is using it.

The first thought of a way to fix this was to split
bpf_obj_free_fields() into the current one plus
bpf_obj_free_fields_after_gp()
that will do the above unpin bit.
and call the later one from bpf_selem_free_rcu()
while bpf_obj_free_fields() from bpf_selem_free()
will not touch uptr.

But after digging further I realized that task_storage
already switched to use bpf_ma, so the above won't work.

So we need something similar to BPF_KPTR_REF logic:
xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
and then delay the uptr unpin for that address into call_rcu().

Any better ideas?

> +                       break;
>                 case BPF_LIST_HEAD:
>                         if (WARN_ON_ONCE(rec->spin_lock_off < 0))
>                                 continue;
> @@ -1099,7 +1259,7 @@ static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
>
>         map->record = btf_parse_fields(btf, value_type,
>                                        BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
> -                                      BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE,
> +                                      BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE | BPF_UPTR,
>                                        map->value_size);
>         if (!IS_ERR_OR_NULL(map->record)) {
>                 int i;
> @@ -1155,6 +1315,12 @@ static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
>                                         goto free_map_tab;
>                                 }
>                                 break;
> +                       case BPF_UPTR:
> +                               if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) {
> +                                       ret = -EOPNOTSUPP;
> +                                       goto free_map_tab;
> +                               }
> +                               break;

I was wondering whether we need an additional check that bpf_obj_new()
cannot be used to allocate a prog-supplied struct with a uptr in it,
but we're good here, since we only allow
__btf_type_is_scalar_struct() for bpf_obj_new().
Martin KaFai Lau Sept. 4, 2024, 10:21 p.m. UTC | #2
On 8/28/24 4:24 PM, Alexei Starovoitov wrote:
>> @@ -714,6 +869,11 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
>>                                  field->kptr.dtor(xchgd_field);
>>                          }
>>                          break;
>> +               case BPF_UPTR:
>> +                       if (*(void **)field_ptr)
>> +                               bpf_obj_unpin_uptr(field, *(void **)field_ptr);
>> +                       *(void **)field_ptr = NULL;
> This one will be called from
>   task_storage_delete->bpf_selem_free->bpf_obj_free_fields
> 
> and even if upin was safe to do from that context
> we cannot just do:
> *(void **)field_ptr = NULL;
> 
> since bpf prog might be running in parallel,
> it could have just read that addr and now is using it.
> 
> The first thought of a way to fix this was to split
> bpf_obj_free_fields() into the current one plus
> bpf_obj_free_fields_after_gp()
> that will do the above unpin bit.
> and call the later one from bpf_selem_free_rcu()
> while bpf_obj_free_fields() from bpf_selem_free()
> will not touch uptr.
> 
> But after digging further I realized that task_storage
> already switched to use bpf_ma, so the above won't work.
> 
> So we need something similar to BPF_KPTR_REF logic:
> xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
> and then delay of uptr unpin for that address into call_rcu.
> 
> Any better ideas?

Many thanks to Kui-Feng for starting this useful work on task storage. I will think
about it and respin the set.
Martin KaFai Lau Sept. 6, 2024, 8:11 p.m. UTC | #3
On 9/4/24 3:21 PM, Martin KaFai Lau wrote:
> On 8/28/24 4:24 PM, Alexei Starovoitov wrote:
>>> @@ -714,6 +869,11 @@ void bpf_obj_free_fields(const struct btf_record *rec, 
>>> void *obj)
>>>                                  field->kptr.dtor(xchgd_field);
>>>                          }
>>>                          break;
>>> +               case BPF_UPTR:
>>> +                       if (*(void **)field_ptr)
>>> +                               bpf_obj_unpin_uptr(field, *(void **)field_ptr);
>>> +                       *(void **)field_ptr = NULL;
>> This one will be called from
>>   task_storage_delete->bpf_selem_free->bpf_obj_free_fields
>>
>> and even if upin was safe to do from that context
>> we cannot just do:
>> *(void **)field_ptr = NULL;
>>
>> since bpf prog might be running in parallel,
>> it could have just read that addr and now is using it.
>>
>> The first thought of a way to fix this was to split
>> bpf_obj_free_fields() into the current one plus
>> bpf_obj_free_fields_after_gp()
>> that will do the above unpin bit.
>> and call the later one from bpf_selem_free_rcu()
>> while bpf_obj_free_fields() from bpf_selem_free()
>> will not touch uptr.
>>
>> But after digging further I realized that task_storage
>> already switched to use bpf_ma, so the above won't work.
>>
>> So we need something similar to BPF_KPTR_REF logic:
>> xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
>> and then delay of uptr unpin for that address into call_rcu.
>>
>> Any better ideas?
> 

I think the existing reuse_now arg in the bpf_selem_free can be used. reuse_now 
(renamed from the earlier use_trace_rcu) was added to avoid call_rcu_tasks_trace 
for the common case.

selem (of type "struct bpf_local_storage_elem") is the one exposed to the bpf prog.

bpf_selem_free knows whether a selem can be reused immediately based on the 
caller. It is currently flagged in the reuse_now arg: "bpf_selem_free(...., bool 
reuse_now)".

If a selem cannot be reuse_now (i.e. == false), it is currently going through 
"call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu)". We can do 
unpin_user_page() in the rcu callback.

A selem can be reuse_now (i.e. == true) if the selem is no longer needed because 
either its owner (i.e. the task_struct here) is going away in free_task() or the 
bpf map is being destructed in bpf_local_storage_map_free(). No bpf prog should 
have a hold on the selem at this point. I think for these two cases, the 
unpin_user_page() can be directly called in bpf_selem_free().

One existing bug: from looking at patch 6, I don't think the free_task() case
can be "reuse_now == true" anymore, because the bpf_task_release kfunc does not
mark the previously obtained task storage as invalid:

data_task = bpf_task_from_pid(parent_pid);
ptr = bpf_task_storage_get(&datamap, data_task, 0, ...);
bpf_task_release(data_task);
if (!ptr)
	return 0;
/* The prog still holds a valid task storage ptr. */
udata = ptr->udata;

It can be fixed by marking the ref_obj_id of the "ptr". Although it is more 
correct to make the task storage "ptr" invalid after task_release, it may break 
the existing progs.

The same issue probably is true for cgroup_storage. There is no release kfunc 
for inode and sk, so inode and sk storage should be fine.
Alexei Starovoitov Sept. 6, 2024, 11:44 p.m. UTC | #4
On Fri, Sep 6, 2024 at 1:11 PM Martin KaFai Lau <martin.lau@linux.dev> wrote:
>
> On 9/4/24 3:21 PM, Martin KaFai Lau wrote:
> > On 8/28/24 4:24 PM, Alexei Starovoitov wrote:
> >>> @@ -714,6 +869,11 @@ void bpf_obj_free_fields(const struct btf_record *rec,
> >>> void *obj)
> >>>                                  field->kptr.dtor(xchgd_field);
> >>>                          }
> >>>                          break;
> >>> +               case BPF_UPTR:
> >>> +                       if (*(void **)field_ptr)
> >>> +                               bpf_obj_unpin_uptr(field, *(void **)field_ptr);
> >>> +                       *(void **)field_ptr = NULL;
> >> This one will be called from
> >>   task_storage_delete->bpf_selem_free->bpf_obj_free_fields
> >>
> >> and even if upin was safe to do from that context
> >> we cannot just do:
> >> *(void **)field_ptr = NULL;
> >>
> >> since bpf prog might be running in parallel,
> >> it could have just read that addr and now is using it.
> >>
> >> The first thought of a way to fix this was to split
> >> bpf_obj_free_fields() into the current one plus
> >> bpf_obj_free_fields_after_gp()
> >> that will do the above unpin bit.
> >> and call the later one from bpf_selem_free_rcu()
> >> while bpf_obj_free_fields() from bpf_selem_free()
> >> will not touch uptr.
> >>
> >> But after digging further I realized that task_storage
> >> already switched to use bpf_ma, so the above won't work.
> >>
> >> So we need something similar to BPF_KPTR_REF logic:
> >> xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
> >> and then delay of uptr unpin for that address into call_rcu.
> >>
> >> Any better ideas?
> >
>
> I think the existing reuse_now arg in the bpf_selem_free can be used. reuse_now
> (renamed from the earlier use_trace_rcu) was added to avoid call_rcu_tasks_trace
> for the common case.
>
> selem (in type "struct bpf_local_storage_elem") is the one exposed to the bpf prog.
>
> bpf_selem_free knows whether a selem can be reused immediately based on the
> caller. It is currently flagged in the reuse_now arg: "bpf_selem_free(...., bool
> reuse_now)".
>
> If a selem cannot be reuse_now (i.e. == false), it is currently going through
> "call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu)". We can do
> unpin_user_page() in the rcu callback.
>
> A selem can be reuse_now (i.e. == true) if the selem is no longer needed because
> either its owner (i.e. the task_struct here) is going away in free_task() or the
> bpf map is being destructed in bpf_local_storage_map_free(). No bpf prog should
> have a hold on the selem at this point. I think for these two cases, the
> unpin_user_page() can be directly called in bpf_selem_free().

but there is also this path:
bpf_task_storage_delete -> task_storage_delete -> bpf_selem_free
 -> bpf_obj_free_fields

In this case bpf prog may still be looking at uptr address
and we cannot do unpin right away in bpf_obj_free_fields.
All other special fields in map value are ok,
since they are either relying on bpf_mem_alloc and
have rcu/rcu_tasks_trace gp
or extra indirection like timer/wq.

> One existing bug is, from looking at patch 6, I don't think the free_task() case
> can be "reuse_now == true" anymore because of the bpf_task_release kfunc did not
> mark the previously obtained task_storage to be invalid:
>
> data_task = bpf_task_from_pid(parent_pid);
> ptr = bpf_task_storage_get(&datamap, data_task, 0, ...);
> bpf_task_release(data_task);
> if (!ptr)
>         return 0;
> /* The prog still holds a valid task storage ptr. */
> udata = ptr->udata;
>
> It can be fixed by marking the ref_obj_id of the "ptr". Although it is more
> correct to make the task storage "ptr" invalid after task_release, it may break
> the existing progs.

Are you suggesting that bpf_task_release should invalidate all pointers
fetched from map value?
That will work, but it's not an issue for other special fields in there
like kptr.
So this invalidation would be needed only for uptr, which feels
weird to special-case and will probably be confusing to users writing
such programs.
The above bpf prog example should be ok to use.
We only need to delay the unpin until after an rcu/rcu_tasks_trace gp.
Hence my proposal: in bpf_obj_free_fields(), do:
 case UPTR:
   xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
   call_rcu(...) to unpin.
Martin KaFai Lau Sept. 7, 2024, 1:32 a.m. UTC | #5
On 9/6/24 4:44 PM, Alexei Starovoitov wrote:
> On Fri, Sep 6, 2024 at 1:11 PM Martin KaFai Lau <martin.lau@linux.dev> wrote:
>>
>> On 9/4/24 3:21 PM, Martin KaFai Lau wrote:
>>> On 8/28/24 4:24 PM, Alexei Starovoitov wrote:
>>>>> @@ -714,6 +869,11 @@ void bpf_obj_free_fields(const struct btf_record *rec,
>>>>> void *obj)
>>>>>                                   field->kptr.dtor(xchgd_field);
>>>>>                           }
>>>>>                           break;
>>>>> +               case BPF_UPTR:
>>>>> +                       if (*(void **)field_ptr)
>>>>> +                               bpf_obj_unpin_uptr(field, *(void **)field_ptr);
>>>>> +                       *(void **)field_ptr = NULL;
>>>> This one will be called from
>>>>    task_storage_delete->bpf_selem_free->bpf_obj_free_fields
>>>>
>>>> and even if upin was safe to do from that context
>>>> we cannot just do:
>>>> *(void **)field_ptr = NULL;
>>>>
>>>> since bpf prog might be running in parallel,
>>>> it could have just read that addr and now is using it.
>>>>
>>>> The first thought of a way to fix this was to split
>>>> bpf_obj_free_fields() into the current one plus
>>>> bpf_obj_free_fields_after_gp()
>>>> that will do the above unpin bit.
>>>> and call the later one from bpf_selem_free_rcu()
>>>> while bpf_obj_free_fields() from bpf_selem_free()
>>>> will not touch uptr.
>>>>
>>>> But after digging further I realized that task_storage
>>>> already switched to use bpf_ma, so the above won't work.
>>>>
>>>> So we need something similar to BPF_KPTR_REF logic:
>>>> xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
>>>> and then delay of uptr unpin for that address into call_rcu.
>>>>
>>>> Any better ideas?
>>>
>>
>> I think the existing reuse_now arg in the bpf_selem_free can be used. reuse_now
>> (renamed from the earlier use_trace_rcu) was added to avoid call_rcu_tasks_trace
>> for the common case.
>>
>> selem (in type "struct bpf_local_storage_elem") is the one exposed to the bpf prog.
>>
>> bpf_selem_free knows whether a selem can be reused immediately based on the
>> caller. It is currently flagged in the reuse_now arg: "bpf_selem_free(...., bool
>> reuse_now)".
>>
>> If a selem cannot be reuse_now (i.e. == false), it is currently going through
>> "call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu)". We can do
>> unpin_user_page() in the rcu callback.
>>
>> A selem can be reuse_now (i.e. == true) if the selem is no longer needed because
>> either its owner (i.e. the task_struct here) is going away in free_task() or the
>> bpf map is being destructed in bpf_local_storage_map_free(). No bpf prog should
>> have a hold on the selem at this point. I think for these two cases, the
>> unpin_user_page() can be directly called in bpf_selem_free().
> 
> but there is also this path:
> bpf_task_storage_delete -> task_storage_delete -> bpf_selem_free
>   -> bpf_obj_free_fields
> 
> In this case bpf prog may still be looking at uptr address
> and we cannot do unpin right away in bpf_obj_free_fields.

Understood that we cannot unpin immediately in the bpf_task_storage_delete() path.
task_storage can be used in sleepable progs. It needs to wait for the tasks_trace and
the regular rcu gp before the unpin.

I forgot to mention earlier that bpf_task_storage_delete() will have the 
bpf_selem_free(..., reuse_now == false). It will then do the 
"call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);". The unpin could 
happen in bpf_selem_free_trace_rcu() in this case. I am suggesting to unpin in 
bpf_selem_free_trace_rcu together with the selem free.

I just noticed the map and its btf_record are gone in 
bpf_selem_free_trace_rcu()... so won't work. :(

> All other special fields in map value are ok,
> since they are either relying on bpf_mem_alloc and
> have rcu/rcu_tasks_trace gp
> or extra indirection like timer/wq.
> 
>> One existing bug is, from looking at patch 6, I don't think the free_task() case
>> can be "reuse_now == true" anymore because of the bpf_task_release kfunc did not
>> mark the previously obtained task_storage to be invalid:
>>
>> data_task = bpf_task_from_pid(parent_pid);
>> ptr = bpf_task_storage_get(&datamap, data_task, 0, ...);
>> bpf_task_release(data_task);
>> if (!ptr)
>>          return 0;
>> /* The prog still holds a valid task storage ptr. */
>> udata = ptr->udata;
>>
>> It can be fixed by marking the ref_obj_id of the "ptr". Although it is more
>> correct to make the task storage "ptr" invalid after task_release, it may break
>> the existing progs.
> 
> Are you suggesting that bpf_task_release should invalidate all pointers
> fetched from map value?

I was thinking at least the map value ptr itself needs to be invalidated.

> That will work, but it's not an issue for other special fields in there
> like kptr.
> So this invalidation would be need only for uptr which feels
> weird to special case it and probably will be confusing to users writing
> such programs.

Hmm... I haven't thought about the other pointer fields that are read before the
task_release().

Agreed, it is hard to use if it only marks the uptr invalid. Thinking about it:
even marking the map value ptr invalid while other previously read fields keep
working has the same weirdness.

> Above bpf prog example should be ok to use.
> We only need to delay unpin after rcu/rcu_task_trace gp.
> Hence my proposal in bpf_obj_free_fields() do:
>   case UPTR:
>     xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
>     call_rcu(...) to unpin.

Agree that call_rcu() here is the only option. It probably needs to go through 
the tasks_trace gp also.

Can the page->rcu_head be used here?
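
For the record, a rough sketch of the xchg() + call_rcu() direction combined with the page->rcu_head idea (nothing here is settled: whether page->rcu_head is free to use in this context, and whether a plain RCU gp is enough or a tasks_trace gp is also needed, are exactly the open questions above):

static void bpf_uptr_unpin_rcu(struct rcu_head *rcu)
{
	struct page *page = container_of(rcu, struct page, rcu_head);

	unpin_user_page(page);
}

	/* in bpf_obj_free_fields(), mirroring the BPF_KPTR_REF logic */
	case BPF_UPTR:
		xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
		if (xchgd_field)
			call_rcu(&virt_to_page(xchgd_field)->rcu_head,
				 bpf_uptr_unpin_rcu);
		break;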
Martin KaFai Lau Sept. 7, 2024, 4:03 a.m. UTC | #6
On 9/6/24 6:32 PM, Martin KaFai Lau wrote:
> On 9/6/24 4:44 PM, Alexei Starovoitov wrote:
>> On Fri, Sep 6, 2024 at 1:11 PM Martin KaFai Lau <martin.lau@linux.dev> wrote:
>>>
>>> On 9/4/24 3:21 PM, Martin KaFai Lau wrote:
>>>> On 8/28/24 4:24 PM, Alexei Starovoitov wrote:
>>>>>> @@ -714,6 +869,11 @@ void bpf_obj_free_fields(const struct btf_record *rec,
>>>>>> void *obj)
>>>>>>                                   field->kptr.dtor(xchgd_field);
>>>>>>                           }
>>>>>>                           break;
>>>>>> +               case BPF_UPTR:
>>>>>> +                       if (*(void **)field_ptr)
>>>>>> +                               bpf_obj_unpin_uptr(field, *(void 
>>>>>> **)field_ptr);
>>>>>> +                       *(void **)field_ptr = NULL;
>>>>> This one will be called from
>>>>>    task_storage_delete->bpf_selem_free->bpf_obj_free_fields
>>>>>
>>>>> and even if upin was safe to do from that context
>>>>> we cannot just do:
>>>>> *(void **)field_ptr = NULL;
>>>>>
>>>>> since bpf prog might be running in parallel,
>>>>> it could have just read that addr and now is using it.
>>>>>
>>>>> The first thought of a way to fix this was to split
>>>>> bpf_obj_free_fields() into the current one plus
>>>>> bpf_obj_free_fields_after_gp()
>>>>> that will do the above unpin bit.
>>>>> and call the later one from bpf_selem_free_rcu()
>>>>> while bpf_obj_free_fields() from bpf_selem_free()
>>>>> will not touch uptr.
>>>>>
>>>>> But after digging further I realized that task_storage
>>>>> already switched to use bpf_ma, so the above won't work.
>>>>>
>>>>> So we need something similar to BPF_KPTR_REF logic:
>>>>> xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
>>>>> and then delay of uptr unpin for that address into call_rcu.
>>>>>
>>>>> Any better ideas?
>>>>
>>>
>>> I think the existing reuse_now arg in the bpf_selem_free can be used. reuse_now
>>> (renamed from the earlier use_trace_rcu) was added to avoid call_rcu_tasks_trace
>>> for the common case.
>>>
>>> selem (in type "struct bpf_local_storage_elem") is the one exposed to the bpf 
>>> prog.
>>>
>>> bpf_selem_free knows whether a selem can be reused immediately based on the
>>> caller. It is currently flagged in the reuse_now arg: "bpf_selem_free(...., bool
>>> reuse_now)".
>>>
>>> If a selem cannot be reuse_now (i.e. == false), it is currently going through
>>> "call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu)". We can do
>>> unpin_user_page() in the rcu callback.
>>>
>>> A selem can be reuse_now (i.e. == true) if the selem is no longer needed because
>>> either its owner (i.e. the task_struct here) is going away in free_task() or the
>>> bpf map is being destructed in bpf_local_storage_map_free(). No bpf prog should
>>> have a hold on the selem at this point. I think for these two cases, the
>>> unpin_user_page() can be directly called in bpf_selem_free().
>>
>> but there is also this path:
>> bpf_task_storage_delete -> task_storage_delete -> bpf_selem_free
>>   -> bpf_obj_free_fields
>>
>> In this case bpf prog may still be looking at uptr address
>> and we cannot do unpin right away in bpf_obj_free_fields.
> 
> cannot unpin immediately in the bpf_task_storage_delete() path is understood. 
> task_storage can be used in sleepable. It needs to wait for the tasks_trace and 
> the regular rcu gp before unpin.
> 
> I forgot to mention earlier that bpf_task_storage_delete() will have the 
> bpf_selem_free(..., reuse_now == false). It will then do the 
> "call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);". The unpin could 
> happen in bpf_selem_free_trace_rcu() in this case. I am suggesting to unpin in 
> bpf_selem_free_trace_rcu together with the selem free.
> 
> I just noticed the map and its btf_record are gone in 
> bpf_selem_free_trace_rcu()... so won't work. :(

Thought about it more. Adding an rcu_barrier() to bpf_local_storage_map_free()
may be enough. Then bpf_selem_free_rcu() will have the map->record to unpin.
Will need to think about it more.

> 
>> All other special fields in map value are ok,
>> since they are either relying on bpf_mem_alloc and
>> have rcu/rcu_tasks_trace gp
>> or extra indirection like timer/wq.
>>
>>> One existing bug is, from looking at patch 6, I don't think the free_task() case
>>> can be "reuse_now == true" anymore because of the bpf_task_release kfunc did not
>>> mark the previously obtained task_storage to be invalid:
>>>
>>> data_task = bpf_task_from_pid(parent_pid);
>>> ptr = bpf_task_storage_get(&datamap, data_task, 0, ...);
>>> bpf_task_release(data_task);
>>> if (!ptr)
>>>          return 0;
>>> /* The prog still holds a valid task storage ptr. */
>>> udata = ptr->udata;
>>>
>>> It can be fixed by marking the ref_obj_id of the "ptr". Although it is more
>>> correct to make the task storage "ptr" invalid after task_release, it may break
>>> the existing progs.
>>
>> Are you suggesting that bpf_task_release should invalidate all pointers
>> fetched from map value?
> 
> I was thinking at least the map value ptr itself needs to be invalidated.
> 
>> That will work, but it's not an issue for other special fields in there
>> like kptr.
>> So this invalidation would be need only for uptr which feels
>> weird to special case it and probably will be confusing to users writing
>> such programs.
> 
> hmm... I haven't thought about the other pointer fields that read before the 
> task_release().
> 
> Agreed, it is hard to use if only marks uptr invalid. Thinking about it. Even 
> marking the map value ptr invalid while other previously read fields keep 
> working is also the same weirdness.
> 
>> Above bpf prog example should be ok to use.
>> We only need to delay unpin after rcu/rcu_task_trace gp.
>> Hence my proposal in bpf_obj_free_fields() do:
>>   case UPTR:
>>     xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
>>     call_rcu(...) to unpin.
> 
> Agree that call_rcu() here is the only option. It probably needs to go through 
> the tasks_trace gp also.
> 
> Can the page->rcu_head be used here?
>

Patch

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 954e476b5605..886c818ff555 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -477,6 +477,8 @@  static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
 		data_race(*ldst++ = *lsrc++);
 }
 
+void bpf_obj_unpin_uptr(const struct btf_field *field, void *addr);
+
 /* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
 static inline void bpf_obj_memcpy(struct btf_record *rec,
 				  void *dst, void *src, u32 size,
@@ -503,6 +505,34 @@  static inline void bpf_obj_memcpy(struct btf_record *rec,
 	memcpy(dst + curr_off, src + curr_off, size - curr_off);
 }
 
+static inline void bpf_obj_uptrcpy(struct btf_record *rec,
+				   void *dst, void *src)
+{
+	int i;
+
+	if (IS_ERR_OR_NULL(rec))
+		return;
+
+	for (i = 0; i < rec->cnt; i++) {
+		u32 next_off = rec->fields[i].offset;
+		void *addr;
+
+		if (rec->fields[i].type == BPF_UPTR) {
+			/* Unpin old address.
+			 *
+			 * Alignments are guaranteed by btf_find_field_one().
+			 */
+			addr = *(void **)(dst + next_off);
+			if (addr)
+				bpf_obj_unpin_uptr(&rec->fields[i], addr);
+
+			*(void **)(dst + next_off) = *(void **)(src + next_off);
+		}
+	}
+}
+
+void copy_map_uptr_locked(struct bpf_map *map, void *dst, void *src, bool lock_src);
+
 static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
 {
 	bpf_obj_memcpy(map->record, dst, src, map->value_size, false);
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index c938dea5ddbf..2fafad53b9d9 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -99,8 +99,11 @@  bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
 	}
 
 	if (selem) {
-		if (value)
+		if (value) {
 			copy_map_value(&smap->map, SDATA(selem)->data, value);
+			if (smap->map.map_type == BPF_MAP_TYPE_TASK_STORAGE)
+				bpf_obj_uptrcpy(smap->map.record, SDATA(selem)->data, value);
+		}
 		/* No need to call check_and_init_map_value as memory is zero init */
 		return selem;
 	}
@@ -575,8 +578,13 @@  bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
 		if (err)
 			return ERR_PTR(err);
 		if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
-			copy_map_value_locked(&smap->map, old_sdata->data,
-					      value, false);
+			if (smap->map.map_type == BPF_MAP_TYPE_TASK_STORAGE &&
+			    btf_record_has_field(smap->map.record, BPF_UPTR))
+				copy_map_uptr_locked(&smap->map, old_sdata->data,
+						     value, false);
+			else
+				copy_map_value_locked(&smap->map, old_sdata->data,
+						      value, false);
 			return old_sdata;
 		}
 	}
@@ -607,8 +615,13 @@  bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
 		goto unlock;
 
 	if (old_sdata && (map_flags & BPF_F_LOCK)) {
-		copy_map_value_locked(&smap->map, old_sdata->data, value,
-				      false);
+		if (smap->map.map_type == BPF_MAP_TYPE_TASK_STORAGE &&
+		    btf_record_has_field(smap->map.record, BPF_UPTR))
+			copy_map_uptr_locked(&smap->map, old_sdata->data,
+					     value, false);
+		else
+			copy_map_value_locked(&smap->map, old_sdata->data,
+					      value, false);
 		selem = SELEM(old_sdata);
 		goto unlock;
 	}
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index d02ae323996b..d588b52605b9 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -388,6 +388,26 @@  void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
 	preempt_enable();
 }
 
+/* Copy map value and uptr from src to dst, with lock_src indicating
+ * whether src or dst is locked.
+ */
+void copy_map_uptr_locked(struct bpf_map *map, void *src, void *dst,
+			  bool lock_src)
+{
+	struct bpf_spin_lock *lock;
+
+	if (lock_src)
+		lock = src + map->record->spin_lock_off;
+	else
+		lock = dst + map->record->spin_lock_off;
+	preempt_disable();
+	__bpf_spin_lock_irqsave(lock);
+	copy_map_value(map, dst, src);
+	bpf_obj_uptrcpy(map->record, dst, src);
+	__bpf_spin_unlock_irqrestore(lock);
+	preempt_enable();
+}
+
 BPF_CALL_0(bpf_jiffies64)
 {
 	return get_jiffies_64();
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index fed4a2145f81..1854aeb13ff7 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -155,8 +155,140 @@  static void maybe_wait_bpf_programs(struct bpf_map *map)
 		synchronize_rcu();
 }
 
-static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
-				void *key, void *value, __u64 flags)
+void bpf_obj_unpin_uptr(const struct btf_field *field, void *addr)
+{
+	struct page *pages[1];
+	u32 size, type_id;
+	int npages;
+	void *ptr;
+
+	type_id = field->kptr.btf_id;
+	btf_type_id_size(field->kptr.btf, &type_id, &size);
+	if (size == 0)
+		return;
+
+	ptr = (void *)((intptr_t)addr & PAGE_MASK);
+
+	npages = (((intptr_t)addr + size + ~PAGE_MASK) - (intptr_t)ptr) >> PAGE_SHIFT;
+	if (WARN_ON_ONCE(npages > 1))
+		return;
+
+	pages[0] = virt_to_page(ptr);
+	unpin_user_pages(pages, 1);
+}
+
+/* Unpin uptr fields in the record up to cnt */
+static void bpf_obj_unpin_uptrs_cnt(struct btf_record *rec, int cnt, void *src)
+{
+	u32 next_off;
+	void **kaddr_ptr;
+	int i;
+
+	for (i = 0; i < cnt; i++) {
+		if (rec->fields[i].type != BPF_UPTR)
+			continue;
+
+		next_off = rec->fields[i].offset;
+		kaddr_ptr = src + next_off;
+		if (*kaddr_ptr) {
+			bpf_obj_unpin_uptr(&rec->fields[i], *kaddr_ptr);
+			*kaddr_ptr = NULL;
+		}
+	}
+}
+
+/* Find all BPF_UPTR fields in the record, pin the user memory, map it
+ * to kernel space, and update the addresses in the source memory.
+ *
+ * The map value passing from userspace may contain user kptrs pointing to
+ * user memory. This function pins the user memory and maps it to kernel
+ * memory so that BPF programs can access it.
+ */
+static int bpf_obj_trans_pin_uptrs(struct btf_record *rec, void *src, u32 size)
+{
+	u32 type_id, tsz, npages, next_off;
+	void *uaddr, *kaddr, **uaddr_ptr;
+	const struct btf_type *t;
+	struct page *pages[1];
+	int i, err;
+
+	if (IS_ERR_OR_NULL(rec))
+		return 0;
+
+	if (!btf_record_has_field(rec, BPF_UPTR))
+		return 0;
+
+	for (i = 0; i < rec->cnt; i++) {
+		if (rec->fields[i].type != BPF_UPTR)
+			continue;
+
+		next_off = rec->fields[i].offset;
+		if (next_off + sizeof(void *) > size) {
+			err = -EFAULT;
+			goto rollback;
+		}
+		uaddr_ptr = src + next_off;
+		uaddr = *uaddr_ptr;
+		if (!uaddr)
+			continue;
+
+		/* Make sure the user memory takes up at most one page */
+		type_id = rec->fields[i].kptr.btf_id;
+		t = btf_type_id_size(rec->fields[i].kptr.btf, &type_id, &tsz);
+		if (!t) {
+			err = -EFAULT;
+			goto rollback;
+		}
+		if (tsz == 0) {
+			*uaddr_ptr = NULL;
+			continue;
+		}
+		npages = (((intptr_t)uaddr + tsz + ~PAGE_MASK) -
+			  ((intptr_t)uaddr & PAGE_MASK)) >> PAGE_SHIFT;
+		if (npages > 1) {
+			/* Allow only one page */
+			err = -EFAULT;
+			goto rollback;
+		}
+
+		/* Pin the user memory */
+		err = pin_user_pages_fast((intptr_t)uaddr, 1, FOLL_LONGTERM | FOLL_WRITE, pages);
+		if (err < 0)
+			goto rollback;
+
+		/* Map to kernel space */
+		kaddr = page_address(pages[0]);
+		if (unlikely(!kaddr)) {
+			WARN_ON_ONCE(1);
+			unpin_user_pages(pages, 1);
+			err = -EFAULT;
+			goto rollback;
+		}
+		*uaddr_ptr = kaddr + ((intptr_t)uaddr & ~PAGE_MASK);
+	}
+
+	return 0;
+
+rollback:
+	/* Unpin the user memory of earlier fields */
+	bpf_obj_unpin_uptrs_cnt(rec, i, src);
+
+	return err;
+}
+
+static void bpf_obj_unpin_uptrs(struct btf_record *rec, void *src)
+{
+	if (IS_ERR_OR_NULL(rec))
+		return;
+
+	if (!btf_record_has_field(rec, BPF_UPTR))
+		return;
+
+	bpf_obj_unpin_uptrs_cnt(rec, rec->cnt, src);
+}
+
+static int bpf_map_update_value_inner(struct bpf_map *map, struct file *map_file,
+				      void *key, void *value, __u64 flags)
 {
 	int err;
 
@@ -208,6 +340,29 @@  static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
 	return err;
 }
 
+static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
+				void *key, void *value, __u64 flags)
+{
+	int err;
+
+	if (map->map_type == BPF_MAP_TYPE_TASK_STORAGE) {
+		/* Pin user memory can lead to context switch, so we need
+		 * to do it before potential RCU lock.
+		 */
+		err = bpf_obj_trans_pin_uptrs(map->record, value,
+					      bpf_map_value_size(map));
+		if (err)
+			return err;
+	}
+
+	err = bpf_map_update_value_inner(map, map_file, key, value, flags);
+
+	if (err && map->map_type == BPF_MAP_TYPE_TASK_STORAGE)
+		bpf_obj_unpin_uptrs(map->record, value);
+
+	return err;
+}
+
 static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
 			      __u64 flags)
 {
@@ -714,6 +869,11 @@  void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
 				field->kptr.dtor(xchgd_field);
 			}
 			break;
+		case BPF_UPTR:
+			if (*(void **)field_ptr)
+				bpf_obj_unpin_uptr(field, *(void **)field_ptr);
+			*(void **)field_ptr = NULL;
+			break;
 		case BPF_LIST_HEAD:
 			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
 				continue;
@@ -1099,7 +1259,7 @@  static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
 
 	map->record = btf_parse_fields(btf, value_type,
 				       BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
-				       BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE,
+				       BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE | BPF_UPTR,
 				       map->value_size);
 	if (!IS_ERR_OR_NULL(map->record)) {
 		int i;
@@ -1155,6 +1315,12 @@  static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
 					goto free_map_tab;
 				}
 				break;
+			case BPF_UPTR:
+				if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) {
+					ret = -EOPNOTSUPP;
+					goto free_map_tab;
+				}
+				break;
 			case BPF_LIST_HEAD:
 			case BPF_RB_ROOT:
 				if (map->map_type != BPF_MAP_TYPE_HASH &&