| Message ID | ZivhG0yrbpFqORDw@casper.infradead.org |
|---|---|
| State | RFC |
| Delegated to | Netdev Maintainers |
| Series | [RFC] Make find_tcp_vma() more efficient |
On Fri, Apr 26, 2024 at 10:15 AM Matthew Wilcox <willy@infradead.org> wrote:
>
> Liam asked me if we could do away with the "bool *mmap_locked"
> parameter, and the problem is that some architectures don't support
> CONFIG_PER_VMA_LOCK yet. But we can abstract it ... something like this
> maybe?
>
> (not particularly proposing this for inclusion; just wrote it and want
> to get it out of my tree so I can get back to other projects. If anyone
> wants it, they can test it and submit it for inclusion and stick my
> S-o-B on it)

I went through all uses of vma_end_read() to convince myself this is
safe with CONFIG_PER_VMA_LOCK=n, and the change seems fine from a
correctness POV. However, the fact that in this configuration
lock_vma_under_mmap_lock() is a NOP and vma_end_read() is
mmap_read_unlock() does not feel right to me. The current code is more
explicit about which lock is held, and I think it's easier to
understand. A new interface like the one below might be a bit better,
but I'm not sure it's worth it:

```c
#ifdef CONFIG_PER_VMA_LOCK

static inline void mmap_to_vma_lock(struct vm_area_struct *vma)
{
	down_read(&vma->vm_lock->lock);
	mmap_read_unlock(vma->vm_mm);
}

static inline void mmap_or_vma_unlock(struct vm_area_struct *vma)
{
	vma_end_read(vma);
}

#else /* CONFIG_PER_VMA_LOCK */

static inline void mmap_to_vma_lock(struct vm_area_struct *vma) {}

static inline void mmap_or_vma_unlock(struct vm_area_struct *vma)
{
	mmap_read_unlock(vma->vm_mm);
}

#endif /* CONFIG_PER_VMA_LOCK */
```

```
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 9849dfda44d4..570763351508 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -779,11 +779,22 @@ static inline void assert_fault_locked(struct vm_fault *vmf)
>  struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
>  					  unsigned long address);
>  
> +static inline void lock_vma_under_mmap_lock(struct vm_area_struct *vma)
> +{
> +	down_read(&vma->vm_lock->lock);
> +	mmap_read_unlock(vma->vm_mm);
> +}
> +
>  #else /* CONFIG_PER_VMA_LOCK */
>  
>  static inline bool vma_start_read(struct vm_area_struct *vma)
>  		{ return false; }
> -static inline void vma_end_read(struct vm_area_struct *vma) {}
> +static inline void vma_end_read(struct vm_area_struct *vma)
> +{
> +	mmap_read_unlock(vma->vm_mm);
> +}
> +
> +static inline void lock_vma_under_mmap_lock(struct vm_area_struct *vma) {}
>  static inline void vma_start_write(struct vm_area_struct *vma) {}
>  static inline void vma_assert_write_locked(struct vm_area_struct *vma)
>  	{ mmap_assert_write_locked(vma->vm_mm); }
> diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
> index f23b97777ea5..e763916e5185 100644
> --- a/net/ipv4/tcp.c
> +++ b/net/ipv4/tcp.c
> @@ -2051,27 +2051,25 @@ static void tcp_zc_finalize_rx_tstamp(struct sock *sk,
>  }
>  
>  static struct vm_area_struct *find_tcp_vma(struct mm_struct *mm,
> -					   unsigned long address,
> -					   bool *mmap_locked)
> +					   unsigned long address)
>  {
>  	struct vm_area_struct *vma = lock_vma_under_rcu(mm, address);
>  
> -	if (vma) {
> -		if (vma->vm_ops != &tcp_vm_ops) {
> -			vma_end_read(vma);
> +	if (!vma) {
> +		mmap_read_lock(mm);
> +		vma = vma_lookup(mm, address);
> +		if (vma) {
> +			lock_vma_under_mmap_lock(vma);
> +		} else {
> +			mmap_read_unlock(mm);
>  			return NULL;
>  		}
> -		*mmap_locked = false;
> -		return vma;
>  	}
> -
> -	mmap_read_lock(mm);
> -	vma = vma_lookup(mm, address);
> -	if (!vma || vma->vm_ops != &tcp_vm_ops) {
> -		mmap_read_unlock(mm);
> +	if (vma->vm_ops != &tcp_vm_ops) {
> +		vma_end_read(vma);
>  		return NULL;
>  	}
> -	*mmap_locked = true;
> +
>  	return vma;
>  }
>  
> @@ -2092,7 +2090,6 @@ static int tcp_zerocopy_receive(struct sock *sk,
>  	u32 seq = tp->copied_seq;
>  	u32 total_bytes_to_map;
>  	int inq = tcp_inq(sk);
> -	bool mmap_locked;
>  	int ret;
>  
>  	zc->copybuf_len = 0;
> @@ -2117,7 +2114,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
>  		return 0;
>  	}
>  
> -	vma = find_tcp_vma(current->mm, address, &mmap_locked);
> +	vma = find_tcp_vma(current->mm, address);
>  	if (!vma)
>  		return -EINVAL;
>  
> @@ -2194,10 +2191,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
>  					      zc, total_bytes_to_map);
>  	}
>  out:
> -	if (mmap_locked)
> -		mmap_read_unlock(current->mm);
> -	else
> -		vma_end_read(vma);
> +	vma_end_read(vma);
>  	/* Try to copy straggler data. */
>  	if (!ret)
>  		copylen = tcp_zc_handle_leftover(zc, sk, skb, &seq, copybuf_len, tss);
```
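For concreteness, the patched find_tcp_vma() could be rewritten on top of the mmap_to_vma_lock()/mmap_or_vma_unlock() interface proposed above. This is an illustrative sketch, not posted or tested code:

```c
/*
 * Illustrative rewrite of find_tcp_vma() using the mmap_to_vma_lock()/
 * mmap_or_vma_unlock() helpers proposed above; a sketch, not posted code.
 */
static struct vm_area_struct *find_tcp_vma(struct mm_struct *mm,
					   unsigned long address)
{
	struct vm_area_struct *vma = lock_vma_under_rcu(mm, address);

	if (!vma) {
		mmap_read_lock(mm);
		vma = vma_lookup(mm, address);
		if (!vma) {
			mmap_read_unlock(mm);
			return NULL;
		}
		/*
		 * CONFIG_PER_VMA_LOCK=y: trade the mmap lock for the
		 * per-VMA lock.  CONFIG_PER_VMA_LOCK=n: no-op, the mmap
		 * read lock stays held.
		 */
		mmap_to_vma_lock(vma);
	}
	if (vma->vm_ops != &tcp_vm_ops) {
		/* Drops whichever of the two locks is actually held. */
		mmap_or_vma_unlock(vma);
		return NULL;
	}
	return vma;
}
```

The caller's unlock site would then call mmap_or_vma_unlock(vma) instead of vma_end_read(vma), making it explicit in the name that either lock may be the one being released.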
* Suren Baghdasaryan <surenb@google.com> [240503 18:29]:
> On Fri, Apr 26, 2024 at 10:15 AM Matthew Wilcox <willy@infradead.org> wrote:
> >
> > Liam asked me if we could do away with the "bool *mmap_locked"
> > parameter, and the problem is that some architectures don't support
> > CONFIG_PER_VMA_LOCK yet. But we can abstract it ... something like this
> > maybe?
> >
> > (not particularly proposing this for inclusion; just wrote it and want
> > to get it out of my tree so I can get back to other projects. If anyone
> > wants it, they can test it and submit it for inclusion and stick my
> > S-o-B on it)
>
> I went through all uses of vma_end_read() to convince myself this is
> safe with CONFIG_PER_VMA_LOCK=n, and the change seems fine from a
> correctness POV. However, the fact that in this configuration
> lock_vma_under_mmap_lock() is a NOP and vma_end_read() is
> mmap_read_unlock() does not feel right to me. The current code is more
> explicit about which lock is held, and I think it's easier to
> understand. A new interface like the one below might be a bit better,
> but I'm not sure it's worth it:
> ...

We could do something like we do in release_fault_lock(), but without
using the FAULT flag:

```c
/* Naming is hard. */
static inline void release_vma_modification_lock(
		struct vm_area_struct *vma)
{
#ifdef CONFIG_PER_VMA_LOCK
	if (rwsem_is_locked(&vma->vm_lock->lock))
		vma_end_read(vma);
	else
		mmap_read_unlock(vma->vm_mm);
#else
	mmap_read_unlock(vma->vm_mm);
#endif
}
```
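Plugged into the caller, Liam's helper would keep the single unlock site from the patch while making the dual-lock possibility explicit. A sketch of the tail of tcp_zerocopy_receive() under that assumption, untested:

```c
	/*
	 * Sketch: unlock path of tcp_zerocopy_receive() using the
	 * release_vma_modification_lock() helper above; untested.
	 * Drops the per-VMA lock if find_tcp_vma() took the lockless
	 * fast path, otherwise drops the mmap read lock.
	 */
out:
	release_vma_modification_lock(vma);
	/* Try to copy straggler data. */
	if (!ret)
		copylen = tcp_zc_handle_leftover(zc, sk, skb, &seq,
						 copybuf_len, tss);
```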
```diff
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9849dfda44d4..570763351508 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -779,11 +779,22 @@ static inline void assert_fault_locked(struct vm_fault *vmf)
 struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
 					  unsigned long address);
 
+static inline void lock_vma_under_mmap_lock(struct vm_area_struct *vma)
+{
+	down_read(&vma->vm_lock->lock);
+	mmap_read_unlock(vma->vm_mm);
+}
+
 #else /* CONFIG_PER_VMA_LOCK */
 
 static inline bool vma_start_read(struct vm_area_struct *vma)
 		{ return false; }
-static inline void vma_end_read(struct vm_area_struct *vma) {}
+static inline void vma_end_read(struct vm_area_struct *vma)
+{
+	mmap_read_unlock(vma->vm_mm);
+}
+
+static inline void lock_vma_under_mmap_lock(struct vm_area_struct *vma) {}
 static inline void vma_start_write(struct vm_area_struct *vma) {}
 static inline void vma_assert_write_locked(struct vm_area_struct *vma)
 	{ mmap_assert_write_locked(vma->vm_mm); }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f23b97777ea5..e763916e5185 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2051,27 +2051,25 @@ static void tcp_zc_finalize_rx_tstamp(struct sock *sk,
 }
 
 static struct vm_area_struct *find_tcp_vma(struct mm_struct *mm,
-					   unsigned long address,
-					   bool *mmap_locked)
+					   unsigned long address)
 {
 	struct vm_area_struct *vma = lock_vma_under_rcu(mm, address);
 
-	if (vma) {
-		if (vma->vm_ops != &tcp_vm_ops) {
-			vma_end_read(vma);
+	if (!vma) {
+		mmap_read_lock(mm);
+		vma = vma_lookup(mm, address);
+		if (vma) {
+			lock_vma_under_mmap_lock(vma);
+		} else {
+			mmap_read_unlock(mm);
 			return NULL;
 		}
-		*mmap_locked = false;
-		return vma;
 	}
-
-	mmap_read_lock(mm);
-	vma = vma_lookup(mm, address);
-	if (!vma || vma->vm_ops != &tcp_vm_ops) {
-		mmap_read_unlock(mm);
+	if (vma->vm_ops != &tcp_vm_ops) {
+		vma_end_read(vma);
 		return NULL;
 	}
-	*mmap_locked = true;
+
 	return vma;
 }
 
@@ -2092,7 +2090,6 @@ static int tcp_zerocopy_receive(struct sock *sk,
 	u32 seq = tp->copied_seq;
 	u32 total_bytes_to_map;
 	int inq = tcp_inq(sk);
-	bool mmap_locked;
 	int ret;
 
 	zc->copybuf_len = 0;
@@ -2117,7 +2114,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
 		return 0;
 	}
 
-	vma = find_tcp_vma(current->mm, address, &mmap_locked);
+	vma = find_tcp_vma(current->mm, address);
 	if (!vma)
 		return -EINVAL;
 
@@ -2194,10 +2191,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
 					      zc, total_bytes_to_map);
 	}
 out:
-	if (mmap_locked)
-		mmap_read_unlock(current->mm);
-	else
-		vma_end_read(vma);
+	vma_end_read(vma);
 	/* Try to copy straggler data. */
 	if (!ret)
 		copylen = tcp_zc_handle_leftover(zc, sk, skb, &seq, copybuf_len, tss);
```
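Taken together, the patch gives every caller one uniform protocol: try lock_vma_under_rcu(), fall back to the mmap lock plus lock_vma_under_mmap_lock(), and always finish with vma_end_read(). A minimal sketch of that pattern, where example_user() is a made-up function for illustration only:

```c
/*
 * Minimal sketch of the caller-side pattern established by this patch;
 * example_user() is a made-up function, not kernel code.
 */
static int example_user(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = lock_vma_under_rcu(mm, addr);

	if (!vma) {
		/* Fast path failed (or CONFIG_PER_VMA_LOCK=n): fall back. */
		mmap_read_lock(mm);
		vma = vma_lookup(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			return -EINVAL;
		}
		/*
		 * CONFIG_PER_VMA_LOCK=y: take the VMA read lock, then drop
		 * the mmap lock.  CONFIG_PER_VMA_LOCK=n: no-op, the mmap
		 * read lock stays held.
		 */
		lock_vma_under_mmap_lock(vma);
	}

	/* ... operate on the VMA ... */

	/*
	 * CONFIG_PER_VMA_LOCK=y: releases the per-VMA read lock.
	 * CONFIG_PER_VMA_LOCK=n: releases the mmap read lock instead.
	 */
	vma_end_read(vma);
	return 0;
}
```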