Message ID | 20231013160423.2218093-12-dhowells@redhat.com (mailing list archive) |
---|---|
State | New |
Series | netfs, afs, cifs: Delegate high-level I/O to netfslib |
On Fri, 2023-10-13 at 17:03 +0100, David Howells wrote:
> Add a bvec array pointer and an iterator to netfs_io_request for either
> holding a copy of a DIO iterator or a list of all the bits of buffer
> pointed to by a DIO iterator.
> 
> There are two problems: Firstly, if an iovec-class iov_iter is passed to
> ->read_iter() or ->write_iter(), this cannot be passed directly to
> kernel_sendmsg() or kernel_recvmsg() as that may cause locking recursion if
> a fault is generated, so we need to keep track of the pages involved
> separately.
> 
> Secondly, if the I/O is asynchronous, we must copy the iov_iter describing
> the buffer before returning to the caller as it may be immediately
> deallocated.
> 
> Signed-off-by: David Howells <dhowells@redhat.com>
> cc: Jeff Layton <jlayton@kernel.org>
> cc: linux-cachefs@redhat.com
> cc: linux-fsdevel@vger.kernel.org
> cc: linux-mm@kvack.org
> ---
>  fs/netfs/objects.c    | 10 ++++++++++
>  include/linux/netfs.h |  3 +++
>  2 files changed, 13 insertions(+)
> 
> diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
> index 8e92b8401aaa..4396318081bf 100644
> --- a/fs/netfs/objects.c
> +++ b/fs/netfs/objects.c
> @@ -78,6 +78,7 @@ static void netfs_free_request(struct work_struct *work)
>  {
>          struct netfs_io_request *rreq =
>                  container_of(work, struct netfs_io_request, work);
> +        unsigned int i;
> 
>          trace_netfs_rreq(rreq, netfs_rreq_trace_free);
>          netfs_proc_del_rreq(rreq);
> @@ -86,6 +87,15 @@ static void netfs_free_request(struct work_struct *work)
>                  rreq->netfs_ops->free_request(rreq);
>          if (rreq->cache_resources.ops)
>                  rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
> +        if (rreq->direct_bv) {
> +                for (i = 0; i < rreq->direct_bv_count; i++) {
> +                        if (rreq->direct_bv[i].bv_page) {
> +                                if (rreq->direct_bv_unpin)
> +                                        unpin_user_page(rreq->direct_bv[i].bv_page);
> +                        }
> +                }
> +                kvfree(rreq->direct_bv);
> +        }
>          kfree_rcu(rreq, rcu);
>          netfs_stat_d(&netfs_n_rh_rreq);
>  }
> diff --git a/include/linux/netfs.h b/include/linux/netfs.h
> index bd0437088f0e..66479a61ad00 100644
> --- a/include/linux/netfs.h
> +++ b/include/linux/netfs.h
> @@ -191,7 +191,9 @@ struct netfs_io_request {
>          struct list_head subrequests;    /* Contributory I/O operations */
>          struct iov_iter iter;            /* Unencrypted-side iterator */
>          struct iov_iter io_iter;         /* I/O (Encrypted-side) iterator */
> +        struct bio_vec *direct_bv;       /* DIO buffer list (when handling iovec-iter) */
>          void *netfs_priv;                /* Private data for the netfs */
> +        unsigned int direct_bv_count;    /* Number of elements in bv[] */

nit: "number of elements in direct_bv[]"

Also, just for better readability, can you swap direct_bv and
netfs_priv? Then at least the array and count are together.

>          unsigned int debug_id;
>          unsigned int rsize;              /* Maximum read size (0 for none) */
>          atomic_t nr_outstanding;         /* Number of ops in progress */
> @@ -200,6 +202,7 @@ struct netfs_io_request {
>          size_t len;                      /* Length of the request */
>          short error;                     /* 0 or error that occurred */
>          enum netfs_io_origin origin;     /* Origin of the request */
> +        bool direct_bv_unpin;            /* T if direct_bv[] must be unpinned */
>          loff_t i_size;                   /* Size of the file */
>          loff_t start;                    /* Start position */
>          pgoff_t no_unlock_folio;         /* Don't unlock this folio after read */
> 
Jeff Layton <jlayton@kernel.org> wrote:

> > +        struct bio_vec *direct_bv;       /* DIO buffer list (when handling iovec-iter) */
> >          void *netfs_priv;                /* Private data for the netfs */
> > +        unsigned int direct_bv_count;    /* Number of elements in bv[] */
> 
> nit: "number of elements in direct_bv[]"
> 
> Also, just for better readability, can you swap direct_bv and
> netfs_priv? Then at least the array and count are together.

Yeah - and stick a __counted_by() on too.

David
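For reference, after the swap Jeff suggests and with the annotation David mentions, the relevant members might end up looking something like the sketch below. This is illustrative only, not the final form of the patch, and whether `__counted_by()` can be applied to a pointer member (rather than a flexible array member) depends on compiler support for the attribute:

```c
struct netfs_io_request {
	/* ... */
	struct iov_iter		iter;		/* Unencrypted-side iterator */
	struct iov_iter		io_iter;	/* I/O (Encrypted-side) iterator */
	void			*netfs_priv;	/* Private data for the netfs */
	struct bio_vec		*direct_bv	/* DIO buffer list (when handling iovec-iter) */
		__counted_by(direct_bv_count);
	unsigned int		direct_bv_count; /* Number of elements in direct_bv[] */
	unsigned int		debug_id;
	/* ... */
};
```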
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index 8e92b8401aaa..4396318081bf 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -78,6 +78,7 @@ static void netfs_free_request(struct work_struct *work)
 {
         struct netfs_io_request *rreq =
                 container_of(work, struct netfs_io_request, work);
+        unsigned int i;

         trace_netfs_rreq(rreq, netfs_rreq_trace_free);
         netfs_proc_del_rreq(rreq);
@@ -86,6 +87,15 @@ static void netfs_free_request(struct work_struct *work)
                 rreq->netfs_ops->free_request(rreq);
         if (rreq->cache_resources.ops)
                 rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
+        if (rreq->direct_bv) {
+                for (i = 0; i < rreq->direct_bv_count; i++) {
+                        if (rreq->direct_bv[i].bv_page) {
+                                if (rreq->direct_bv_unpin)
+                                        unpin_user_page(rreq->direct_bv[i].bv_page);
+                        }
+                }
+                kvfree(rreq->direct_bv);
+        }
         kfree_rcu(rreq, rcu);
         netfs_stat_d(&netfs_n_rh_rreq);
 }
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index bd0437088f0e..66479a61ad00 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -191,7 +191,9 @@ struct netfs_io_request {
         struct list_head subrequests;    /* Contributory I/O operations */
         struct iov_iter iter;            /* Unencrypted-side iterator */
         struct iov_iter io_iter;         /* I/O (Encrypted-side) iterator */
+        struct bio_vec *direct_bv;       /* DIO buffer list (when handling iovec-iter) */
         void *netfs_priv;                /* Private data for the netfs */
+        unsigned int direct_bv_count;    /* Number of elements in bv[] */
         unsigned int debug_id;
         unsigned int rsize;              /* Maximum read size (0 for none) */
         atomic_t nr_outstanding;         /* Number of ops in progress */
@@ -200,6 +202,7 @@ struct netfs_io_request {
         size_t len;                      /* Length of the request */
         short error;                     /* 0 or error that occurred */
         enum netfs_io_origin origin;     /* Origin of the request */
+        bool direct_bv_unpin;            /* T if direct_bv[] must be unpinned */
         loff_t i_size;                   /* Size of the file */
         loff_t start;                    /* Start position */
         pgoff_t no_unlock_folio;         /* Don't unlock this folio after read */
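The hunk in netfs_free_request() is only the teardown half. For context, the setup half it mirrors (not part of this patch) would extract, and for user-backed iterators pin, the pages covered by an iovec-class iterator into `direct_bv`, record whether they need unpinning, and point the request's iterator at that snapshot. Below is a rough sketch of what such a helper could look like; the function name and structure are hypothetical, while `iov_iter_extract_pages()`, `iov_iter_extract_will_pin()`, `bvec_set_page()` and `iov_iter_bvec()` are existing kernel interfaces:

```c
/*
 * Illustrative sketch only, not code from this series: snapshot an
 * iovec-class iterator into rreq->direct_bv so that later socket I/O
 * never faults on the caller's user buffer, and so the request no
 * longer depends on the caller's iov_iter once this returns.
 */
static int example_snapshot_user_iter(struct netfs_io_request *rreq,
				      struct iov_iter *src)
{
	unsigned int max_pages = iov_iter_npages(src, INT_MAX);
	struct page *batch[16], **pages;
	struct bio_vec *bv;
	unsigned int i = 0;
	size_t total = 0;

	bv = kvcalloc(max_pages, sizeof(*bv), GFP_KERNEL);
	if (!bv)
		return -ENOMEM;
	rreq->direct_bv = bv;

	/* Extraction pins the pages of user-backed iterators. */
	rreq->direct_bv_unpin = iov_iter_extract_will_pin(src);

	while (iov_iter_count(src) && i < max_pages) {
		size_t off, len, done = 0, count;
		unsigned int p = 0;
		ssize_t got;

		pages = batch;	/* supply our own array; don't let it allocate */
		got = iov_iter_extract_pages(src, &pages, iov_iter_count(src),
					     min_t(unsigned int, ARRAY_SIZE(batch),
						   max_pages - i),
					     0, &off);
		if (got <= 0) {
			/* Whatever was pinned so far is released by
			 * netfs_free_request() via direct_bv[].
			 */
			rreq->direct_bv_count = i;
			return got < 0 ? got : -EIO;
		}
		count = got;

		/* Chop the extracted span into per-page bio_vecs. */
		while (done < count) {
			len = min_t(size_t, count - done, PAGE_SIZE - off);
			bvec_set_page(&bv[i++], batch[p++], len, off);
			off = 0;
			done += len;
		}
		total += count;
	}
	rreq->direct_bv_count = i;

	/* The request now holds a stable, kernel-owned view of the buffer. */
	iov_iter_bvec(&rreq->iter, iov_iter_rw(src), bv, i, total);
	return 0;
}
```

This also shows why the free path checks `bv_page`: the array is zero-filled up front, so a partially populated snapshot can be handed straight to netfs_free_request() for cleanup.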
Add a bvec array pointer and an iterator to netfs_io_request for either
holding a copy of a DIO iterator or a list of all the bits of buffer
pointed to by a DIO iterator.

There are two problems: Firstly, if an iovec-class iov_iter is passed to
->read_iter() or ->write_iter(), this cannot be passed directly to
kernel_sendmsg() or kernel_recvmsg() as that may cause locking recursion if
a fault is generated, so we need to keep track of the pages involved
separately.

Secondly, if the I/O is asynchronous, we must copy the iov_iter describing
the buffer before returning to the caller as it may be immediately
deallocated.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
---
 fs/netfs/objects.c    | 10 ++++++++++
 include/linux/netfs.h |  3 +++
 2 files changed, 13 insertions(+)
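As a further illustration of the second problem only (again, not code from this series): when the iterator is already bvec-class and the request is asynchronous, no pinning is involved; the request just needs its own copy of the iterator state and of the segment array before ->read_iter()/->write_iter() returns. A hypothetical sketch:

```c
/*
 * Hypothetical sketch: duplicate a bvec-class iterator so the request
 * survives the caller's iov_iter being freed after an async submission.
 */
static int example_copy_bvec_iter(struct netfs_io_request *rreq,
				  const struct iov_iter *src)
{
	struct bio_vec *bv;
	size_t size = src->nr_segs * sizeof(*bv);

	bv = kvmalloc(size, GFP_KERNEL);
	if (!bv)
		return -ENOMEM;
	memcpy(bv, src->bvec, size);

	rreq->direct_bv = bv;
	rreq->direct_bv_count = src->nr_segs;
	rreq->direct_bv_unpin = false;	/* nothing was pinned */

	rreq->iter = *src;	/* copy the iterator state... */
	rreq->iter.bvec = bv;	/* ...but point it at our private copy */
	return 0;
}
```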