
[v4,2/4] fs/proc/kcore: convert read_kcore() to read_kcore_iter()

Message ID: a84da6cc458b44d949058b5f475ed3225008cfd9.1679431886.git.lstoakes@gmail.com
State: Mainlined, archived
Series: convert read_kcore(), vread() to use iterators

Commit Message

Lorenzo Stoakes March 21, 2023, 8:54 p.m. UTC
Now we have eliminated spinlocks from the vread() case, convert
read_kcore() to read_kcore_iter().

For the time being we still use a bounce buffer for vread(), however in the
next patch we will convert this to interact directly with the iterator and
eliminate the bounce buffer altogether.

Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
---
 fs/proc/kcore.c | 58 ++++++++++++++++++++++++-------------------------
 1 file changed, 29 insertions(+), 29 deletions(-)
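
[Editorial note: the two copy primitives signal failure differently, which is
why every converted check in the diff below becomes a "!= tsz" comparison. A
minimal sketch of the pattern follows (illustrative only, not code from this
patch; dst, src, len and iter are hypothetical names):

	/*
	 * copy_to_user() returns the number of bytes that could NOT be
	 * copied, so zero means success:
	 */
	if (copy_to_user(dst, src, len))		/* old style */
		return -EFAULT;

	/*
	 * copy_to_iter() returns the number of bytes actually copied, so
	 * success means the full length was transferred:
	 */
	if (copy_to_iter(src, len, iter) != len)	/* new style */
		return -EFAULT;

	/*
	 * Likewise, clear_user() returns the number of bytes left unzeroed
	 * (zero on success), while iov_iter_zero() returns the number of
	 * bytes it managed to zero:
	 */
	if (clear_user(dst, len))			/* old style */
		return -EFAULT;
	if (iov_iter_zero(len, iter) != len)		/* new style */
		return -EFAULT;
]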

Comments

Baoquan He March 22, 2023, 1:15 a.m. UTC | #1
Hi Lorenzo,

On 03/21/23 at 08:54pm, Lorenzo Stoakes wrote:
> Now we have eliminated spinlocks from the vread() case, convert
> read_kcore() to read_kcore_iter().

Sorry for the late comment.

I may be missing something important here, but I don't see where we have
eliminated spinlocks from the vread() case. Am I misunderstanding this
sentence?

> 
> For the time being we still use a bounce buffer for vread(), however in the
> next patch we will convert this to interact directly with the iterator and
> eliminate the bounce buffer altogether.
> 
> Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
> ---
>  fs/proc/kcore.c | 58 ++++++++++++++++++++++++-------------------------
>  1 file changed, 29 insertions(+), 29 deletions(-)
> 
> diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
> index 556f310d6aa4..25e0eeb8d498 100644
> --- a/fs/proc/kcore.c
> +++ b/fs/proc/kcore.c
> @@ -24,7 +24,7 @@
>  #include <linux/memblock.h>
>  #include <linux/init.h>
>  #include <linux/slab.h>
> -#include <linux/uaccess.h>
> +#include <linux/uio.h>
>  #include <asm/io.h>
>  #include <linux/list.h>
>  #include <linux/ioport.h>
> @@ -308,9 +308,12 @@ static void append_kcore_note(char *notes, size_t *i, const char *name,
>  }
>  
>  static ssize_t
> -read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
> +read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
>  {
> +	struct file *file = iocb->ki_filp;
>  	char *buf = file->private_data;
> +	loff_t *ppos = &iocb->ki_pos;
> +
>  	size_t phdrs_offset, notes_offset, data_offset;
>  	size_t page_offline_frozen = 1;
>  	size_t phdrs_len, notes_len;
> @@ -318,6 +321,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
>  	size_t tsz;
>  	int nphdr;
>  	unsigned long start;
> +	size_t buflen = iov_iter_count(iter);
>  	size_t orig_buflen = buflen;
>  	int ret = 0;
>  
> @@ -333,7 +337,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
>  	notes_offset = phdrs_offset + phdrs_len;
>  
>  	/* ELF file header. */
> -	if (buflen && *fpos < sizeof(struct elfhdr)) {
> +	if (buflen && *ppos < sizeof(struct elfhdr)) {
>  		struct elfhdr ehdr = {
>  			.e_ident = {
>  				[EI_MAG0] = ELFMAG0,
> @@ -355,19 +359,18 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
>  			.e_phnum = nphdr,
>  		};
>  
> -		tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
> -		if (copy_to_user(buffer, (char *)&ehdr + *fpos, tsz)) {
> +		tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *ppos);
> +		if (copy_to_iter((char *)&ehdr + *ppos, tsz, iter) != tsz) {
>  			ret = -EFAULT;
>  			goto out;
>  		}
>  
> -		buffer += tsz;
>  		buflen -= tsz;
> -		*fpos += tsz;
> +		*ppos += tsz;
>  	}
>  
>  	/* ELF program headers. */
> -	if (buflen && *fpos < phdrs_offset + phdrs_len) {
> +	if (buflen && *ppos < phdrs_offset + phdrs_len) {
>  		struct elf_phdr *phdrs, *phdr;
>  
>  		phdrs = kzalloc(phdrs_len, GFP_KERNEL);
> @@ -397,22 +400,21 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
>  			phdr++;
>  		}
>  
> -		tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
> -		if (copy_to_user(buffer, (char *)phdrs + *fpos - phdrs_offset,
> -				 tsz)) {
> +		tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *ppos);
> +		if (copy_to_iter((char *)phdrs + *ppos - phdrs_offset, tsz,
> +				 iter) != tsz) {
>  			kfree(phdrs);
>  			ret = -EFAULT;
>  			goto out;
>  		}
>  		kfree(phdrs);
>  
> -		buffer += tsz;
>  		buflen -= tsz;
> -		*fpos += tsz;
> +		*ppos += tsz;
>  	}
>  
>  	/* ELF note segment. */
> -	if (buflen && *fpos < notes_offset + notes_len) {
> +	if (buflen && *ppos < notes_offset + notes_len) {
>  		struct elf_prstatus prstatus = {};
>  		struct elf_prpsinfo prpsinfo = {
>  			.pr_sname = 'R',
> @@ -447,24 +449,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
>  				  vmcoreinfo_data,
>  				  min(vmcoreinfo_size, notes_len - i));
>  
> -		tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
> -		if (copy_to_user(buffer, notes + *fpos - notes_offset, tsz)) {
> +		tsz = min_t(size_t, buflen, notes_offset + notes_len - *ppos);
> +		if (copy_to_iter(notes + *ppos - notes_offset, tsz, iter) != tsz) {
>  			kfree(notes);
>  			ret = -EFAULT;
>  			goto out;
>  		}
>  		kfree(notes);
>  
> -		buffer += tsz;
>  		buflen -= tsz;
> -		*fpos += tsz;
> +		*ppos += tsz;
>  	}
>  
>  	/*
>  	 * Check to see if our file offset matches with any of
>  	 * the addresses in the elf_phdr on our list.
>  	 */
> -	start = kc_offset_to_vaddr(*fpos - data_offset);
> +	start = kc_offset_to_vaddr(*ppos - data_offset);
>  	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
>  		tsz = buflen;
>  
> @@ -497,7 +498,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
>  		}
>  
>  		if (!m) {
> -			if (clear_user(buffer, tsz)) {
> +			if (iov_iter_zero(tsz, iter) != tsz) {
>  				ret = -EFAULT;
>  				goto out;
>  			}
> @@ -508,14 +509,14 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
>  		case KCORE_VMALLOC:
>  			vread(buf, (char *)start, tsz);
>  			/* we have to zero-fill user buffer even if no read */
> -			if (copy_to_user(buffer, buf, tsz)) {
> +			if (copy_to_iter(buf, tsz, iter) != tsz) {
>  				ret = -EFAULT;
>  				goto out;
>  			}
>  			break;
>  		case KCORE_USER:
>  			/* User page is handled prior to normal kernel page: */
> -			if (copy_to_user(buffer, (char *)start, tsz)) {
> +			if (copy_to_iter((char *)start, tsz, iter) != tsz) {
>  				ret = -EFAULT;
>  				goto out;
>  			}
> @@ -531,7 +532,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
>  			 */
>  			if (!page || PageOffline(page) ||
>  			    is_page_hwpoison(page) || !pfn_is_ram(pfn)) {
> -				if (clear_user(buffer, tsz)) {
> +				if (iov_iter_zero(tsz, iter) != tsz) {
>  					ret = -EFAULT;
>  					goto out;
>  				}
> @@ -541,25 +542,24 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
>  		case KCORE_VMEMMAP:
>  		case KCORE_TEXT:
>  			/*
> -			 * We use _copy_to_user() to bypass usermode hardening
> +			 * We use _copy_to_iter() to bypass usermode hardening
>  			 * which would otherwise prevent this operation.
>  			 */
> -			if (_copy_to_user(buffer, (char *)start, tsz)) {
> +			if (_copy_to_iter((char *)start, tsz, iter) != tsz) {
>  				ret = -EFAULT;
>  				goto out;
>  			}
>  			break;
>  		default:
>  			pr_warn_once("Unhandled KCORE type: %d\n", m->type);
> -			if (clear_user(buffer, tsz)) {
> +			if (iov_iter_zero(tsz, iter) != tsz) {
>  				ret = -EFAULT;
>  				goto out;
>  			}
>  		}
>  skip:
>  		buflen -= tsz;
> -		*fpos += tsz;
> -		buffer += tsz;
> +		*ppos += tsz;
>  		start += tsz;
>  		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
>  	}
> @@ -603,7 +603,7 @@ static int release_kcore(struct inode *inode, struct file *file)
>  }
>  
>  static const struct proc_ops kcore_proc_ops = {
> -	.proc_read	= read_kcore,
> +	.proc_read_iter	= read_kcore_iter,
>  	.proc_open	= open_kcore,
>  	.proc_release	= release_kcore,
>  	.proc_lseek	= default_llseek,
> -- 
> 2.39.2
>
Lorenzo Stoakes March 22, 2023, 6:17 a.m. UTC | #2
On Wed, Mar 22, 2023 at 09:15:48AM +0800, Baoquan He wrote:
> Hi Lorenzo,
>
> On 03/21/23 at 08:54pm, Lorenzo Stoakes wrote:
> > Now we have eliminated spinlocks from the vread() case, convert
> > read_kcore() to read_kcore_iter().
>
> Sorry for the late comment.
>
> I may be missing something important here, but I don't see where we have
> eliminated spinlocks from the vread() case. Am I misunderstanding this
> sentence?
>

Apologies, I didn't update the commit message after the latest revision! We
can just drop this sentence altogether.

Andrew - could you change the commit message to simply read:-

  For the time being we still use a bounce buffer for vread(), however in the
  next patch we will convert this to interact directly with the iterator and
  eliminate the bounce buffer altogether.

Thanks!

David Hildenbrand March 22, 2023, 11:12 a.m. UTC | #3
On 21.03.23 21:54, Lorenzo Stoakes wrote:
> [...]
> @@ -308,9 +308,12 @@ static void append_kcore_note(char *notes, size_t *i, const char *name,
>   }
>   
>   static ssize_t
> -read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
> +read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
>   {
> +	struct file *file = iocb->ki_filp;
>   	char *buf = file->private_data;
> +	loff_t *ppos = &iocb->ki_pos;

Not renaming fpos -> ppos would result in less noise in this patch, just
like you didn't rename buflen.

In general, LGTM

Reviewed-by: David Hildenbrand <david@redhat.com>

Patch

diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 556f310d6aa4..25e0eeb8d498 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -24,7 +24,7 @@
 #include <linux/memblock.h>
 #include <linux/init.h>
 #include <linux/slab.h>
-#include <linux/uaccess.h>
+#include <linux/uio.h>
 #include <asm/io.h>
 #include <linux/list.h>
 #include <linux/ioport.h>
@@ -308,9 +308,12 @@ static void append_kcore_note(char *notes, size_t *i, const char *name,
 }
 
 static ssize_t
-read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
+read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
+	struct file *file = iocb->ki_filp;
 	char *buf = file->private_data;
+	loff_t *ppos = &iocb->ki_pos;
+
 	size_t phdrs_offset, notes_offset, data_offset;
 	size_t page_offline_frozen = 1;
 	size_t phdrs_len, notes_len;
@@ -318,6 +321,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 	size_t tsz;
 	int nphdr;
 	unsigned long start;
+	size_t buflen = iov_iter_count(iter);
 	size_t orig_buflen = buflen;
 	int ret = 0;
 
@@ -333,7 +337,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 	notes_offset = phdrs_offset + phdrs_len;
 
 	/* ELF file header. */
-	if (buflen && *fpos < sizeof(struct elfhdr)) {
+	if (buflen && *ppos < sizeof(struct elfhdr)) {
 		struct elfhdr ehdr = {
 			.e_ident = {
 				[EI_MAG0] = ELFMAG0,
@@ -355,19 +359,18 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 			.e_phnum = nphdr,
 		};
 
-		tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
-		if (copy_to_user(buffer, (char *)&ehdr + *fpos, tsz)) {
+		tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *ppos);
+		if (copy_to_iter((char *)&ehdr + *ppos, tsz, iter) != tsz) {
 			ret = -EFAULT;
 			goto out;
 		}
 
-		buffer += tsz;
 		buflen -= tsz;
-		*fpos += tsz;
+		*ppos += tsz;
 	}
 
 	/* ELF program headers. */
-	if (buflen && *fpos < phdrs_offset + phdrs_len) {
+	if (buflen && *ppos < phdrs_offset + phdrs_len) {
 		struct elf_phdr *phdrs, *phdr;
 
 		phdrs = kzalloc(phdrs_len, GFP_KERNEL);
@@ -397,22 +400,21 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 			phdr++;
 		}
 
-		tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
-		if (copy_to_user(buffer, (char *)phdrs + *fpos - phdrs_offset,
-				 tsz)) {
+		tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *ppos);
+		if (copy_to_iter((char *)phdrs + *ppos - phdrs_offset, tsz,
+				 iter) != tsz) {
 			kfree(phdrs);
 			ret = -EFAULT;
 			goto out;
 		}
 		kfree(phdrs);
 
-		buffer += tsz;
 		buflen -= tsz;
-		*fpos += tsz;
+		*ppos += tsz;
 	}
 
 	/* ELF note segment. */
-	if (buflen && *fpos < notes_offset + notes_len) {
+	if (buflen && *ppos < notes_offset + notes_len) {
 		struct elf_prstatus prstatus = {};
 		struct elf_prpsinfo prpsinfo = {
 			.pr_sname = 'R',
@@ -447,24 +449,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 				  vmcoreinfo_data,
 				  min(vmcoreinfo_size, notes_len - i));
 
-		tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
-		if (copy_to_user(buffer, notes + *fpos - notes_offset, tsz)) {
+		tsz = min_t(size_t, buflen, notes_offset + notes_len - *ppos);
+		if (copy_to_iter(notes + *ppos - notes_offset, tsz, iter) != tsz) {
 			kfree(notes);
 			ret = -EFAULT;
 			goto out;
 		}
 		kfree(notes);
 
-		buffer += tsz;
 		buflen -= tsz;
-		*fpos += tsz;
+		*ppos += tsz;
 	}
 
 	/*
 	 * Check to see if our file offset matches with any of
 	 * the addresses in the elf_phdr on our list.
 	 */
-	start = kc_offset_to_vaddr(*fpos - data_offset);
+	start = kc_offset_to_vaddr(*ppos - data_offset);
 	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
 		tsz = buflen;
 
@@ -497,7 +498,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 		}
 
 		if (!m) {
-			if (clear_user(buffer, tsz)) {
+			if (iov_iter_zero(tsz, iter) != tsz) {
 				ret = -EFAULT;
 				goto out;
 			}
@@ -508,14 +509,14 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 		case KCORE_VMALLOC:
 			vread(buf, (char *)start, tsz);
 			/* we have to zero-fill user buffer even if no read */
-			if (copy_to_user(buffer, buf, tsz)) {
+			if (copy_to_iter(buf, tsz, iter) != tsz) {
 				ret = -EFAULT;
 				goto out;
 			}
 			break;
 		case KCORE_USER:
 			/* User page is handled prior to normal kernel page: */
-			if (copy_to_user(buffer, (char *)start, tsz)) {
+			if (copy_to_iter((char *)start, tsz, iter) != tsz) {
 				ret = -EFAULT;
 				goto out;
 			}
@@ -531,7 +532,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 			 */
 			if (!page || PageOffline(page) ||
 			    is_page_hwpoison(page) || !pfn_is_ram(pfn)) {
-				if (clear_user(buffer, tsz)) {
+				if (iov_iter_zero(tsz, iter) != tsz) {
 					ret = -EFAULT;
 					goto out;
 				}
@@ -541,25 +542,24 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 		case KCORE_VMEMMAP:
 		case KCORE_TEXT:
 			/*
-			 * We use _copy_to_user() to bypass usermode hardening
+			 * We use _copy_to_iter() to bypass usermode hardening
 			 * which would otherwise prevent this operation.
 			 */
-			if (_copy_to_user(buffer, (char *)start, tsz)) {
+			if (_copy_to_iter((char *)start, tsz, iter) != tsz) {
 				ret = -EFAULT;
 				goto out;
 			}
 			break;
 		default:
 			pr_warn_once("Unhandled KCORE type: %d\n", m->type);
-			if (clear_user(buffer, tsz)) {
+			if (iov_iter_zero(tsz, iter) != tsz) {
 				ret = -EFAULT;
 				goto out;
 			}
 		}
 skip:
 		buflen -= tsz;
-		*fpos += tsz;
-		buffer += tsz;
+		*ppos += tsz;
 		start += tsz;
 		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
 	}
@@ -603,7 +603,7 @@ static int release_kcore(struct inode *inode, struct file *file)
 }
 
 static const struct proc_ops kcore_proc_ops = {
-	.proc_read	= read_kcore,
+	.proc_read_iter	= read_kcore_iter,
 	.proc_open	= open_kcore,
 	.proc_release	= release_kcore,
 	.proc_lseek	= default_llseek,
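
[Editorial note: a quick way to exercise the new read path from userspace is
to read the start of /proc/kcore, which should still yield a valid ELF
header. An illustrative sketch, not part of this series (error handling kept
minimal; requires root):

	#include <elf.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		Elf64_Ehdr ehdr;
		int fd = open("/proc/kcore", O_RDONLY);

		if (fd < 0) {
			perror("open /proc/kcore");
			return 1;
		}
		/* pread() reaches read_kcore_iter() via the ->read_iter path. */
		if (pread(fd, &ehdr, sizeof(ehdr), 0) != (ssize_t)sizeof(ehdr)) {
			perror("pread");
			close(fd);
			return 1;
		}
		close(fd);
		if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) {
			fprintf(stderr, "unexpected ELF magic\n");
			return 1;
		}
		printf("ELF header OK: %u program headers\n", (unsigned)ehdr.e_phnum);
		return 0;
	}
]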