[4/7] x86/sgx: Allow userspace to add multiple pages in single ioctl()
diff mbox series

Message ID 20190605194845.926-5-sean.j.christopherson@intel.com
State New
Headers show
Series
  • x86/sgx: Clean up and enhance add pages ioctl
Related show

Commit Message

Sean Christopherson June 5, 2019, 7:48 p.m. UTC
...to improve performance when building enclaves by reducing the number
of user<->system transitions.  Rather than provide arbitrary batching,
e.g. with per-page SECINFO and mrmask, take advantage of the fact that
any sane enclave will have large swaths of pages with identical
properties, e.g. code vs. data sections.

For simplicity and stability in the initial implementation, loop over
the existing add page flow instead of taking a more aggressive approach,
which would require tracking transitions between VMAs and holding
mmap_sem for an extended duration.

On an error, update the userspace struct to reflect progress made, e.g.
so that the ioctl can be re-invoked to finish adding pages after a non-
fatal error.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 Documentation/x86/sgx/3.API.rst        |   2 +-
 arch/x86/include/uapi/asm/sgx.h        |  21 ++--
 arch/x86/kernel/cpu/sgx/driver/ioctl.c | 128 +++++++++++++++++--------
 3 files changed, 98 insertions(+), 53 deletions(-)

Comments

Jarkko Sakkinen June 6, 2019, 3:47 p.m. UTC | #1
On Wed, Jun 05, 2019 at 12:48:42PM -0700, Sean Christopherson wrote:
> ...to improve performance when building enclaves by reducing the number
> of user<->system transitions.  Rather than provide arbitrary batching,
> e.g. with per-page SECINFO and mrmask, take advantage of the fact that
> any sane enclave will have large swaths of pages with identical
> properties, e.g. code vs. data sections.
> 
> For simplicity and stability in the initial implementation, loop over
> the existing add page flow instead of taking a more agressive approach,
> which would require tracking transitions between VMAs and holding
> mmap_sem for an extended duration.
> 
> On an error, update the userspace struct to reflect progress made, e.g.
> so that the ioctl can be re-invoked to finish adding pages after a non-
> fatal error.
> 
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>

Probably not going to look at this before other things are settled.

/Jarkko
Jethro Beekman June 13, 2019, 12:43 a.m. UTC | #2
On 2019-06-05 12:48, Sean Christopherson wrote:
> ...to improve performance when building enclaves by reducing the number
> of user<->system transitions.  Rather than provide arbitrary batching,
> e.g. with per-page SECINFO and mrmask, take advantage of the fact that
> any sane enclave will have large swaths of pages with identical
> properties, e.g. code vs. data sections.
> 
> For simplicity and stability in the initial implementation, loop over
> the existing add page flow instead of taking a more agressive approach,
> which would require tracking transitions between VMAs and holding
> mmap_sem for an extended duration.
> 
> On an error, update the userspace struct to reflect progress made, e.g.
> so that the ioctl can be re-invoked to finish adding pages after a non-
> fatal error.
> 
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---
>   Documentation/x86/sgx/3.API.rst        |   2 +-
>   arch/x86/include/uapi/asm/sgx.h        |  21 ++--
>   arch/x86/kernel/cpu/sgx/driver/ioctl.c | 128 +++++++++++++++++--------
>   3 files changed, 98 insertions(+), 53 deletions(-)
> 
> diff --git a/Documentation/x86/sgx/3.API.rst b/Documentation/x86/sgx/3.API.rst
> index b113aeb05f54..44550aa41073 100644
> --- a/Documentation/x86/sgx/3.API.rst
> +++ b/Documentation/x86/sgx/3.API.rst
> @@ -22,6 +22,6 @@ controls the `PROVISON_KEY` attribute.
>   
>   .. kernel-doc:: arch/x86/kernel/cpu/sgx/driver/ioctl.c
>      :functions: sgx_ioc_enclave_create
> -               sgx_ioc_enclave_add_page
> +               sgx_ioc_enclave_add_region
>                  sgx_ioc_enclave_init
>                  sgx_ioc_enclave_set_attribute
> diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h
> index 9ed690a38c70..30d114f6b3bd 100644
> --- a/arch/x86/include/uapi/asm/sgx.h
> +++ b/arch/x86/include/uapi/asm/sgx.h
> @@ -12,8 +12,8 @@
>   
>   #define SGX_IOC_ENCLAVE_CREATE \
>   	_IOW(SGX_MAGIC, 0x00, struct sgx_enclave_create)
> -#define SGX_IOC_ENCLAVE_ADD_PAGE \
> -	_IOW(SGX_MAGIC, 0x01, struct sgx_enclave_add_page)
> +#define SGX_IOC_ENCLAVE_ADD_REGION \
> +	_IOWR(SGX_MAGIC, 0x01, struct sgx_enclave_add_region)
>   #define SGX_IOC_ENCLAVE_INIT \
>   	_IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init)
>   #define SGX_IOC_ENCLAVE_SET_ATTRIBUTE \
> @@ -32,21 +32,22 @@ struct sgx_enclave_create  {
>   };
>   
>   /**
> - * struct sgx_enclave_add_page - parameter structure for the
> - *                               %SGX_IOC_ENCLAVE_ADD_PAGE ioctl
> - * @addr:	address within the ELRANGE
> - * @src:	address for the page data
> - * @secinfo:	address for the SECINFO data
> - * @mrmask:	bitmask for the measured 256 byte chunks
> + * struct sgx_enclave_add_region - parameter structure for the
> + *                                 %SGX_IOC_ENCLAVE_ADD_REGION ioctl
> + * @addr:	start address within the ELRANGE
> + * @src:	start address for the pages' data
> + * @size:	size of region, in bytes
> + * @secinfo:	address of the SECINFO data (common to the entire region)
> + * @mrmask:	bitmask of 256 byte chunks to measure (applied per 4k page)
>    */
> -struct sgx_enclave_add_page {
> +struct sgx_enclave_add_region {
>   	__u64	addr;
>   	__u64	src;
> +	__u64	size;
>   	__u64	secinfo;
>   	__u16	mrmask;

Considering:

1. I might want to load multiple pages that are not consecutive in memory.
2. Repeating mrmask (other than 0 or ~0) doesn't really make sense for 
ranges.

I'd be in favor of an approach that passes an array of 
sgx_enclave_add_page instead.

Somewhat unrelated: have you considered optionally "gifting" enclave 
source pages to the kernel (as in vmsplice)? That would avoid the 
copy_from_user.

--
Jethro Beekman | Fortanix
Sean Christopherson June 13, 2019, 4:51 p.m. UTC | #3
On Thu, Jun 13, 2019 at 12:43:42AM +0000, Jethro Beekman wrote:
> On 2019-06-05 12:48, Sean Christopherson wrote:
> >...to improve performance when building enclaves by reducing the number
> >of user<->system transitions.  Rather than provide arbitrary batching,
> >e.g. with per-page SECINFO and mrmask, take advantage of the fact that
> >any sane enclave will have large swaths of pages with identical
> >properties, e.g. code vs. data sections.
> >
> >For simplicity and stability in the initial implementation, loop over
> >the existing add page flow instead of taking a more agressive approach,
> >which would require tracking transitions between VMAs and holding
> >mmap_sem for an extended duration.
> >
> >On an error, update the userspace struct to reflect progress made, e.g.
> >so that the ioctl can be re-invoked to finish adding pages after a non-
> >fatal error.
> >
> >Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> >---
> >  Documentation/x86/sgx/3.API.rst        |   2 +-
> >  arch/x86/include/uapi/asm/sgx.h        |  21 ++--
> >  arch/x86/kernel/cpu/sgx/driver/ioctl.c | 128 +++++++++++++++++--------
> >  3 files changed, 98 insertions(+), 53 deletions(-)
> >
> >diff --git a/Documentation/x86/sgx/3.API.rst b/Documentation/x86/sgx/3.API.rst
> >index b113aeb05f54..44550aa41073 100644
> >--- a/Documentation/x86/sgx/3.API.rst
> >+++ b/Documentation/x86/sgx/3.API.rst
> >@@ -22,6 +22,6 @@ controls the `PROVISON_KEY` attribute.
> >  .. kernel-doc:: arch/x86/kernel/cpu/sgx/driver/ioctl.c
> >     :functions: sgx_ioc_enclave_create
> >-               sgx_ioc_enclave_add_page
> >+               sgx_ioc_enclave_add_region
> >                 sgx_ioc_enclave_init
> >                 sgx_ioc_enclave_set_attribute
> >diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h
> >index 9ed690a38c70..30d114f6b3bd 100644
> >--- a/arch/x86/include/uapi/asm/sgx.h
> >+++ b/arch/x86/include/uapi/asm/sgx.h
> >@@ -12,8 +12,8 @@
> >  #define SGX_IOC_ENCLAVE_CREATE \
> >  	_IOW(SGX_MAGIC, 0x00, struct sgx_enclave_create)
> >-#define SGX_IOC_ENCLAVE_ADD_PAGE \
> >-	_IOW(SGX_MAGIC, 0x01, struct sgx_enclave_add_page)
> >+#define SGX_IOC_ENCLAVE_ADD_REGION \
> >+	_IOWR(SGX_MAGIC, 0x01, struct sgx_enclave_add_region)
> >  #define SGX_IOC_ENCLAVE_INIT \
> >  	_IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init)
> >  #define SGX_IOC_ENCLAVE_SET_ATTRIBUTE \
> >@@ -32,21 +32,22 @@ struct sgx_enclave_create  {
> >  };
> >  /**
> >- * struct sgx_enclave_add_page - parameter structure for the
> >- *                               %SGX_IOC_ENCLAVE_ADD_PAGE ioctl
> >- * @addr:	address within the ELRANGE
> >- * @src:	address for the page data
> >- * @secinfo:	address for the SECINFO data
> >- * @mrmask:	bitmask for the measured 256 byte chunks
> >+ * struct sgx_enclave_add_region - parameter structure for the
> >+ *                                 %SGX_IOC_ENCLAVE_ADD_REGION ioctl
> >+ * @addr:	start address within the ELRANGE
> >+ * @src:	start address for the pages' data
> >+ * @size:	size of region, in bytes
> >+ * @secinfo:	address of the SECINFO data (common to the entire region)
> >+ * @mrmask:	bitmask of 256 byte chunks to measure (applied per 4k page)
> >   */
> >-struct sgx_enclave_add_page {
> >+struct sgx_enclave_add_region {
> >  	__u64	addr;
> >  	__u64	src;
> >+	__u64	size;
> >  	__u64	secinfo;
> >  	__u16	mrmask;
> 
> Considering:
> 
> 1. I might want to load multiple pages that are not consecutive in memory.
> 2. Repeating mrmask (other than 0 or ~0) doesn't really make sense for
> ranges.
> 
> I'd be in favor of an approach that passes an array of sgx_enclave_add_page
> instead.

I'm not opposed to taking an array.  The region approach seemed simpler
at first glance, but that may not be the case, especially if we get rid of
the workqueue.  I'll play around with it.

> Somewhat unrelated: have you considered optionally "gifting" enclave source
> pages to the kernel (as in vmsplice)? That would avoid the copy_from_user.

If we ditch the workqueue then we probably don't even need to gift the
page, e.g. I think we allocate the EPC page prior to taking mmap_sem, and
then simply do gup+kmap around EADD.  We'd just need to be careful about
not allocating EPC pages for ioctls that are guaranteed to fail.
Andy Lutomirski June 13, 2019, 7:05 p.m. UTC | #4
On Thu, Jun 13, 2019 at 9:51 AM Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
>
> On Thu, Jun 13, 2019 at 12:43:42AM +0000, Jethro Beekman wrote:
> > On 2019-06-05 12:48, Sean Christopherson wrote:
> > >...to improve performance when building enclaves by reducing the number
> > >of user<->system transitions.  Rather than provide arbitrary batching,
> > >e.g. with per-page SECINFO and mrmask, take advantage of the fact that
> > >any sane enclave will have large swaths of pages with identical
> > >properties, e.g. code vs. data sections.
> > >
> > >For simplicity and stability in the initial implementation, loop over
> > >the existing add page flow instead of taking a more agressive approach,
> > >which would require tracking transitions between VMAs and holding
> > >mmap_sem for an extended duration.
> > >
> > >On an error, update the userspace struct to reflect progress made, e.g.
> > >so that the ioctl can be re-invoked to finish adding pages after a non-
> > >fatal error.
> > >
> > >Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> > >---
> > >  Documentation/x86/sgx/3.API.rst        |   2 +-
> > >  arch/x86/include/uapi/asm/sgx.h        |  21 ++--
> > >  arch/x86/kernel/cpu/sgx/driver/ioctl.c | 128 +++++++++++++++++--------
> > >  3 files changed, 98 insertions(+), 53 deletions(-)
> > >
> > >diff --git a/Documentation/x86/sgx/3.API.rst b/Documentation/x86/sgx/3.API.rst
> > >index b113aeb05f54..44550aa41073 100644
> > >--- a/Documentation/x86/sgx/3.API.rst
> > >+++ b/Documentation/x86/sgx/3.API.rst
> > >@@ -22,6 +22,6 @@ controls the `PROVISON_KEY` attribute.
> > >  .. kernel-doc:: arch/x86/kernel/cpu/sgx/driver/ioctl.c
> > >     :functions: sgx_ioc_enclave_create
> > >-               sgx_ioc_enclave_add_page
> > >+               sgx_ioc_enclave_add_region
> > >                 sgx_ioc_enclave_init
> > >                 sgx_ioc_enclave_set_attribute
> > >diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h
> > >index 9ed690a38c70..30d114f6b3bd 100644
> > >--- a/arch/x86/include/uapi/asm/sgx.h
> > >+++ b/arch/x86/include/uapi/asm/sgx.h
> > >@@ -12,8 +12,8 @@
> > >  #define SGX_IOC_ENCLAVE_CREATE \
> > >     _IOW(SGX_MAGIC, 0x00, struct sgx_enclave_create)
> > >-#define SGX_IOC_ENCLAVE_ADD_PAGE \
> > >-    _IOW(SGX_MAGIC, 0x01, struct sgx_enclave_add_page)
> > >+#define SGX_IOC_ENCLAVE_ADD_REGION \
> > >+    _IOWR(SGX_MAGIC, 0x01, struct sgx_enclave_add_region)
> > >  #define SGX_IOC_ENCLAVE_INIT \
> > >     _IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init)
> > >  #define SGX_IOC_ENCLAVE_SET_ATTRIBUTE \
> > >@@ -32,21 +32,22 @@ struct sgx_enclave_create  {
> > >  };
> > >  /**
> > >- * struct sgx_enclave_add_page - parameter structure for the
> > >- *                               %SGX_IOC_ENCLAVE_ADD_PAGE ioctl
> > >- * @addr:   address within the ELRANGE
> > >- * @src:    address for the page data
> > >- * @secinfo:        address for the SECINFO data
> > >- * @mrmask: bitmask for the measured 256 byte chunks
> > >+ * struct sgx_enclave_add_region - parameter structure for the
> > >+ *                                 %SGX_IOC_ENCLAVE_ADD_REGION ioctl
> > >+ * @addr:   start address within the ELRANGE
> > >+ * @src:    start address for the pages' data
> > >+ * @size:   size of region, in bytes
> > >+ * @secinfo:        address of the SECINFO data (common to the entire region)
> > >+ * @mrmask: bitmask of 256 byte chunks to measure (applied per 4k page)
> > >   */
> > >-struct sgx_enclave_add_page {
> > >+struct sgx_enclave_add_region {
> > >     __u64   addr;
> > >     __u64   src;
> > >+    __u64   size;
> > >     __u64   secinfo;
> > >     __u16   mrmask;
> >
> > Considering:
> >
> > 1. I might want to load multiple pages that are not consecutive in memory.
> > 2. Repeating mrmask (other than 0 or ~0) doesn't really make sense for
> > ranges.
> >
> > I'd be in favor of an approach that passes an array of sgx_enclave_add_page
> > instead.
>
> I'm not opposed to taking an array.  The region approach seemed simpler
> at first glance, but that may not be the case, especially if we get rid of
> the workqueue.  I'll play around with it.
>
> > Somewhat unrelated: have you considered optionally "gifting" enclave source
> > pages to the kernel (as in vmsplice)? That would avoid the copy_from_user.
>
> If we ditch the workqueue then we probably don't even need to gift the
> page, e.g. I think we allocate the EPC page prior to taking mmap_sem, and
> then simply do gup+kmap around EADD.  We'd just need to be careful about
> not allocating EPC pages for ioctls that are guaranteed to fail.
>
>

Why gup + kmap?  Can't you just do STAC; EADD; CLAC?  (Using the
appropriate C helpers, of course.)

--Andy
Sean Christopherson June 13, 2019, 7:15 p.m. UTC | #5
On Thu, Jun 13, 2019 at 12:05:37PM -0700, Andy Lutomirski wrote:
> On Thu, Jun 13, 2019 at 9:51 AM Sean Christopherson
> <sean.j.christopherson@intel.com> wrote:
> > If we ditch the workqueue then we probably don't even need to gift the
> > page, e.g. I think we allocate the EPC page prior to taking mmap_sem, and
> > then simply do gup+kmap around EADD.  We'd just need to be careful about
> > not allocating EPC pages for ioctls that are guaranteed to fail.
> >
> >
> 
> Why gup + kmap?  Can't you just do STAC; EADD; CLAC?  (Using the
> appropriate C helpers, of course.)

That should work as well, we already have exception fixup on ENCLS.  I
obviously haven't given much thought to what all we can do once the
workqueue goes bye bye :-)
Xing, Cedric June 13, 2019, 7:45 p.m. UTC | #6
> From: Christopherson, Sean J
> Sent: Thursday, June 13, 2019 9:52 AM
> 
> On Thu, Jun 13, 2019 at 12:43:42AM +0000, Jethro Beekman wrote:
> > On 2019-06-05 12:48, Sean Christopherson wrote:
> > >...to improve performance when building enclaves by reducing the
> > >number of user<->system transitions.  Rather than provide arbitrary
> > >batching, e.g. with per-page SECINFO and mrmask, take advantage of
> > >the fact that any sane enclave will have large swaths of pages with
> > >identical properties, e.g. code vs. data sections.
> > >
> > >For simplicity and stability in the initial implementation, loop over
> > >the existing add page flow instead of taking a more agressive
> > >approach, which would require tracking transitions between VMAs and
> > >holding mmap_sem for an extended duration.
> > >
> > >On an error, update the userspace struct to reflect progress made,
> e.g.
> > >so that the ioctl can be re-invoked to finish adding pages after a
> > >non- fatal error.
> > >
> > >Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> > >---
> > >  Documentation/x86/sgx/3.API.rst        |   2 +-
> > >  arch/x86/include/uapi/asm/sgx.h        |  21 ++--
> > >  arch/x86/kernel/cpu/sgx/driver/ioctl.c | 128
> > >+++++++++++++++++--------
> > >  3 files changed, 98 insertions(+), 53 deletions(-)
> > >
> > >diff --git a/Documentation/x86/sgx/3.API.rst
> > >b/Documentation/x86/sgx/3.API.rst index b113aeb05f54..44550aa41073
> > >100644
> > >--- a/Documentation/x86/sgx/3.API.rst
> > >+++ b/Documentation/x86/sgx/3.API.rst
> > >@@ -22,6 +22,6 @@ controls the `PROVISON_KEY` attribute.
> > >  .. kernel-doc:: arch/x86/kernel/cpu/sgx/driver/ioctl.c
> > >     :functions: sgx_ioc_enclave_create
> > >-               sgx_ioc_enclave_add_page
> > >+               sgx_ioc_enclave_add_region
> > >                 sgx_ioc_enclave_init
> > >                 sgx_ioc_enclave_set_attribute diff --git
> > >a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h
> > >index 9ed690a38c70..30d114f6b3bd 100644
> > >--- a/arch/x86/include/uapi/asm/sgx.h
> > >+++ b/arch/x86/include/uapi/asm/sgx.h
> > >@@ -12,8 +12,8 @@
> > >  #define SGX_IOC_ENCLAVE_CREATE \
> > >  	_IOW(SGX_MAGIC, 0x00, struct sgx_enclave_create) -#define
> > >SGX_IOC_ENCLAVE_ADD_PAGE \
> > >-	_IOW(SGX_MAGIC, 0x01, struct sgx_enclave_add_page)
> > >+#define SGX_IOC_ENCLAVE_ADD_REGION \
> > >+	_IOWR(SGX_MAGIC, 0x01, struct sgx_enclave_add_region)
> > >  #define SGX_IOC_ENCLAVE_INIT \
> > >  	_IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init)
> > >  #define SGX_IOC_ENCLAVE_SET_ATTRIBUTE \ @@ -32,21 +32,22 @@ struct
> > >sgx_enclave_create  {
> > >  };
> > >  /**
> > >- * struct sgx_enclave_add_page - parameter structure for the
> > >- *                               %SGX_IOC_ENCLAVE_ADD_PAGE ioctl
> > >- * @addr:	address within the ELRANGE
> > >- * @src:	address for the page data
> > >- * @secinfo:	address for the SECINFO data
> > >- * @mrmask:	bitmask for the measured 256 byte chunks
> > >+ * struct sgx_enclave_add_region - parameter structure for the
> > >+ *                                 %SGX_IOC_ENCLAVE_ADD_REGION ioctl
> > >+ * @addr:	start address within the ELRANGE
> > >+ * @src:	start address for the pages' data
> > >+ * @size:	size of region, in bytes
> > >+ * @secinfo:	address of the SECINFO data (common to the entire
> region)
> > >+ * @mrmask:	bitmask of 256 byte chunks to measure (applied per 4k
> page)
> > >   */
> > >-struct sgx_enclave_add_page {
> > >+struct sgx_enclave_add_region {
> > >  	__u64	addr;
> > >  	__u64	src;
> > >+	__u64	size;
> > >  	__u64	secinfo;
> > >  	__u16	mrmask;
> >
> > Considering:
> >
> > 1. I might want to load multiple pages that are not consecutive in
> memory.
> > 2. Repeating mrmask (other than 0 or ~0) doesn't really make sense for
> > ranges.
> >
> > I'd be in favor of an approach that passes an array of
> > sgx_enclave_add_page instead.
> 
> I'm not opposed to taking an array.  The region approach seemed simpler
> at first glance, but that may not be the case, especially if we get rid
> of the workqueue.  I'll play around with it.

In practice, ELF segments are usually mapped contiguously in memory. Other components of an enclave, such as heaps and stacks, are usually chunks of zeros. I'd just stay simple. After all, there's no requirement that everything has to be done in a single ioctl.

Patch
diff mbox series

diff --git a/Documentation/x86/sgx/3.API.rst b/Documentation/x86/sgx/3.API.rst
index b113aeb05f54..44550aa41073 100644
--- a/Documentation/x86/sgx/3.API.rst
+++ b/Documentation/x86/sgx/3.API.rst
@@ -22,6 +22,6 @@  controls the `PROVISON_KEY` attribute.
 
 .. kernel-doc:: arch/x86/kernel/cpu/sgx/driver/ioctl.c
    :functions: sgx_ioc_enclave_create
-               sgx_ioc_enclave_add_page
+               sgx_ioc_enclave_add_region
                sgx_ioc_enclave_init
                sgx_ioc_enclave_set_attribute
diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h
index 9ed690a38c70..30d114f6b3bd 100644
--- a/arch/x86/include/uapi/asm/sgx.h
+++ b/arch/x86/include/uapi/asm/sgx.h
@@ -12,8 +12,8 @@ 
 
 #define SGX_IOC_ENCLAVE_CREATE \
 	_IOW(SGX_MAGIC, 0x00, struct sgx_enclave_create)
-#define SGX_IOC_ENCLAVE_ADD_PAGE \
-	_IOW(SGX_MAGIC, 0x01, struct sgx_enclave_add_page)
+#define SGX_IOC_ENCLAVE_ADD_REGION \
+	_IOWR(SGX_MAGIC, 0x01, struct sgx_enclave_add_region)
 #define SGX_IOC_ENCLAVE_INIT \
 	_IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init)
 #define SGX_IOC_ENCLAVE_SET_ATTRIBUTE \
@@ -32,21 +32,22 @@  struct sgx_enclave_create  {
 };
 
 /**
- * struct sgx_enclave_add_page - parameter structure for the
- *                               %SGX_IOC_ENCLAVE_ADD_PAGE ioctl
- * @addr:	address within the ELRANGE
- * @src:	address for the page data
- * @secinfo:	address for the SECINFO data
- * @mrmask:	bitmask for the measured 256 byte chunks
+ * struct sgx_enclave_add_region - parameter structure for the
+ *                                 %SGX_IOC_ENCLAVE_ADD_REGION ioctl
+ * @addr:	start address within the ELRANGE
+ * @src:	start address for the pages' data
+ * @size:	size of region, in bytes
+ * @secinfo:	address of the SECINFO data (common to the entire region)
+ * @mrmask:	bitmask of 256 byte chunks to measure (applied per 4k page)
  */
-struct sgx_enclave_add_page {
+struct sgx_enclave_add_region {
 	__u64	addr;
 	__u64	src;
+	__u64	size;
 	__u64	secinfo;
 	__u16	mrmask;
 } __attribute__((__packed__));
 
-
 /**
  * struct sgx_enclave_init - parameter structure for the
  *                           %SGX_IOC_ENCLAVE_INIT ioctl
diff --git a/arch/x86/kernel/cpu/sgx/driver/ioctl.c b/arch/x86/kernel/cpu/sgx/driver/ioctl.c
index d17c60dca114..b69350696b87 100644
--- a/arch/x86/kernel/cpu/sgx/driver/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/driver/ioctl.c
@@ -487,10 +487,9 @@  static int sgx_validate_tcs(struct sgx_encl *encl, struct sgx_tcs *tcs)
 	return 0;
 }
 
-static int __sgx_encl_add_page(struct sgx_encl *encl,
+static int sgx_encl_queue_page(struct sgx_encl *encl,
 			       struct sgx_encl_page *encl_page,
-			       void *data,
-			       struct sgx_secinfo *secinfo,
+			       void *data, struct sgx_secinfo *secinfo,
 			       unsigned int mrmask)
 {
 	unsigned long page_index = sgx_encl_get_index(encl, encl_page);
@@ -529,9 +528,9 @@  static int __sgx_encl_add_page(struct sgx_encl *encl,
 	return 0;
 }
 
-static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr,
-			     void *data, struct sgx_secinfo *secinfo,
-			     unsigned int mrmask)
+static int __sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr,
+			       void *data, struct sgx_secinfo *secinfo,
+			       unsigned int mrmask)
 {
 	u64 page_type = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;
 	struct sgx_encl_page *encl_page;
@@ -563,7 +562,7 @@  static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr,
 		goto out;
 	}
 
-	ret = __sgx_encl_add_page(encl, encl_page, data, secinfo, mrmask);
+	ret = sgx_encl_queue_page(encl, encl_page, data, secinfo, mrmask);
 	if (ret) {
 		radix_tree_delete(&encl_page->encl->page_tree,
 				  PFN_DOWN(encl_page->desc));
@@ -575,57 +574,102 @@  static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr,
 	return ret;
 }
 
-/**
- * sgx_ioc_enclave_add_page - handler for %SGX_IOC_ENCLAVE_ADD_PAGE
- *
- * @filep:	open file to /dev/sgx
- * @arg:	userspace pointer to a struct sgx_enclave_add_page instance
- *
- * Add a page to an uninitialized enclave (EADD), and optionally extend the
- * enclave's measurement with the contents of the page (EEXTEND).  EADD and
- * EEXTEND are done asynchronously via worker threads.  A successful
- * sgx_ioc_enclave_add_page() only indicates the page has been added to the
- * work queue, it does not guarantee adding the page to the enclave will
- * succeed.
- *
- * Return:
- *   0 on success,
- *   -errno otherwise
- */
-static long sgx_ioc_enclave_add_page(struct file *filep, void __user *arg)
+static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr,
+			     unsigned long src, struct sgx_secinfo *secinfo,
+			     unsigned int mrmask)
 {
-	struct sgx_encl *encl = filep->private_data;
-	struct sgx_enclave_add_page addp;
-	struct sgx_secinfo secinfo;
 	struct page *data_page;
 	void *data;
 	int ret;
 
-	if (copy_from_user(&addp, arg, sizeof(addp)))
-		return -EFAULT;
-
-	if (copy_from_user(&secinfo, (void __user *)addp.secinfo,
-			   sizeof(secinfo)))
-		return -EFAULT;
-
 	data_page = alloc_page(GFP_HIGHUSER);
 	if (!data_page)
 		return -ENOMEM;
 
 	data = kmap(data_page);
 
-	if (copy_from_user((void *)data, (void __user *)addp.src, PAGE_SIZE)) {
+	if (copy_from_user((void *)data, (void __user *)src, PAGE_SIZE)) {
 		ret = -EFAULT;
 		goto out;
 	}
 
-	ret = sgx_encl_add_page(encl, addp.addr, data, &secinfo, addp.mrmask);
-	if (ret)
-		goto out;
-
+	ret = __sgx_encl_add_page(encl, addr, data, secinfo, mrmask);
 out:
 	kunmap(data_page);
 	__free_page(data_page);
+
+	return ret;
+}
+
+/**
+ * sgx_ioc_enclave_add_region - handler for %SGX_IOC_ENCLAVE_ADD_REGION
+ *
+ * @filep:	open file to /dev/sgx
+ * @arg:	userspace pointer to a struct sgx_enclave_add_region instance
+ *
+ * Add a range of pages to an uninitialized enclave (EADD), and optionally
+ * extend the enclave's measurement with the contents of the page (EEXTEND).
+ * The range of pages must be virtually contiguous.  The SECINFO and
+ * measurement mask are applied to all pages, i.e. pages with different
+ * properties must be added in separate calls.
+ *
+ * EADD and EEXTEND are done asynchronously via worker threads.  A successful
+ * sgx_ioc_enclave_add_region() only indicates the pages have been added to the
+ * work queue, it does not guarantee adding the pages to the enclave will
+ * succeed.
+ *
+ * On failure, @arg->addr, @arg->src and @arg->size are adjusted to reflect
+ * the remaining pages that need to be added to the enclave, i.e. userspace
+ * can re-invoke SGX_IOC_ENCLAVE_ADD_REGION using the same struct.
+ *
+ * Return:
+ *   0 on success,
+ *   -errno otherwise
+ */
+static long sgx_ioc_enclave_add_region(struct file *filep, void __user *arg)
+{
+	struct sgx_encl *encl = filep->private_data;
+	struct sgx_enclave_add_region region;
+	struct sgx_secinfo secinfo;
+	unsigned long i;
+	int ret;
+
+	if (copy_from_user(&region, arg, sizeof(region)))
+		return -EFAULT;
+
+	if (!IS_ALIGNED(region.addr, 4096) || !IS_ALIGNED(region.size, 4096))
+		return -EINVAL;
+
+	if (!region.size)
+		return 0;
+
+	if (copy_from_user(&secinfo, (void __user *)region.secinfo,
+			   sizeof(secinfo)))
+		return -EFAULT;
+
+	ret = 0;
+	for (i = 0; i < region.size; i += PAGE_SIZE) {
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		if (need_resched())
+			cond_resched();
+
+		ret = sgx_encl_add_page(encl, region.addr + i, region.src + i,
+					&secinfo, region.mrmask);
+		if (ret)
+			break;
+	}
+	if (ret && i) {
+		region.addr += i;
+		region.src  += i;
+		region.size -= i;
+
+		if (copy_to_user(arg, &region, sizeof(region)))
+			return -EFAULT;
+	}
 	return ret;
 }
 
@@ -817,8 +861,8 @@  long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 	switch (cmd) {
 	case SGX_IOC_ENCLAVE_CREATE:
 		return sgx_ioc_enclave_create(filep, (void __user *)arg);
-	case SGX_IOC_ENCLAVE_ADD_PAGE:
-		return sgx_ioc_enclave_add_page(filep, (void __user *)arg);
+	case SGX_IOC_ENCLAVE_ADD_REGION:
+		return sgx_ioc_enclave_add_region(filep, (void __user *)arg);
 	case SGX_IOC_ENCLAVE_INIT:
 		return sgx_ioc_enclave_init(filep, (void __user *)arg);
 	case SGX_IOC_ENCLAVE_SET_ATTRIBUTE: