
[2/3,RFC] PM / Hibernate: Encrypt the snapshot pages before they are submitted to the block device

Message ID 5a1cc6bff40ff9a3e023392c69b881e91b16837a.1529486870.git.yu.c.chen@intel.com (mailing list archive)
State Superseded, archived

Commit Message

Chen Yu June 20, 2018, 9:40 a.m. UTC
Use the helper functions introduced previously to encrypt
the page data before they are submitted to the block device.
In the case of hibernation compression, the data are first
compressed and then encrypted; on resume, they are decrypted
and then decompressed.

Suggested-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Len Brown <len.brown@intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "Lee, Chun-Yi" <jlee@suse.com>
Cc: linux-pm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Chen Yu <yu.c.chen@intel.com>
---
 kernel/power/power.h |   1 +
 kernel/power/swap.c  | 215 ++++++++++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 205 insertions(+), 11 deletions(-)
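
For context, the hibernation_crypto hooks consumed below are introduced
by PATCH [1/3] of this series (not shown on this page). As a rough
orientation, a provider module might register its callbacks as in the
following sketch; the callback signatures are inferred from the call
sites in this patch, and all "demo_" names are illustrative only:

/*
 * Sketch only: the real struct hibernation_crypto definition and the
 * key handling live in PATCH [1/3].
 */
#include <linux/module.h>

static int demo_crypto_data(const char *inbuf, int inlen, char *outbuf,
                            int outlen, bool encrypt, int page_idx)
{
        /*
         * Encrypt or decrypt one PAGE_SIZE chunk; page_idx keeps the
         * per-page cipher position stable across suspend and resume.
         */
        return 0;
}

static void demo_save(void *outbuf)
{
        /* Export the salt so it can be stored in swsusp_header. */
}

static void demo_restore(void *inbuf)
{
        /* Re-import the salt read back by the boot kernel. */
}

static int demo_init(bool suspend)
{
        return 0;       /* derive/validate the key for this direction */
}

static struct hibernation_crypto demo_ops = {
        .crypto_data    = demo_crypto_data,
        .save           = demo_save,
        .restore        = demo_restore,
        .init           = demo_init,
};

static int __init demo_module_init(void)
{
        set_hibernation_ops(&demo_ops);
        return 0;
}
module_init(demo_module_init);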

Comments

joeyli June 28, 2018, 1:07 p.m. UTC | #1
Hi Chen Yu,

On Wed, Jun 20, 2018 at 05:40:32PM +0800, Chen Yu wrote:
> Use the helper functions introduced previously to encrypt
> the page data before they are submitted to the block device.
> In the case of hibernation compression, the data are first
> compressed and then encrypted; on resume, they are decrypted
> and then decompressed.
>

I want to suggest my solution, which directly signs/encrypts the
memory snapshot image. This solution has already shipped with
SLE12 for a couple of years:

https://github.com/joeyli/linux-s4sign/commits/s4sign-hmac-encrypted-key-v0.2-v4.17-rc3

The above patches still need to be cleaned up. I am working on
some other bugs, but I can clean them up and send them out ASAP.

The advantage of this solution is that it produces a signed and
encrypted image. Beyond the kernel writing it to a block device,
it can also provide a signed/encrypted image to user space, which
can then store the encrypted image anywhere.

I am OK with your user-space key generator because I don't have
a similar solution yet. I am working on the EFI master key and
also want to adapt hibernation to the keyring. I will continue
that work.

Thanks a lot!
Joey Lee
 
> Suggested-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
> Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
> Cc: Pavel Machek <pavel@ucw.cz>
> Cc: Len Brown <len.brown@intel.com>
> Cc: Borislav Petkov <bp@alien8.de>
> Cc: "Lee, Chun-Yi" <jlee@suse.com>
> Cc: linux-pm@vger.kernel.org
> Cc: linux-kernel@vger.kernel.org
> Signed-off-by: Chen Yu <yu.c.chen@intel.com>
> ---
>  kernel/power/power.h |   1 +
>  kernel/power/swap.c  | 215 ++++++++++++++++++++++++++++++++++++++++++++++++---
>  2 files changed, 205 insertions(+), 11 deletions(-)
> 
> diff --git a/kernel/power/power.h b/kernel/power/power.h
> index 660aac3..637695c 100644
> --- a/kernel/power/power.h
> +++ b/kernel/power/power.h
> @@ -207,6 +207,7 @@ extern int swsusp_swap_in_use(void);
>  #define SF_PLATFORM_MODE	1
>  #define SF_NOCOMPRESS_MODE	2
>  #define SF_CRC32_MODE	        4
> +#define SF_ENCRYPT_MODE		8
>  
>  /* kernel/power/hibernate.c */
>  extern int swsusp_check(void);
> diff --git a/kernel/power/swap.c b/kernel/power/swap.c
> index c2bcf97..2b6b3d0 100644
> --- a/kernel/power/swap.c
> +++ b/kernel/power/swap.c
> @@ -102,14 +102,16 @@ struct swap_map_handle {
>  	unsigned int k;
>  	unsigned long reqd_free_pages;
>  	u32 crc32;
> +	bool crypto;
>  };
>  
>  struct swsusp_header {
>  	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
> -	              sizeof(u32)];
> +			sizeof(u32) - HIBERNATE_SALT_BYTES];
>  	u32	crc32;
>  	sector_t image;
>  	unsigned int flags;	/* Flags to pass to the "boot" kernel */
> +	char salt[HIBERNATE_SALT_BYTES];
>  	char	orig_sig[10];
>  	char	sig[10];
>  } __packed;
> @@ -127,6 +129,53 @@ struct swsusp_extent {
>  	unsigned long end;
>  };
>  
> +/* For encryption/decryption. */
> +static struct hibernation_crypto *hibernation_crypto_ops;
> +
> +void set_hibernation_ops(struct hibernation_crypto *ops)
> +{
> +	hibernation_crypto_ops = ops;
> +}
> +EXPORT_SYMBOL_GPL(set_hibernation_ops);
> +
> +static int crypto_data(const char *inbuf,
> +			    int inlen,
> +			    char *outbuf,
> +			    int outlen,
> +			    bool encrypt,
> +			    int page_idx)
> +{
> +	if (hibernation_crypto_ops &&
> +	    hibernation_crypto_ops->crypto_data)
> +		return hibernation_crypto_ops->crypto_data(inbuf,
> +			inlen, outbuf, outlen, encrypt, page_idx);
> +	else
> +		return -EINVAL;
> +}
> +
> +static void crypto_save(void *outbuf)
> +{
> +	if (hibernation_crypto_ops &&
> +	    hibernation_crypto_ops->save)
> +		hibernation_crypto_ops->save(outbuf);
> +}
> +
> +static void crypto_restore(void *inbuf)
> +{
> +	if (hibernation_crypto_ops &&
> +	    hibernation_crypto_ops->restore)
> +		hibernation_crypto_ops->restore(inbuf);
> +}
> +
> +static int crypto_init(bool suspend)
> +{
> +	if (hibernation_crypto_ops &&
> +	    hibernation_crypto_ops->init)
> +		return hibernation_crypto_ops->init(suspend);
> +	else
> +		return -EINVAL;
> +}
> +
>  static struct rb_root swsusp_extents = RB_ROOT;
>  
>  static int swsusp_extents_insert(unsigned long swap_offset)
> @@ -318,6 +367,10 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
>  		swsusp_header->flags = flags;
>  		if (flags & SF_CRC32_MODE)
>  			swsusp_header->crc32 = handle->crc32;
> +		if (handle->crypto) {
> +			swsusp_header->flags |= SF_ENCRYPT_MODE;
> +			crypto_save((void *)swsusp_header->salt);
> +		}
>  		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
>  				      swsusp_resume_block, swsusp_header, NULL);
>  	} else {
> @@ -535,11 +588,12 @@ static int save_image(struct swap_map_handle *handle,
>  {
>  	unsigned int m;
>  	int ret;
> -	int nr_pages;
> +	int nr_pages, crypto_page_idx;
>  	int err2;
>  	struct hib_bio_batch hb;
>  	ktime_t start;
>  	ktime_t stop;
> +	void *tmp = NULL, *crypt_buf = NULL;
>  
>  	hib_init_batch(&hb);
>  
> @@ -549,12 +603,33 @@ static int save_image(struct swap_map_handle *handle,
>  	if (!m)
>  		m = 1;
>  	nr_pages = 0;
> +	crypto_page_idx = 0;
> +	if (handle->crypto) {
> +		crypt_buf = (void *)get_zeroed_page(GFP_KERNEL);
> +		if (!crypt_buf)
> +			return -ENOMEM;
> +	}
> +
>  	start = ktime_get();
>  	while (1) {
>  		ret = snapshot_read_next(snapshot);
>  		if (ret <= 0)
>  			break;
> -		ret = swap_write_page(handle, data_of(*snapshot), &hb);
> +		tmp = data_of(*snapshot);
> +		if (handle->crypto) {
> +			/* Encryption before submit_io.*/
> +			ret = crypto_data(data_of(*snapshot),
> +					  PAGE_SIZE,
> +					  crypt_buf,
> +					  PAGE_SIZE,
> +					  true,
> +					  crypto_page_idx);
> +			if (ret)
> +				goto out;
> +			crypto_page_idx++;
> +			tmp = crypt_buf;
> +		}
> +		ret = swap_write_page(handle, tmp, &hb);
>  		if (ret)
>  			break;
>  		if (!(nr_pages % m))
> @@ -569,6 +644,9 @@ static int save_image(struct swap_map_handle *handle,
>  	if (!ret)
>  		pr_info("Image saving done\n");
>  	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
> + out:
> +	if (crypt_buf)
> +		free_page((unsigned long)crypt_buf);
>  	return ret;
>  }
>  
> @@ -671,7 +749,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
>  {
>  	unsigned int m;
>  	int ret = 0;
> -	int nr_pages;
> +	int nr_pages, crypto_page_idx;
>  	int err2;
>  	struct hib_bio_batch hb;
>  	ktime_t start;
> @@ -767,6 +845,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
>  	if (!m)
>  		m = 1;
>  	nr_pages = 0;
> +	crypto_page_idx = 0;
>  	start = ktime_get();
>  	for (;;) {
>  		for (thr = 0; thr < nr_threads; thr++) {
> @@ -835,7 +914,25 @@ static int save_image_lzo(struct swap_map_handle *handle,
>  			for (off = 0;
>  			     off < LZO_HEADER + data[thr].cmp_len;
>  			     off += PAGE_SIZE) {
> -				memcpy(page, data[thr].cmp + off, PAGE_SIZE);
> +				if (handle->crypto) {
> +					/*
> +					 * Encrypt the compressed data
> +					 * before we write them to the
> +					 * block device.
> +					 */
> +					ret = crypto_data(data[thr].cmp + off,
> +							  PAGE_SIZE,
> +							  page,
> +							  PAGE_SIZE,
> +							  true,
> +							  crypto_page_idx);
> +					if (ret)
> +						goto out_finish;
> +					crypto_page_idx++;
> +				} else {
> +					memcpy(page, data[thr].cmp + off,
> +						PAGE_SIZE);
> +				}
>  
>  				ret = swap_write_page(handle, page, &hb);
>  				if (ret)
> @@ -909,6 +1006,7 @@ int swsusp_write(unsigned int flags)
>  	int error;
>  
>  	pages = snapshot_get_image_size();
> +	memset(&handle, 0, sizeof(struct swap_map_handle));
>  	error = get_swap_writer(&handle);
>  	if (error) {
>  		pr_err("Cannot get swap writer\n");
> @@ -922,6 +1020,9 @@ int swsusp_write(unsigned int flags)
>  		}
>  	}
>  	memset(&snapshot, 0, sizeof(struct snapshot_handle));
> +	if (!crypto_init(true))
> +		/* The image needs to be encrypted. */
> +		handle.crypto = true;
>  	error = snapshot_read_next(&snapshot);
>  	if (error < PAGE_SIZE) {
>  		if (error >= 0)
> @@ -1059,7 +1160,8 @@ static int load_image(struct swap_map_handle *handle,
>  	ktime_t stop;
>  	struct hib_bio_batch hb;
>  	int err2;
> -	unsigned nr_pages;
> +	unsigned nr_pages, crypto_page_idx;
> +	void *crypt_buf = NULL;
>  
>  	hib_init_batch(&hb);
>  
> @@ -1069,18 +1171,42 @@ static int load_image(struct swap_map_handle *handle,
>  	if (!m)
>  		m = 1;
>  	nr_pages = 0;
> +	crypto_page_idx = 0;
> +	if (handle->crypto) {
> +		crypt_buf = (void *)get_zeroed_page(GFP_KERNEL);
> +		if (!crypt_buf)
> +			return -ENOMEM;
> +	}
>  	start = ktime_get();
>  	for ( ; ; ) {
>  		ret = snapshot_write_next(snapshot);
>  		if (ret <= 0)
>  			break;
> -		ret = swap_read_page(handle, data_of(*snapshot), &hb);
> +		if (handle->crypto)
> +			ret = swap_read_page(handle, crypt_buf, &hb);
> +		else
> +			ret = swap_read_page(handle, data_of(*snapshot), &hb);
>  		if (ret)
>  			break;
>  		if (snapshot->sync_read)
>  			ret = hib_wait_io(&hb);
>  		if (ret)
>  			break;
> +		if (handle->crypto) {
> +			/*
> +			 * Need a decryption for the
> +			 * data read from the block
> +			 * device.
> +			 */
> +			ret = crypto_data(crypt_buf, PAGE_SIZE,
> +					  data_of(*snapshot),
> +					  PAGE_SIZE,
> +					  false,
> +					  crypto_page_idx);
> +			if (ret)
> +				break;
> +			crypto_page_idx++;
> +		}
>  		if (!(nr_pages % m))
>  			pr_info("Image loading progress: %3d%%\n",
>  				nr_pages / m * 10);
> @@ -1097,6 +1223,8 @@ static int load_image(struct swap_map_handle *handle,
>  			ret = -ENODATA;
>  	}
>  	swsusp_show_speed(start, stop, nr_to_read, "Read");
> +	if (crypt_buf)
> +		free_page((unsigned long)crypt_buf);
>  	return ret;
>  }
>  
> @@ -1164,7 +1292,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
>  	struct hib_bio_batch hb;
>  	ktime_t start;
>  	ktime_t stop;
> -	unsigned nr_pages;
> +	unsigned nr_pages, crypto_page_idx;
>  	size_t off;
>  	unsigned i, thr, run_threads, nr_threads;
>  	unsigned ring = 0, pg = 0, ring_size = 0,
> @@ -1173,6 +1301,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
>  	unsigned char **page = NULL;
>  	struct dec_data *data = NULL;
>  	struct crc_data *crc = NULL;
> +	void *first_page = NULL;
>  
>  	hib_init_batch(&hb);
>  
> @@ -1278,6 +1407,18 @@ static int load_image_lzo(struct swap_map_handle *handle,
>  	}
>  	want = ring_size = i;
>  
> +	/*
> +	 * The first page of data[thr] contains the length of
> +	 * compressed data, this page should not mess up the
> +	 * read buffer, so we allocate a separate page for it.
> +	 */
> +	if (handle->crypto) {
> +		first_page = (void *)get_zeroed_page(GFP_KERNEL);
> +		if (!first_page) {
> +			ret = -ENOMEM;
> +			goto out_clean;
> +		}
> +	}
>  	pr_info("Using %u thread(s) for decompression\n", nr_threads);
>  	pr_info("Loading and decompressing image data (%u pages)...\n",
>  		nr_to_read);
> @@ -1285,6 +1426,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
>  	if (!m)
>  		m = 1;
>  	nr_pages = 0;
> +	crypto_page_idx = 0;
>  	start = ktime_get();
>  
>  	ret = snapshot_write_next(snapshot);
> @@ -1336,7 +1478,24 @@ static int load_image_lzo(struct swap_map_handle *handle,
>  		}
>  
>  		for (thr = 0; have && thr < nr_threads; thr++) {
> -			data[thr].cmp_len = *(size_t *)page[pg];
> +			if (handle->crypto) {
> +				/*
> +				 * Need to decrypt the first page
> +				 * of each data[thr], which contains
> +				 * the compressed data length.
> +				 */
> +				ret = crypto_data(page[pg],
> +						  PAGE_SIZE,
> +						  first_page,
> +						  PAGE_SIZE,
> +						  false,
> +						  crypto_page_idx);
> +				if (ret)
> +					goto out_finish;
> +				data[thr].cmp_len = *(size_t *)first_page;
> +			} else {
> +				data[thr].cmp_len = *(size_t *)page[pg];
> +			}
>  			if (unlikely(!data[thr].cmp_len ||
>  			             data[thr].cmp_len >
>  			             lzo1x_worst_compress(LZO_UNC_SIZE))) {
> @@ -1358,8 +1517,26 @@ static int load_image_lzo(struct swap_map_handle *handle,
>  			for (off = 0;
>  			     off < LZO_HEADER + data[thr].cmp_len;
>  			     off += PAGE_SIZE) {
> -				memcpy(data[thr].cmp + off,
> -				       page[pg], PAGE_SIZE);
> +				if (handle->crypto) {
> +					/*
> +					 * Decrypt the compressed data
> +					 * and leverage the decompression
> +					 * threads to get it done.
> +					 */
> +					ret = crypto_data(page[pg],
> +							  PAGE_SIZE,
> +							  data[thr].cmp + off,
> +							  PAGE_SIZE,
> +							  false,
> +							  crypto_page_idx);
> +					if (ret)
> +						goto out_finish;
> +					crypto_page_idx++;
> +				} else {
> +					memcpy(data[thr].cmp + off,
> +						page[pg], PAGE_SIZE);
> +
> +				}
>  				have--;
>  				want++;
>  				if (++pg >= ring_size)
> @@ -1452,6 +1629,8 @@ static int load_image_lzo(struct swap_map_handle *handle,
>  out_clean:
>  	for (i = 0; i < ring_size; i++)
>  		free_page((unsigned long)page[i]);
> +	if (first_page)
> +		free_page((unsigned long)first_page);
>  	if (crc) {
>  		if (crc->thr)
>  			kthread_stop(crc->thr);
> @@ -1482,6 +1661,7 @@ int swsusp_read(unsigned int *flags_p)
>  	struct swsusp_info *header;
>  
>  	memset(&snapshot, 0, sizeof(struct snapshot_handle));
> +	memset(&handle, 0, sizeof(struct swap_map_handle));
>  	error = snapshot_write_next(&snapshot);
>  	if (error < PAGE_SIZE)
>  		return error < 0 ? error : -EFAULT;
> @@ -1489,6 +1669,16 @@ int swsusp_read(unsigned int *flags_p)
>  	error = get_swap_reader(&handle, flags_p);
>  	if (error)
>  		goto end;
> +	if (*flags_p & SF_ENCRYPT_MODE) {
> +		error = crypto_init(false);
> +		if (!error) {
> +			/* The image has been encrypted. */
> +			handle.crypto = true;
> +		} else {
> +			pr_err("Failed to init cipher during resume.\n");
> +			goto end;
> +		}
> +	}
>  	if (!error)
>  		error = swap_read_page(&handle, header, NULL);
>  	if (!error) {
> @@ -1526,6 +1716,9 @@ int swsusp_check(void)
>  
>  		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
>  			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
> +			/* Read salt passed from previous kernel. */
> +			if (swsusp_header->flags & SF_ENCRYPT_MODE)
> +				crypto_restore((void *)&swsusp_header->salt);
>  			/* Reset swap signature now */
>  			error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
>  						swsusp_resume_block,
> -- 
> 2.7.4
>
Chen Yu June 28, 2018, 1:50 p.m. UTC | #2
Hi,
On Thu, Jun 28, 2018 at 09:07:20PM +0800, joeyli wrote:
> Hi Chen Yu,
> 
> On Wed, Jun 20, 2018 at 05:40:32PM +0800, Chen Yu wrote:
> > Use the helper functions introduced previously to encrypt
> > the page data before they are submitted to the block device.
> > In the case of hibernation compression, the data are first
> > compressed and then encrypted; on resume, they are decrypted
> > and then decompressed.
> >
> 
> I want to suggest my solution, which directly signs/encrypts the
> memory snapshot image. This solution has already shipped with
> SLE12 for a couple of years:
> 
> https://github.com/joeyli/linux-s4sign/commits/s4sign-hmac-encrypted-key-v0.2-v4.17-rc3
> 
I did not see image page encryption in the above link; if I understand
correctly, your code focuses on signing and encrypting the hidden data,
rather than targeting the whole snapshot data.
> The above patches still need to be cleaned up. I am working on
> some other bugs, but I can clean them up and send them out ASAP.
> 
> The advantage of this solution is that it produces a signed and
> encrypted image. Beyond the kernel writing it to a block device,
> it can also provide a signed/encrypted image to user space, which
> can then store the encrypted image anywhere.
> 
> I am OK with your user-space key generator because I don't have
> a similar solution yet. I am working on the EFI master key and
> also want to adapt hibernation to the keyring. I will continue
> that work.
> 
The user-space tool can easily add keyring support besides the
ioctl if needed.
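
For illustration, handing the generated key to the kernel through the
keyring could look like the sketch below; the "user" key type, the
"swsusp:kek" description, and the 32-byte key length are placeholder
assumptions, not part of this patch set:

/* Hypothetical user-space sketch; build with -lkeyutils. */
#include <keyutils.h>
#include <stdio.h>

int main(void)
{
        unsigned char key[32] = { 0 };  /* filled by the key generator */
        key_serial_t id;

        id = add_key("user", "swsusp:kek", key, sizeof(key),
                     KEY_SPEC_USER_KEYRING);
        if (id < 0) {
                perror("add_key");
                return 1;
        }
        printf("key serial: %d\n", id);
        return 0;
}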

Best,
Yu
joeyli June 28, 2018, 2:28 p.m. UTC | #3
On Thu, Jun 28, 2018 at 09:50:17PM +0800, Yu Chen wrote:
> Hi,
> On Thu, Jun 28, 2018 at 09:07:20PM +0800, joeyli wrote:
> > Hi Chen Yu,
> > 
> > On Wed, Jun 20, 2018 at 05:40:32PM +0800, Chen Yu wrote:
> > > Use the helper functions introduced previously to encrypt
> > > the page data before they are submitted to the block device.
> > > In the case of hibernation compression, the data are first
> > > compressed and then encrypted; on resume, they are decrypted
> > > and then decompressed.
> > >
> > 
> > I want to suggest my solution, which directly signs/encrypts the
> > memory snapshot image. This solution has already shipped with
> > SLE12 for a couple of years:
> > 
> > https://github.com/joeyli/linux-s4sign/commits/s4sign-hmac-encrypted-key-v0.2-v4.17-rc3
> > 
> I did not see image page encryption in the above link; if I understand

PM / hibernate: Generate and verify signature for snapshot image
https://github.com/joeyli/linux-s4sign/commit/bae39460393ada4c0226dd07cd5e3afcef86b71f

PM / hibernate: snapshot image encryption
https://github.com/joeyli/linux-s4sign/commit/6a9a0113bb221c036ebd0f6321b7191283fe4929

The above patches sign and encrypt the data pages in the snapshot
image and put the signature in the header.
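
Conceptually, the signing side walks the snapshot pages and stores the
resulting digest in the image header. A rough sketch using the kernel
shash API (illustrative only, not the code from the linked patches)
might look like:

/* Rough sketch, not the code from the linked patches. */
#include <crypto/hash.h>

static int sign_snapshot(struct snapshot_handle *snapshot, u8 *digest,
                         const u8 *key, unsigned int keylen)
{
        struct crypto_shash *tfm;
        int ret;

        tfm = crypto_alloc_shash("hmac(sha512)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_shash_setkey(tfm, key, keylen);
        if (!ret) {
                SHASH_DESC_ON_STACK(desc, tfm);

                desc->tfm = tfm;
                ret = crypto_shash_init(desc);
                /* Feed every data page of the snapshot to the HMAC. */
                while (!ret && snapshot_read_next(snapshot) > 0)
                        ret = crypto_shash_update(desc, data_of(*snapshot),
                                                  PAGE_SIZE);
                if (!ret)
                        ret = crypto_shash_final(desc, digest);
        }
        crypto_free_shash(tfm);
        return ret;     /* the caller stores "digest" in the image header */
}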

> correctly, your code focuses on signing and encrypting the hidden data,
> rather than targeting the whole snapshot data.

Please ignore the hidden area, because we already encrypt the
snapshot image; that data no longer needs to be erased from the
snapshot. I will remove the hidden area patches in the next version.

> > The above patches still need to be cleaned up. I am working on
> > some other bugs, but I can clean them up and send them out ASAP.
> > 
> > The advantage of this solution is that it produces a signed and
> > encrypted image. Beyond the kernel writing it to a block device,
> > it can also provide a signed/encrypted image to user space, which
> > can then store the encrypted image anywhere.
> > 
> > I am OK with your user-space key generator because I don't have
> > a similar solution yet. I am working on the EFI master key and
> > also want to adapt hibernation to the keyring. I will continue
> > that work.
> > 
> The user-space tool can easily add keyring support besides the
> ioctl if needed.
>

Understood.

Thanks
Joey Lee
 
Chen Yu June 28, 2018, 2:52 p.m. UTC | #4
Hi,
On Thu, Jun 28, 2018 at 10:28:56PM +0800, joeyli wrote:
> On Thu, Jun 28, 2018 at 09:50:17PM +0800, Yu Chen wrote:
> > Hi,
> > On Thu, Jun 28, 2018 at 09:07:20PM +0800, joeyli wrote:
> > > Hi Chen Yu,
> > > 
> > > On Wed, Jun 20, 2018 at 05:40:32PM +0800, Chen Yu wrote:
> > > > Use the helper functions introduced previously to encrypt
> > > > the page data before they are submitted to the block device.
> > > > In the case of hibernation compression, the data are first
> > > > compressed and then encrypted; on resume, they are decrypted
> > > > and then decompressed.
> > > >
> > > 
> > > I want to suggest my solution, which directly signs/encrypts the
> > > memory snapshot image. This solution has already shipped with
> > > SLE12 for a couple of years:
> > > 
> > > https://github.com/joeyli/linux-s4sign/commits/s4sign-hmac-encrypted-key-v0.2-v4.17-rc3
> > > 
> > I did not see image page encryption in the above link; if I understand
> 
> PM / hibernate: Generate and verify signature for snapshot image
> https://github.com/joeyli/linux-s4sign/commit/bae39460393ada4c0226dd07cd5e3afcef86b71f
>
> PM / hibernate: snapshot image encryption
> https://github.com/joeyli/linux-s4sign/commit/6a9a0113bb221c036ebd0f6321b7191283fe4929
>
> The above patches sign and encrypt the data pages in the snapshot
> image and put the signature in the header.
>
It looks like your signature code could simply be added on top of the
solution we proposed here; how about we collaborate on this task?
Just my 2 cents:
1. The encryption code should be independent of the snapshot code,
   which is why PATCH [1/3] implements it as a kernel module.
2. There's no need to traverse the snapshot image twice. If the
   image is large (there is a requirement for large images on servers
   now), we can simply do the encryption right before the disk I/O,
   which is why PATCH [2/3] looks the way it does (see the sketch
   below).
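
The difference can be sketched as follows (illustrative pseudo-flow
only; for_each_snapshot_page() and encrypt_or_sign() are made-up
helpers, while crypto_data()/swap_write_page() are the ones used in
this patch):

/* (a) two traversals: transform the whole image first, then write it */
for_each_snapshot_page(page)
        encrypt_or_sign(page);
for_each_snapshot_page(page)
        swap_write_page(handle, page, &hb);

/* (b) one traversal, encrypting right before the disk I/O (this patch) */
while (snapshot_read_next(snapshot) > 0) {
        crypto_data(data_of(*snapshot), PAGE_SIZE, crypt_buf, PAGE_SIZE,
                    true, crypto_page_idx++);
        swap_write_page(handle, crypt_buf, &hb);
}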
> > [...]

Best,
Yu
> > > >  			ret = hib_wait_io(&hb);
> > > >  		if (ret)
> > > >  			break;
> > > > +		if (handle->crypto) {
> > > > +			/*
> > > > +			 * Need a decryption for the
> > > > +			 * data read from the block
> > > > +			 * device.
> > > > +			 */
> > > > +			ret = crypto_data(crypt_buf, PAGE_SIZE,
> > > > +					  data_of(*snapshot),
> > > > +					  PAGE_SIZE,
> > > > +					  false,
> > > > +					  crypto_page_idx);
> > > > +			if (ret)
> > > > +				break;
> > > > +			crypto_page_idx++;
> > > > +		}
> > > >  		if (!(nr_pages % m))
> > > >  			pr_info("Image loading progress: %3d%%\n",
> > > >  				nr_pages / m * 10);
> > > > @@ -1097,6 +1223,8 @@ static int load_image(struct swap_map_handle *handle,
> > > >  			ret = -ENODATA;
> > > >  	}
> > > >  	swsusp_show_speed(start, stop, nr_to_read, "Read");
> > > > +	if (crypt_buf)
> > > > +		free_page((unsigned long)crypt_buf);
> > > >  	return ret;
> > > >  }
> > > >  
> > > > @@ -1164,7 +1292,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
> > > >  	struct hib_bio_batch hb;
> > > >  	ktime_t start;
> > > >  	ktime_t stop;
> > > > -	unsigned nr_pages;
> > > > +	unsigned nr_pages, crypto_page_idx;
> > > >  	size_t off;
> > > >  	unsigned i, thr, run_threads, nr_threads;
> > > >  	unsigned ring = 0, pg = 0, ring_size = 0,
> > > > @@ -1173,6 +1301,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
> > > >  	unsigned char **page = NULL;
> > > >  	struct dec_data *data = NULL;
> > > >  	struct crc_data *crc = NULL;
> > > > +	void *first_page = NULL;
> > > >  
> > > >  	hib_init_batch(&hb);
> > > >  
> > > > @@ -1278,6 +1407,18 @@ static int load_image_lzo(struct swap_map_handle *handle,
> > > >  	}
> > > >  	want = ring_size = i;
> > > >  
> > > > +	/*
> > > > +	 * The first page of data[thr] contains the length of
> > > > +	 * compressed data, this page should not mess up the
> > > > +	 * read buffer, so we allocate a separate page for it.
> > > > +	 */
> > > > +	if (handle->crypto) {
> > > > +		first_page = (void *)get_zeroed_page(GFP_KERNEL);
> > > > +		if (!first_page) {
> > > > +			ret = -ENOMEM;
> > > > +			goto out_clean;
> > > > +		}
> > > > +	}
> > > >  	pr_info("Using %u thread(s) for decompression\n", nr_threads);
> > > >  	pr_info("Loading and decompressing image data (%u pages)...\n",
> > > >  		nr_to_read);
> > > > @@ -1285,6 +1426,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
> > > >  	if (!m)
> > > >  		m = 1;
> > > >  	nr_pages = 0;
> > > > +	crypto_page_idx = 0;
> > > >  	start = ktime_get();
> > > >  
> > > >  	ret = snapshot_write_next(snapshot);
> > > > @@ -1336,7 +1478,24 @@ static int load_image_lzo(struct swap_map_handle *handle,
> > > >  		}
> > > >  
> > > >  		for (thr = 0; have && thr < nr_threads; thr++) {
> > > > -			data[thr].cmp_len = *(size_t *)page[pg];
> > > > +			if (handle->crypto) {
> > > > +				/*
> > > > +				 * Need to decrypt the first page
> > > > +				 * of each data[thr], which contains
> > > > +				 * the compressed data length.
> > > > +				 */
> > > > +				ret = crypto_data(page[pg],
> > > > +						  PAGE_SIZE,
> > > > +						  first_page,
> > > > +						  PAGE_SIZE,
> > > > +						  false,
> > > > +						  crypto_page_idx);
> > > > +				if (ret)
> > > > +					goto out_finish;
> > > > +				data[thr].cmp_len = *(size_t *)first_page;
> > > > +			} else {
> > > > +				data[thr].cmp_len = *(size_t *)page[pg];
> > > > +			}
> > > >  			if (unlikely(!data[thr].cmp_len ||
> > > >  			             data[thr].cmp_len >
> > > >  			             lzo1x_worst_compress(LZO_UNC_SIZE))) {
> > > > @@ -1358,8 +1517,26 @@ static int load_image_lzo(struct swap_map_handle *handle,
> > > >  			for (off = 0;
> > > >  			     off < LZO_HEADER + data[thr].cmp_len;
> > > >  			     off += PAGE_SIZE) {
> > > > -				memcpy(data[thr].cmp + off,
> > > > -				       page[pg], PAGE_SIZE);
> > > > +				if (handle->crypto) {
> > > > +					/*
> > > > +					 * Decrypt the compressed data
> > > > +					 * and leverage the decompression
> > > > +					 * threads to get it done.
> > > > +					 */
> > > > +					ret = crypto_data(page[pg],
> > > > +							  PAGE_SIZE,
> > > > +							  data[thr].cmp + off,
> > > > +							  PAGE_SIZE,
> > > > +							  false,
> > > > +							  crypto_page_idx);
> > > > +					if (ret)
> > > > +						goto out_finish;
> > > > +					crypto_page_idx++;
> > > > +				} else {
> > > > +					memcpy(data[thr].cmp + off,
> > > > +						page[pg], PAGE_SIZE);
> > > > +
> > > > +				}
> > > >  				have--;
> > > >  				want++;
> > > >  				if (++pg >= ring_size)
> > > > @@ -1452,6 +1629,8 @@ static int load_image_lzo(struct swap_map_handle *handle,
> > > >  out_clean:
> > > >  	for (i = 0; i < ring_size; i++)
> > > >  		free_page((unsigned long)page[i]);
> > > > +	if (first_page)
> > > > +		free_page((unsigned long)first_page);
> > > >  	if (crc) {
> > > >  		if (crc->thr)
> > > >  			kthread_stop(crc->thr);
> > > > @@ -1482,6 +1661,7 @@ int swsusp_read(unsigned int *flags_p)
> > > >  	struct swsusp_info *header;
> > > >  
> > > >  	memset(&snapshot, 0, sizeof(struct snapshot_handle));
> > > > +	memset(&handle, 0, sizeof(struct swap_map_handle));
> > > >  	error = snapshot_write_next(&snapshot);
> > > >  	if (error < PAGE_SIZE)
> > > >  		return error < 0 ? error : -EFAULT;
> > > > @@ -1489,6 +1669,16 @@ int swsusp_read(unsigned int *flags_p)
> > > >  	error = get_swap_reader(&handle, flags_p);
> > > >  	if (error)
> > > >  		goto end;
> > > > +	if (*flags_p & SF_ENCRYPT_MODE) {
> > > > +		error = crypto_init(false);
> > > > +		if (!error) {
> > > > +			/* The image has been encrypted. */
> > > > +			handle.crypto = true;
> > > > +		} else {
> > > > +			pr_err("Failed to init cipher during resume.\n");
> > > > +			goto end;
> > > > +		}
> > > > +	}
> > > >  	if (!error)
> > > >  		error = swap_read_page(&handle, header, NULL);
> > > >  	if (!error) {
> > > > @@ -1526,6 +1716,9 @@ int swsusp_check(void)
> > > >  
> > > >  		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
> > > >  			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
> > > > +			/* Read salt passed from previous kernel. */
> > > > +			if (swsusp_header->flags & SF_ENCRYPT_MODE)
> > > > +				crypto_restore((void *)&swsusp_header->salt);
> > > >  			/* Reset swap signature now */
> > > >  			error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
> > > >  						swsusp_resume_block,
> > > > -- 
> > > > 2.7.4
> > > >
joeyli June 29, 2018, 12:59 p.m. UTC | #5
On Thu, Jun 28, 2018 at 10:52:07PM +0800, Yu Chen wrote:
> Hi,
> On Thu, Jun 28, 2018 at 10:28:56PM +0800, joeyli wrote:
> > On Thu, Jun 28, 2018 at 09:50:17PM +0800, Yu Chen wrote:
> > > Hi,
> > > On Thu, Jun 28, 2018 at 09:07:20PM +0800, joeyli wrote:
> > > > Hi Chen Yu,
> > > > 
> > > > On Wed, Jun 20, 2018 at 05:40:32PM +0800, Chen Yu wrote:
> > > > > Use the helper functions introduced previously to encrypt
> > > > > the page data before they are submitted to the block device.
> > > > > Besides, for the case of hibernation compression, the data
> > > > > are firstly compressed and then encrypted, and vice versa
> > > > > for the resume process.
> > > > >
> > > > 
> > > > I want to suggest my solution that it direct signs/encrypts the
> > > > memory snapshot image. This solution is already shipped with
> > > > SLE12 a couple of years:
> > > > 
> > > > https://github.com/joeyli/linux-s4sign/commits/s4sign-hmac-encrypted-key-v0.2-v4.17-rc3
> > > > 
> > > I did not see image page encryption in above link, if I understand
> > 
> > PM / hibernate: Generate and verify signature for snapshot image
> > https://github.com/joeyli/linux-s4sign/commit/bae39460393ada4c0226dd07cd5e3afcef86b71f
> >
> > PM / hibernate: snapshot image encryption
> > https://github.com/joeyli/linux-s4sign/commit/6a9a0113bb221c036ebd0f6321b7191283fe4929
> >
> > The above patches sign and encrypt the data pages in snapshot image.
> > It puts the signature to header.
> >
> It looks like your signature code can be simplyly added on top of the
> solution we proposed here, how about we collaborating on this task?

OK, I will respin my signature patches on top of your user key solution.
 
> just my 2 cents, 
> 1. The cryption code should be indepent of the snapshot code, and
>    this is why we implement it as a kernel module for that in PATCH[1/3].

Why must the encryption code be independent of the snapshot code?

> 2. There's no need to traverse the snapshot image twice, if the
>    image is large(there's requirement on servers now) we can
>    simplyly do the encryption before the disk IO, and this is
>    why PATCH[2/3] looks like this.

If the encryption solution only covers the block device, then the uswsusp
interface must be locked down when the kernel is in lockdown mode:

uswsusp: Disable when the kernel is locked down
https://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs.git/commit/?h=lockdown-20180410&id=8732c1663d7c0305ae01ba5a1ee4d2299b7b4612

I still suggest keeping the solution that directly encrypts the snapshot
image for uswsusp, because the snapshot can be encrypted by the kernel
before user space gets it.

I mean that if uswsusp is used, the kernel directly encrypts the
snapshot image; otherwise the kernel encrypts pages before block io.
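
In pseudo-C, the proposed split would look roughly like this
(uswsusp_in_use() and encrypt_whole_snapshot() are invented names for
illustration, not existing kernel functions):

	/*
	 * Illustration only: choose the encryption point depending on
	 * whether user space (uswsusp) will consume the image.
	 */
	if (uswsusp_in_use()) {
		/* user space reads the image: encrypt the snapshot itself */
		error = encrypt_whole_snapshot(&snapshot);
	} else if (!crypto_init(true)) {
		/* kernel writes the image: encrypt pages before block io */
		handle.crypto = true;
	}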

On the other hand, I have a question about asynchronous block io.
Please see below...

> > > > > Suggested-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
> > > > > Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
> > > > > Cc: Pavel Machek <pavel@ucw.cz>
> > > > > Cc: Len Brown <len.brown@intel.com>
> > > > > Cc: Borislav Petkov <bp@alien8.de>
> > > > > Cc: "Lee, Chun-Yi" <jlee@suse.com>
> > > > > Cc: linux-pm@vger.kernel.org
> > > > > Cc: linux-kernel@vger.kernel.org
> > > > > Signed-off-by: Chen Yu <yu.c.chen@intel.com>
> > > > > ---
> > > > >  kernel/power/power.h |   1 +
> > > > >  kernel/power/swap.c  | 215 ++++++++++++++++++++++++++++++++++++++++++++++++---
> > > > >  2 files changed, 205 insertions(+), 11 deletions(-)
[...snip]
> > > > >  /* kernel/power/hibernate.c */
> > > > >  extern int swsusp_check(void);
> > > > > diff --git a/kernel/power/swap.c b/kernel/power/swap.c
> > > > > index c2bcf97..2b6b3d0 100644
> > > > > --- a/kernel/power/swap.c
> > > > > +++ b/kernel/power/swap.c
[...snip]
> > > > > @@ -1069,18 +1171,42 @@ static int load_image(struct swap_map_handle *handle,
> > > > >  	if (!m)
> > > > >  		m = 1;
> > > > >  	nr_pages = 0;
> > > > > +	crypto_page_idx = 0;
> > > > > +	if (handle->crypto) {
> > > > > +		crypt_buf = (void *)get_zeroed_page(GFP_KERNEL);
> > > > > +		if (!crypt_buf)
> > > > > +			return -ENOMEM;
> > > > > +	}
> > > > >  	start = ktime_get();
> > > > >  	for ( ; ; ) {
> > > > >  		ret = snapshot_write_next(snapshot);
> > > > >  		if (ret <= 0)
> > > > >  			break;
> > > > > -		ret = swap_read_page(handle, data_of(*snapshot), &hb);
> > > > > +		if (handle->crypto)
> > > > > +			ret = swap_read_page(handle, crypt_buf, &hb);
> > > > > +		else
> > > > > +			ret = swap_read_page(handle, data_of(*snapshot), &hb);
> > > > >  		if (ret)
> > > > >  			break;
> > > > >  		if (snapshot->sync_read)
> > > > >  			ret = hib_wait_io(&hb);

In snapshot_write_next(), the logic clears snapshot->sync_read when the
buffer page is not the original page, which means that the page can be
read by asynchronous block io. Otherwise, the kernel calls hib_wait_io()
to wait until the block io is done.
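
For reference, a simplified paraphrase of that part of
snapshot_write_next() (not verbatim kernel code):

	handle->sync_read = 1;
	/* ... */
	handle->buffer = get_buffer(&orig_bm, &ca);
	if (handle->buffer != buffer)
		/* data lands directly in its final page: async read is ok */
		handle->sync_read = 0;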

> > > > >  		if (ret)
> > > > >  			break;
> > > > > +		if (handle->crypto) {
> > > > > +			/*
> > > > > +			 * Need a decryption for the
> > > > > +			 * data read from the block
> > > > > +			 * device.
> > > > > +			 */
> > > > > +			ret = crypto_data(crypt_buf, PAGE_SIZE,
> > > > > +					  data_of(*snapshot),
> > > > > +					  PAGE_SIZE,
> > > > > +					  false,
> > > > > +					  crypto_page_idx);
> > > > > +			if (ret)
> > > > > +				break;
> > > > > +			crypto_page_idx++;
> > > > > +		}

The decryption is here in the for-loop. But maybe the page is still
sitting in the block io queue waiting for the batched read? Then the
page content has not really been read into memory when crypto_data() runs.

> > > > >  		if (!(nr_pages % m))
> > > > >  			pr_info("Image loading progress: %3d%%\n",
> > > > >  				nr_pages / m * 10);
                nr_pages++;
        }
        err2 = hib_wait_io(&hb);
        stop = ktime_get();

When the for-loop breaks, the above hib_wait_io(&hb) guarantees that all
asynchronous block io is done; only then have all pages been read into memory.

I think that the decryption code must be moved to after the for-loop
breaks, or we need some callback hook in the asynchronous block io so we
can run the decryption after the block io has read the page.
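
A minimal sketch of that reordering, assuming the destination pages are
remembered and the cipher supports in-place operation (the dst[]
bookkeeping is invented for illustration):

	for (nr_read = 0; ; nr_read++) {
		ret = snapshot_write_next(snapshot);
		if (ret <= 0)
			break;
		dst[nr_read] = data_of(*snapshot);
		ret = swap_read_page(handle, dst[nr_read], &hb);
		if (ret)
			break;
	}
	err2 = hib_wait_io(&hb);	/* every async read completes here */
	for (i = 0; !ret && !err2 && i < nr_read; i++)
		/* the data is now really in memory, decrypt it in place */
		ret = crypto_data(dst[i], PAGE_SIZE, dst[i], PAGE_SIZE,
				  false, i);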

Thanks a lot!
Joey Lee
Chen Yu July 6, 2018, 3:28 p.m. UTC | #6
Hi Joey Lee,
On Fri, Jun 29, 2018 at 08:59:43PM +0800, joeyli wrote:
> On Thu, Jun 28, 2018 at 10:52:07PM +0800, Yu Chen wrote:
> > Hi,
> > On Thu, Jun 28, 2018 at 10:28:56PM +0800, joeyli wrote:
> > > On Thu, Jun 28, 2018 at 09:50:17PM +0800, Yu Chen wrote:
> > > > Hi,
> > > > On Thu, Jun 28, 2018 at 09:07:20PM +0800, joeyli wrote:
> > > > > Hi Chen Yu,
> > > > > 
> > > > > On Wed, Jun 20, 2018 at 05:40:32PM +0800, Chen Yu wrote:
> > > > > > Use the helper functions introduced previously to encrypt
> > > > > > the page data before they are submitted to the block device.
> > > > > > Besides, for the case of hibernation compression, the data
> > > > > > are firstly compressed and then encrypted, and vice versa
> > > > > > for the resume process.
> > > > > >
> > > > > 
> > > > > I want to suggest my solution that it direct signs/encrypts the
> > > > > memory snapshot image. This solution is already shipped with
> > > > > SLE12 a couple of years:
> > > > > 
> > > > > https://github.com/joeyli/linux-s4sign/commits/s4sign-hmac-encrypted-key-v0.2-v4.17-rc3
> > > > > 
> > > > I did not see image page encryption in above link, if I understand
> > > 
> > > PM / hibernate: Generate and verify signature for snapshot image
> > > https://github.com/joeyli/linux-s4sign/commit/bae39460393ada4c0226dd07cd5e3afcef86b71f
> > >
> > > PM / hibernate: snapshot image encryption
> > > https://github.com/joeyli/linux-s4sign/commit/6a9a0113bb221c036ebd0f6321b7191283fe4929
> > >
> > > The above patches sign and encrypt the data pages in snapshot image.
> > > It puts the signature to header.
> > >
> > It looks like your signature code can be simplyly added on top of the
> > solution we proposed here, how about we collaborating on this task?
> 
> OK, I will base on your user key solution to respin my signature patches.
>  
> > just my 2 cents, 
> > 1. The cryption code should be indepent of the snapshot code, and
> >    this is why we implement it as a kernel module for that in PATCH[1/3].
> 
> Why the cryption code must be indepent of snapshot code?
>
Modules are easier to maintain and customize/improve in the future, IMO.
> > 2. There's no need to traverse the snapshot image twice, if the
> >    image is large(there's requirement on servers now) we can
> >    simplyly do the encryption before the disk IO, and this is
> >    why PATCH[2/3] looks like this.
> 
> If the encryption solution is only for block device, then the uswsusp
> interface must be locked-down when kernel is in locked mode:
> 
> uswsusp: Disable when the kernel is locked down
> https://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs.git/commit/?h=lockdown-20180410&id=8732c1663d7c0305ae01ba5a1ee4d2299b7b4612
> 
> I still suggest to keep the solution to direct encript the snapshot
> image for uswsusp because the snapshot can be encrypted by kernel
> before user space get it.
> 
> I mean that if the uswsusp be used, then kernel direct encrypts the
> snapshot image, otherwise kernel encrypts pages before block io.
> 
I did not quite get the point: if the kernel has been locked down, then
uswsusp is disabled, so why would the kernel encrypt the snapshot for
uswsusp?
> On the other hand, I have a question about asynchronous block io.
> Please see below...
> 
Okay.
> > > > > > Suggested-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
> > > > > > Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
> > > > > > Cc: Pavel Machek <pavel@ucw.cz>
> > > > > > Cc: Len Brown <len.brown@intel.com>
> > > > > > Cc: Borislav Petkov <bp@alien8.de>
> > > > > > Cc: "Lee, Chun-Yi" <jlee@suse.com>
> > > > > > Cc: linux-pm@vger.kernel.org
> > > > > > Cc: linux-kernel@vger.kernel.org
> > > > > > Signed-off-by: Chen Yu <yu.c.chen@intel.com>
> > > > > > ---
> > > > > >  kernel/power/power.h |   1 +
> > > > > >  kernel/power/swap.c  | 215 ++++++++++++++++++++++++++++++++++++++++++++++++---
> > > > > >  2 files changed, 205 insertions(+), 11 deletions(-)
> [...snip]
> > > > > >  /* kernel/power/hibernate.c */
> > > > > >  extern int swsusp_check(void);
> > > > > > diff --git a/kernel/power/swap.c b/kernel/power/swap.c
> > > > > > index c2bcf97..2b6b3d0 100644
> > > > > > --- a/kernel/power/swap.c
> > > > > > +++ b/kernel/power/swap.c
> [...snip]
> > > > > > @@ -1069,18 +1171,42 @@ static int load_image(struct swap_map_handle *handle,
> > > > > >  	if (!m)
> > > > > >  		m = 1;
> > > > > >  	nr_pages = 0;
> > > > > > +	crypto_page_idx = 0;
> > > > > > +	if (handle->crypto) {
> > > > > > +		crypt_buf = (void *)get_zeroed_page(GFP_KERNEL);
> > > > > > +		if (!crypt_buf)
> > > > > > +			return -ENOMEM;
> > > > > > +	}
> > > > > >  	start = ktime_get();
> > > > > >  	for ( ; ; ) {
> > > > > >  		ret = snapshot_write_next(snapshot);
> > > > > >  		if (ret <= 0)
> > > > > >  			break;
> > > > > > -		ret = swap_read_page(handle, data_of(*snapshot), &hb);
> > > > > > +		if (handle->crypto)
> > > > > > +			ret = swap_read_page(handle, crypt_buf, &hb);
> > > > > > +		else
> > > > > > +			ret = swap_read_page(handle, data_of(*snapshot), &hb);
> > > > > >  		if (ret)
> > > > > >  			break;
> > > > > >  		if (snapshot->sync_read)
> > > > > >  			ret = hib_wait_io(&hb);
> 
> In snapshot_write_next(), the logic will clean the snapshot->sync_read
> when the buffer page doesn't equal to the original page. Which means
> that the page can be read by asynchronous block io. Otherwise, kernel
> calls hib_wait_io() to wait until the block io was done.
> 
Yes, you are right, I missed the asynchronous block io in the
non-compression case.
> > > > > >  		if (ret)
> > > > > >  			break;
> > > > > > +		if (handle->crypto) {
> > > > > > +			/*
> > > > > > +			 * Need a decryption for the
> > > > > > +			 * data read from the block
> > > > > > +			 * device.
> > > > > > +			 */
> > > > > > +			ret = crypto_data(crypt_buf, PAGE_SIZE,
> > > > > > +					  data_of(*snapshot),
> > > > > > +					  PAGE_SIZE,
> > > > > > +					  false,
> > > > > > +					  crypto_page_idx);
> > > > > > +			if (ret)
> > > > > > +				break;
> > > > > > +			crypto_page_idx++;
> > > > > > +		}
> 
> The decryption is here in the for-loop. But maybe the page is still in
> the block io queue for waiting the batch read? The page content is not
> really read to memory when the crypto_data be run? 
> 
Yes, it is possible.
> > > > > >  		if (!(nr_pages % m))
> > > > > >  			pr_info("Image loading progress: %3d%%\n",
> > > > > >  				nr_pages / m * 10);
>                 nr_pages++;
>         }
>         err2 = hib_wait_io(&hb);
>         stop = ktime_get();
> 
> When the for-loop is break, the above hib_wait_io(&hb) guarantees that
> all asynchronous block io are done. Then all pages are read to memory.
> 
> I think that the decryption code must be moved after for-loop be break.
> Or there have any callback hook in the asynchronous block io that we
> can put the encryption code after the block io read the page.
> 
Yes, we can move the decryption code here. Another thought came to my
mind: how about disabling asynchronous block io in the non-compression
case when encrypted hibernation is enabled? Encrypted hibernation does
not use asynchronous block io anyway and has to traverse the pages one
by one, so asynchronous block io brings it no benefit in the
non-compression case. Or am I missing something?
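
As a sketch, the read path in load_image() could simply wait for each
page when encryption is on, trading io parallelism for simplicity
(illustration only, not the posted patch):

	if (handle->crypto) {
		ret = swap_read_page(handle, crypt_buf, &hb);
		if (!ret)
			/* no async window: the page must be in memory
			 * before it can be decrypted */
			ret = hib_wait_io(&hb);
		if (!ret)
			ret = crypto_data(crypt_buf, PAGE_SIZE,
					  data_of(*snapshot), PAGE_SIZE,
					  false, crypto_page_idx++);
	}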

Best,
Yu
> Thanks a lot!
> Joey Lee
joeyli July 12, 2018, 10:10 a.m. UTC | #7
Hi Yu Chen, 

Sorry for my delay...

On Fri, Jul 06, 2018 at 11:28:56PM +0800, Yu Chen wrote:
> Hi Joey Lee,
> On Fri, Jun 29, 2018 at 08:59:43PM +0800, joeyli wrote:
> > On Thu, Jun 28, 2018 at 10:52:07PM +0800, Yu Chen wrote:
> > > Hi,
> > > On Thu, Jun 28, 2018 at 10:28:56PM +0800, joeyli wrote:
> > > > On Thu, Jun 28, 2018 at 09:50:17PM +0800, Yu Chen wrote:
> > > > > Hi,
> > > > > On Thu, Jun 28, 2018 at 09:07:20PM +0800, joeyli wrote:
> > > > > > Hi Chen Yu,
> > > > > > 
> > > > > > On Wed, Jun 20, 2018 at 05:40:32PM +0800, Chen Yu wrote:
> > > > > > > Use the helper functions introduced previously to encrypt
> > > > > > > the page data before they are submitted to the block device.
> > > > > > > Besides, for the case of hibernation compression, the data
> > > > > > > are firstly compressed and then encrypted, and vice versa
> > > > > > > for the resume process.
> > > > > > >
> > > > > > 
> > > > > > I want to suggest my solution that it direct signs/encrypts the
> > > > > > memory snapshot image. This solution is already shipped with
> > > > > > SLE12 a couple of years:
> > > > > > 
> > > > > > https://github.com/joeyli/linux-s4sign/commits/s4sign-hmac-encrypted-key-v0.2-v4.17-rc3
> > > > > > 
> > > > > I did not see image page encryption in above link, if I understand
> > > > 
> > > > PM / hibernate: Generate and verify signature for snapshot image
> > > > https://github.com/joeyli/linux-s4sign/commit/bae39460393ada4c0226dd07cd5e3afcef86b71f
> > > >
> > > > PM / hibernate: snapshot image encryption
> > > > https://github.com/joeyli/linux-s4sign/commit/6a9a0113bb221c036ebd0f6321b7191283fe4929
> > > >
> > > > The above patches sign and encrypt the data pages in snapshot image.
> > > > It puts the signature to header.
> > > >
> > > It looks like your signature code can be simplyly added on top of the
> > > solution we proposed here, how about we collaborating on this task?
> > 
> > OK, I will base on your user key solution to respin my signature patches.
> >  
> > > just my 2 cents, 
> > > 1. The cryption code should be indepent of the snapshot code, and
> > >    this is why we implement it as a kernel module for that in PATCH[1/3].
> > 
> > Why the cryption code must be indepent of snapshot code?
> >
> Modules can be easier to be maintained and customized/improved in the future IMO..

hm... currently I don't see an apparent benefit in this...

About modularization, is it possible to separate the key handler code
from crypto_hibernation.c? Otherwise the snapshot signature needs
to depend on the encrypt function.

> > > 2. There's no need to traverse the snapshot image twice, if the
> > >    image is large(there's requirement on servers now) we can
> > >    simplyly do the encryption before the disk IO, and this is
> > >    why PATCH[2/3] looks like this.
> > 
> > If the encryption solution is only for block device, then the uswsusp
> > interface must be locked-down when kernel is in locked mode:
> > 
> > uswsusp: Disable when the kernel is locked down
> > https://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs.git/commit/?h=lockdown-20180410&id=8732c1663d7c0305ae01ba5a1ee4d2299b7b4612
> > 
> > I still suggest to keep the solution to direct encript the snapshot
> > image for uswsusp because the snapshot can be encrypted by kernel
> > before user space get it.
> > 
> > I mean that if the uswsusp be used, then kernel direct encrypts the
> > snapshot image, otherwise kernel encrypts pages before block io.
> > 
> I did not quite get the point, if the kernel has been locked down,
> then the uswsusp is disabled, why the kernel encrypts the snapshot
> for uswsusp?

Some functions are locked down because there is no appropriate mechanism
to check the integrity of the data being written. If the snapshot image
can be encrypted and authenticated, then the uswsusp interface stays
available to userspace and we do not need to lock it down.

> > On the other hand, I have a question about asynchronous block io.
> > Please see below...
> > 
> Okay.
> > > > > > > Suggested-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
> > > > > > > Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
> > > > > > > Cc: Pavel Machek <pavel@ucw.cz>
> > > > > > > Cc: Len Brown <len.brown@intel.com>
> > > > > > > Cc: Borislav Petkov <bp@alien8.de>
> > > > > > > Cc: "Lee, Chun-Yi" <jlee@suse.com>
> > > > > > > Cc: linux-pm@vger.kernel.org
> > > > > > > Cc: linux-kernel@vger.kernel.org
> > > > > > > Signed-off-by: Chen Yu <yu.c.chen@intel.com>
> > > > > > > ---
> > > > > > >  kernel/power/power.h |   1 +
> > > > > > >  kernel/power/swap.c  | 215 ++++++++++++++++++++++++++++++++++++++++++++++++---
> > > > > > >  2 files changed, 205 insertions(+), 11 deletions(-)
> > [...snip]
> > > > > > >  /* kernel/power/hibernate.c */
> > > > > > >  extern int swsusp_check(void);
> > > > > > > diff --git a/kernel/power/swap.c b/kernel/power/swap.c
> > > > > > > index c2bcf97..2b6b3d0 100644
> > > > > > > --- a/kernel/power/swap.c
> > > > > > > +++ b/kernel/power/swap.c
> > [...snip]
> > > > > > > @@ -1069,18 +1171,42 @@ static int load_image(struct swap_map_handle *handle,
> > > > > > >  	if (!m)
> > > > > > >  		m = 1;
> > > > > > >  	nr_pages = 0;
> > > > > > > +	crypto_page_idx = 0;
> > > > > > > +	if (handle->crypto) {
> > > > > > > +		crypt_buf = (void *)get_zeroed_page(GFP_KERNEL);
> > > > > > > +		if (!crypt_buf)
> > > > > > > +			return -ENOMEM;
> > > > > > > +	}
> > > > > > >  	start = ktime_get();
> > > > > > >  	for ( ; ; ) {
> > > > > > >  		ret = snapshot_write_next(snapshot);
> > > > > > >  		if (ret <= 0)
> > > > > > >  			break;
> > > > > > > -		ret = swap_read_page(handle, data_of(*snapshot), &hb);
> > > > > > > +		if (handle->crypto)
> > > > > > > +			ret = swap_read_page(handle, crypt_buf, &hb);
> > > > > > > +		else
> > > > > > > +			ret = swap_read_page(handle, data_of(*snapshot), &hb);
> > > > > > >  		if (ret)
> > > > > > >  			break;
> > > > > > >  		if (snapshot->sync_read)
> > > > > > >  			ret = hib_wait_io(&hb);
> > 
> > In snapshot_write_next(), the logic will clean the snapshot->sync_read
> > when the buffer page doesn't equal to the original page. Which means
> > that the page can be read by asynchronous block io. Otherwise, kernel
> > calls hib_wait_io() to wait until the block io was done.
> > 
> Yes, you are right, I missed the asynchronous block io in non-compression
> case.
> > > > > > >  		if (ret)
> > > > > > >  			break;
> > > > > > > +		if (handle->crypto) {
> > > > > > > +			/*
> > > > > > > +			 * Need a decryption for the
> > > > > > > +			 * data read from the block
> > > > > > > +			 * device.
> > > > > > > +			 */
> > > > > > > +			ret = crypto_data(crypt_buf, PAGE_SIZE,
> > > > > > > +					  data_of(*snapshot),
> > > > > > > +					  PAGE_SIZE,
> > > > > > > +					  false,
> > > > > > > +					  crypto_page_idx);
> > > > > > > +			if (ret)
> > > > > > > +				break;
> > > > > > > +			crypto_page_idx++;
> > > > > > > +		}
> > 
> > The decryption is here in the for-loop. But maybe the page is still in
> > the block io queue for waiting the batch read? The page content is not
> > really read to memory when the crypto_data be run? 
> > 
> Yes, it is possible.
> > > > > > >  		if (!(nr_pages % m))
> > > > > > >  			pr_info("Image loading progress: %3d%%\n",
> > > > > > >  				nr_pages / m * 10);
> >                 nr_pages++;
> >         }
> >         err2 = hib_wait_io(&hb);
> >         stop = ktime_get();
> > 
> > When the for-loop is break, the above hib_wait_io(&hb) guarantees that
> > all asynchronous block io are done. Then all pages are read to memory.
> > 
> > I think that the decryption code must be moved after for-loop be break.
> > Or there have any callback hook in the asynchronous block io that we
> > can put the encryption code after the block io read the page.
> > 
> Yes, we can move the decryptino code here. Another thought came to my
> mind, how about disabling the asynchronous block io in non-compression
> case if encryption hibernation is enabled(because encryption hibernation
> is not using the  asynchronous block io and it has to traverse each page
> one by one, so asynchronous block io does not bring benefit to encryption
> hibernation in non-compression case)? Or do I miss something?
>

Disabling asynchronous block io will cause an IO performance drop, and
as I remember it is a large one; you can try it. I am not sure how many
people are still using the non-compression mode.

I do not know the block layer API too well. Is it possible for the kernel
to encrypt each page before writing it to the device?

Thanks
Joey Lee
Chen Yu July 13, 2018, 7:34 a.m. UTC | #8
Hi,
On Thu, Jul 12, 2018 at 06:10:37PM +0800, joeyli wrote:
> Hi Yu Chen, 
> 
> Sorry for my delay...
> 
> On Fri, Jul 06, 2018 at 11:28:56PM +0800, Yu Chen wrote:
> > Hi Joey Lee,
> > On Fri, Jun 29, 2018 at 08:59:43PM +0800, joeyli wrote:
> > > On Thu, Jun 28, 2018 at 10:52:07PM +0800, Yu Chen wrote:
> > > > Hi,
> > > > On Thu, Jun 28, 2018 at 10:28:56PM +0800, joeyli wrote:
> > > > > On Thu, Jun 28, 2018 at 09:50:17PM +0800, Yu Chen wrote:
> > > > > > Hi,
> > > > > > On Thu, Jun 28, 2018 at 09:07:20PM +0800, joeyli wrote:
> > > > > > > Hi Chen Yu,
> > > > > > > 
> > > > > > > On Wed, Jun 20, 2018 at 05:40:32PM +0800, Chen Yu wrote:
> > > > > > > > Use the helper functions introduced previously to encrypt
> > > > > > > > the page data before they are submitted to the block device.
> > > > > > > > Besides, for the case of hibernation compression, the data
> > > > > > > > are firstly compressed and then encrypted, and vice versa
> > > > > > > > for the resume process.
> > > > > > > >
> > > > > > > 
> > > > > > > I want to suggest my solution that it direct signs/encrypts the
> > > > > > > memory snapshot image. This solution is already shipped with
> > > > > > > SLE12 a couple of years:
> > > > > > > 
> > > > > > > https://github.com/joeyli/linux-s4sign/commits/s4sign-hmac-encrypted-key-v0.2-v4.17-rc3
> > > > > > > 
> > > > > > I did not see image page encryption in above link, if I understand
> > > > > 
> > > > > PM / hibernate: Generate and verify signature for snapshot image
> > > > > https://github.com/joeyli/linux-s4sign/commit/bae39460393ada4c0226dd07cd5e3afcef86b71f
> > > > >
> > > > > PM / hibernate: snapshot image encryption
> > > > > https://github.com/joeyli/linux-s4sign/commit/6a9a0113bb221c036ebd0f6321b7191283fe4929
> > > > >
> > > > > The above patches sign and encrypt the data pages in snapshot image.
> > > > > It puts the signature to header.
> > > > >
> > > > It looks like your signature code can be simplyly added on top of the
> > > > solution we proposed here, how about we collaborating on this task?
> > > 
> > > OK, I will base on your user key solution to respin my signature patches.
> > >  
> > > > just my 2 cents, 
> > > > 1. The cryption code should be indepent of the snapshot code, and
> > > >    this is why we implement it as a kernel module for that in PATCH[1/3].
> > > 
> > > Why the cryption code must be indepent of snapshot code?
> > >
> > Modules can be easier to be maintained and customized/improved in the future IMO..
> 
> hm... currently I didn't see apparent benefit on this...
> 
> About modularization, is it possible to separate the key handler code
> from crypto_hibernation.c? Otherwise the snapshot signature needs
> to depend on encrypt function.
> 
I understand.
It seems that we can reuse crypto_data() for the signature logic.
For example, add one parameter, crypto_data(..., crypto_mode), where
crypto_mode is a combination of ENCRYPT/DECRYPT/SIGNATURE_START/SIGNATURE_END
and can be controlled via sysfs. If SIGNATURE_START is set, crypto_data()
invokes crypto_shash_update() to update the "hmac(sha512)" hash, and if
SIGNATURE_END is set, crypto_shash_final() stores the final result in the
global buffer hibernation_crypto.keys.digest[SNAPSHOT_DIGEST_SIZE], which
can be passed to the restore kernel the same way as the salt. Besides,
the swsusp_prepare_hash() in your code could be moved into
init_crypto_helper(), so the signature key could also reuse the same key
passed from user space or obtained via get_efi_secret_key(). In this way,
the change in snapshot.c is minimal and the crypto facilities can be
maintained in one file.
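
A minimal sketch of that combined interface; the mode flags, hash_desc,
and the do_encrypt()/do_decrypt() helpers below are assumptions drawn
from this thread, not code from either patch set:

	/* Hypothetical mode flags for the extended crypto_data(). */
	#define CRYPTO_HIBERNATE_ENCRYPT	BIT(0)
	#define CRYPTO_HIBERNATE_DECRYPT	BIT(1)
	#define CRYPTO_HIBERNATE_SIG_START	BIT(2)
	#define CRYPTO_HIBERNATE_SIG_END	BIT(3)

	static int crypto_data(const char *inbuf, int inlen,
			       char *outbuf, int outlen,
			       int page_idx, unsigned int crypto_mode)
	{
		int ret = 0;

		/* accumulate the hmac(sha512) digest page by page */
		if (crypto_mode & CRYPTO_HIBERNATE_SIG_START)
			ret = crypto_shash_update(hash_desc,
						  (const u8 *)inbuf, inlen);
		if (ret)
			return ret;

		/* do_encrypt()/do_decrypt() stand in for the cipher calls */
		if (crypto_mode & CRYPTO_HIBERNATE_ENCRYPT)
			ret = do_encrypt(inbuf, inlen, outbuf, outlen, page_idx);
		else if (crypto_mode & CRYPTO_HIBERNATE_DECRYPT)
			ret = do_decrypt(inbuf, inlen, outbuf, outlen, page_idx);
		if (ret)
			return ret;

		/* keep the digest so it can travel with the image, like the salt */
		if (crypto_mode & CRYPTO_HIBERNATE_SIG_END)
			ret = crypto_shash_final(hash_desc,
						 hibernation_crypto.keys.digest);
		return ret;
	}
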
> > > > 2. There's no need to traverse the snapshot image twice, if the
> > > >    image is large(there's requirement on servers now) we can
> > > >    simplyly do the encryption before the disk IO, and this is
> > > >    why PATCH[2/3] looks like this.
> > > 
> > > If the encryption solution is only for block device, then the uswsusp
> > > interface must be locked-down when kernel is in locked mode:
> > > 
> > > uswsusp: Disable when the kernel is locked down
> > > https://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs.git/commit/?h=lockdown-20180410&id=8732c1663d7c0305ae01ba5a1ee4d2299b7b4612
> > > 
> > > I still suggest to keep the solution to direct encript the snapshot
> > > image for uswsusp because the snapshot can be encrypted by kernel
> > > before user space get it.
> > > 
> > > I mean that if the uswsusp be used, then kernel direct encrypts the
> > > snapshot image, otherwise kernel encrypts pages before block io.
> > > 
> > I did not quite get the point, if the kernel has been locked down,
> > then the uswsusp is disabled, why the kernel encrypts the snapshot
> > for uswsusp?
> 
> There have some functions be locked-down because there have no
> appropriate mechanisms to check the integrity of writing data. If
> the snapshot image can be encrypted and authentication, then the
> uswsusp interface is avaiable for userspace. We do not need to lock
> it down.
OK, I got your point. To be honest, I like your implementation treating
the snapshot data separately, except that it might add a small overhead
when traversing a large snapshot and couples the snapshot logic a little
with the crypto logic. Let me send our v2 using the crypto-before-block-io
approach for review, and maybe change to your solution using the
encapsulated APIs in crypto_hibernate.c.
> 
> > > On the other hand, I have a question about asynchronous block io.
> > > Please see below...
> > > 
> > Okay.
> > > > > > > > Suggested-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
> > > > > > > > Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
> > > > > > > > Cc: Pavel Machek <pavel@ucw.cz>
> > > > > > > > Cc: Len Brown <len.brown@intel.com>
> > > > > > > > Cc: Borislav Petkov <bp@alien8.de>
> > > > > > > > Cc: "Lee, Chun-Yi" <jlee@suse.com>
> > > > > > > > Cc: linux-pm@vger.kernel.org
> > > > > > > > Cc: linux-kernel@vger.kernel.org
> > > > > > > > Signed-off-by: Chen Yu <yu.c.chen@intel.com>
> > > > > > > > ---
> > > > > > > >  kernel/power/power.h |   1 +
> > > > > > > >  kernel/power/swap.c  | 215 ++++++++++++++++++++++++++++++++++++++++++++++++---
> > > > > > > >  2 files changed, 205 insertions(+), 11 deletions(-)
> > > [...snip]
> > > > > > > >  /* kernel/power/hibernate.c */
> > > > > > > >  extern int swsusp_check(void);
> > > > > > > > diff --git a/kernel/power/swap.c b/kernel/power/swap.c
> > > > > > > > index c2bcf97..2b6b3d0 100644
> > > > > > > > --- a/kernel/power/swap.c
> > > > > > > > +++ b/kernel/power/swap.c
> > > [...snip]
> > > > > > > > @@ -1069,18 +1171,42 @@ static int load_image(struct swap_map_handle *handle,
> > > > > > > >  	if (!m)
> > > > > > > >  		m = 1;
> > > > > > > >  	nr_pages = 0;
> > > > > > > > +	crypto_page_idx = 0;
> > > > > > > > +	if (handle->crypto) {
> > > > > > > > +		crypt_buf = (void *)get_zeroed_page(GFP_KERNEL);
> > > > > > > > +		if (!crypt_buf)
> > > > > > > > +			return -ENOMEM;
> > > > > > > > +	}
> > > > > > > >  	start = ktime_get();
> > > > > > > >  	for ( ; ; ) {
> > > > > > > >  		ret = snapshot_write_next(snapshot);
> > > > > > > >  		if (ret <= 0)
> > > > > > > >  			break;
> > > > > > > > -		ret = swap_read_page(handle, data_of(*snapshot), &hb);
> > > > > > > > +		if (handle->crypto)
> > > > > > > > +			ret = swap_read_page(handle, crypt_buf, &hb);
> > > > > > > > +		else
> > > > > > > > +			ret = swap_read_page(handle, data_of(*snapshot), &hb);
> > > > > > > >  		if (ret)
> > > > > > > >  			break;
> > > > > > > >  		if (snapshot->sync_read)
> > > > > > > >  			ret = hib_wait_io(&hb);
> > > 
> > > In snapshot_write_next(), the logic will clean the snapshot->sync_read
> > > when the buffer page doesn't equal to the original page. Which means
> > > that the page can be read by asynchronous block io. Otherwise, kernel
> > > calls hib_wait_io() to wait until the block io was done.
> > > 
> > Yes, you are right, I missed the asynchronous block io in non-compression
> > case.
> > > > > > > >  		if (ret)
> > > > > > > >  			break;
> > > > > > > > +		if (handle->crypto) {
> > > > > > > > +			/*
> > > > > > > > +			 * Need a decryption for the
> > > > > > > > +			 * data read from the block
> > > > > > > > +			 * device.
> > > > > > > > +			 */
> > > > > > > > +			ret = crypto_data(crypt_buf, PAGE_SIZE,
> > > > > > > > +					  data_of(*snapshot),
> > > > > > > > +					  PAGE_SIZE,
> > > > > > > > +					  false,
> > > > > > > > +					  crypto_page_idx);
> > > > > > > > +			if (ret)
> > > > > > > > +				break;
> > > > > > > > +			crypto_page_idx++;
> > > > > > > > +		}
> > > 
> > > The decryption is here in the for-loop. But maybe the page is still in
> > > the block io queue for waiting the batch read? The page content is not
> > > really read to memory when the crypto_data be run? 
> > > 
> > Yes, it is possible.
> > > > > > > >  		if (!(nr_pages % m))
> > > > > > > >  			pr_info("Image loading progress: %3d%%\n",
> > > > > > > >  				nr_pages / m * 10);
> > >                 nr_pages++;
> > >         }
> > >         err2 = hib_wait_io(&hb);
> > >         stop = ktime_get();
> > > 
> > > When the for-loop is break, the above hib_wait_io(&hb) guarantees that
> > > all asynchronous block io are done. Then all pages are read to memory.
> > > 
> > > I think that the decryption code must be moved after for-loop be break.
> > > Or there have any callback hook in the asynchronous block io that we
> > > can put the encryption code after the block io read the page.
> > > 
> > Yes, we can move the decryptino code here. Another thought came to my
> > mind, how about disabling the asynchronous block io in non-compression
> > case if encryption hibernation is enabled(because encryption hibernation
> > is not using the  asynchronous block io and it has to traverse each page
> > one by one, so asynchronous block io does not bring benefit to encryption
> > hibernation in non-compression case)? Or do I miss something?
> >
> 
> Disabling asynchronous block will cause IO performance drops. As I remeber
> that it's a lot of drops. You can try it. I am not sure how many people
> are still using non-compression mode.
Let me compare the speeds with and without asynchronous io in the non-compression mode.
> 
> I do not know too much on block layer API. Is it possible to encrypt each page
> before writing to device from kernel?
It might be hard, because the IO scheduler may merge different IO
requests together, and we could not figure out which sections of a
merged request should be encrypted.
> 
> Thanks
> Joey Lee
joeyli July 18, 2018, 3:48 p.m. UTC | #9
On Fri, Jul 13, 2018 at 03:34:25PM +0800, Yu Chen wrote:
> Hi,
> On Thu, Jul 12, 2018 at 06:10:37PM +0800, joeyli wrote:
> > Hi Yu Chen, 
> > 
> > Sorry for my delay...
> > 
> > On Fri, Jul 06, 2018 at 11:28:56PM +0800, Yu Chen wrote:
[...snip]
> > > > Why the cryption code must be indepent of snapshot code?
> > > >
> > > Modules can be easier to be maintained and customized/improved in the future IMO..
> > 
> > hm... currently I didn't see apparent benefit on this...
> > 
> > About modularization, is it possible to separate the key handler code
> > from crypto_hibernation.c? Otherwise the snapshot signature needs
> > to depend on encrypt function.
> > 
> I understand.
> It seems that we can reuse crypto_data() for the signature logic.
> For example, add one parameter for crypto_data(..., crypto_mode)
> the crypto_mode is a combination of ENCRYPT/DECRYPT/SIGNATURE_START/SIGNATURE_END,
> and can be controled by sysfs. If SIGNATURE_START is enabled, the crypto_data()
> invokes crypto_shash_update() to get "hmac(sha512)" hash, and if SIGNATURE_END
> is enabled, crypto_shash_final() stores the final result in global buffer

I agree that the swsusp_prepare and hmac-hash code can be extracted into
crypto_hibernation.

> struct hibernation_crypto.keys.digest[SNAPSHOT_DIGEST_SIZE] which can be
> passed to the restore kernel, the same as the salt. Besides,

The salt is stored in the swsusp_header and put in swap. What I want is
for the signature of the snapshot to be kept in the snapshot header, as
in the original design of my patch. The benefit is that a signed snapshot
can be stored anywhere, not just in swap; user space can take the signed
snapshot image anywhere.
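
For illustration, the header could carry the digest directly; the field
names and layout below are assumptions, not taken from the linked patches:

	#define SNAPSHOT_DIGEST_SIZE	64	/* hmac(sha512) digest */

	/*
	 * Illustrative snapshot header carrying its own signature, so a
	 * signed image is self-contained wherever it is stored.
	 */
	struct snapshot_sig_header {
		unsigned long	num_physpages;
		unsigned long	image_pages;
		unsigned long	pages;
		u8		signature[SNAPSHOT_DIGEST_SIZE];
	};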

> the swsusp_prepare_hash() in your code could be moved into
> init_crypto_helper(), thus the signature key could also reuse
> the same key passed from user space or via get_efi_secret_key().
> In this way, the change in snapshot.c is minimal and the crypto
> facilities could be maintained in one file.

I agree. But as I mentioned in that mail, the key handler code in
hibernation_crypto() should be extracted into another C file so that its
logic is not mixed with the crypto code. The benefit is that we can add
more key sources, like an encrypted key or an EFI key, in the future.
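
A minimal sketch of that separation, assuming one ops structure per key
source (the names below are invented for illustration):

	/*
	 * Hypothetical key-source abstraction: each backend (user space
	 * key, encrypted key, EFI secret key, ...) provides one ops
	 * structure, and the crypto code only ever calls get_key().
	 */
	struct hibernation_key_source {
		const char *name;
		int (*get_key)(u8 *key, size_t len);
	};

	static const struct hibernation_key_source *key_sources[] = {
		&user_key_source,	/* key passed in from user space */
		&efi_key_source,	/* e.g. wrapping get_efi_secret_key() */
	};

	static int hibernation_get_key(u8 *key, size_t len)
	{
		int i, ret = -ENOKEY;

		for (i = 0; ret && i < ARRAY_SIZE(key_sources); i++)
			ret = key_sources[i]->get_key(key, len);
		return ret;
	}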

> > > > > 2. There's no need to traverse the snapshot image twice, if the
> > > > >    image is large(there's requirement on servers now) we can
> > > > >    simplyly do the encryption before the disk IO, and this is
> > > > >    why PATCH[2/3] looks like this.
> > > > 
> > > > If the encryption solution is only for block device, then the uswsusp
> > > > interface must be locked-down when kernel is in locked mode:
> > > > 
> > > > uswsusp: Disable when the kernel is locked down
> > > > https://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs.git/commit/?h=lockdown-20180410&id=8732c1663d7c0305ae01ba5a1ee4d2299b7b4612
> > > > 
> > > > I still suggest to keep the solution to direct encript the snapshot
> > > > image for uswsusp because the snapshot can be encrypted by kernel
> > > > before user space get it.
> > > > 
> > > > I mean that if the uswsusp be used, then kernel direct encrypts the
> > > > snapshot image, otherwise kernel encrypts pages before block io.
> > > > 
> > > I did not quite get the point, if the kernel has been locked down,
> > > then the uswsusp is disabled, why the kernel encrypts the snapshot
> > > for uswsusp?
> > 
> > There have some functions be locked-down because there have no
> > appropriate mechanisms to check the integrity of writing data. If
> > the snapshot image can be encrypted and authentication, then the
> > uswsusp interface is avaiable for userspace. We do not need to lock
> > it down.
> Ok, I got your point. To be honest, I like your implementation to treat
> the snapshot data seperately except that it might cause small overhead
> when traversing large number of snapshot and make snapshot logic a little
> coupling with crypto logic. Let me send our v2 using the crypto-before-blockio
> for review and maybe change to your solution using the encapsulated APIs in
> crypto_hibernate.c.

Because I sometimes get stuck on other topics...

If you are in a hurry to push your encryption solution, please do not
worry too much about the signature check. Just complete your patches and
push them to the kernel mainline; I will respin my signature work on top
of your result.

Thanks
Joey Lee
Chen Yu July 19, 2018, 9:16 a.m. UTC | #10
On Wed, Jul 18, 2018 at 11:48:19PM +0800, joeyli wrote:
> On Fri, Jul 13, 2018 at 03:34:25PM +0800, Yu Chen wrote:
> > Hi,
> > On Thu, Jul 12, 2018 at 06:10:37PM +0800, joeyli wrote:
> > > Hi Yu Chen, 
> > > 
> > > Sorry for my delay...
> > > 
> > > On Fri, Jul 06, 2018 at 11:28:56PM +0800, Yu Chen wrote:
> [...snip]
> > > > > Why the cryption code must be indepent of snapshot code?
> > > > >
> > > > Modules can be easier to be maintained and customized/improved in the future IMO..
> > > 
> > > hm... currently I didn't see apparent benefit on this...
> > > 
> > > About modularization, is it possible to separate the key handler code
> > > from crypto_hibernation.c? Otherwise the snapshot signature needs
> > > to depend on encrypt function.
> > > 
> > I understand.
> > It seems that we can reuse crypto_data() for the signature logic.
> > For example, add one parameter for crypto_data(..., crypto_mode)
> > the crypto_mode is a combination of ENCRYPT/DECRYPT/SIGNATURE_START/SIGNATURE_END,
> > and can be controled by sysfs. If SIGNATURE_START is enabled, the crypto_data()
> > invokes crypto_shash_update() to get "hmac(sha512)" hash, and if SIGNATURE_END
> > is enabled, crypto_shash_final() stores the final result in global buffer
> 
> I agree that the swsusp_prepare and hmac-hash codes can be extract to
> crypto_hibernation.  
> 
> > struct hibernation_crypto.keys.digest[SNAPSHOT_DIGEST_SIZE] which can be
> > passed to the restore kernel, the same as the salt. Besides,
> 
> The salt is stored in the swsusp_header and put in swap. What I want
> is that the signature of snapshot should be keep in the snapshot header
> as my original design in patch. The benefit is that the snapshot with
> signature can be stored to any place but not just swap. The user space
> can take snapshot image with signature to anywhere.
>
Okay, got it.
> > the swsusp_prepare_hash() in your code could be moved into
> > init_crypto_helper(), thus the signature key could also reuse
> > the same key passed from user space or via get_efi_secret_key().
> > In this way, the change in snapshot.c is minimal and the crypto
> > facilities could be maintained in one file.
> 
> I agree. But as I mentioned in that mail, the key handler codes
> in hibernation_crypto() should be extract to another C file to avoid
> the logic is mixed with the crypto code. The benefit is that we can
> add more key sources like encrypted key or EFI key in the future. 
> 
OK, I will extract the key handler code from this file.
> > > > > > 2. There's no need to traverse the snapshot image twice, if the
> > > > > >    image is large(there's requirement on servers now) we can
> > > > > >    simplyly do the encryption before the disk IO, and this is
> > > > > >    why PATCH[2/3] looks like this.
> > > > > 
> > > > > If the encryption solution is only for block device, then the uswsusp
> > > > > interface must be locked-down when kernel is in locked mode:
> > > > > 
> > > > > uswsusp: Disable when the kernel is locked down
> > > > > https://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs.git/commit/?h=lockdown-20180410&id=8732c1663d7c0305ae01ba5a1ee4d2299b7b4612
> > > > > 
> > > > > I still suggest to keep the solution to direct encript the snapshot
> > > > > image for uswsusp because the snapshot can be encrypted by kernel
> > > > > before user space get it.
> > > > > 
> > > > > I mean that if the uswsusp be used, then kernel direct encrypts the
> > > > > snapshot image, otherwise kernel encrypts pages before block io.
> > > > > 
> > > > I did not quite get the point, if the kernel has been locked down,
> > > > then the uswsusp is disabled, why the kernel encrypts the snapshot
> > > > for uswsusp?
> > > 
> > > There have some functions be locked-down because there have no
> > > appropriate mechanisms to check the integrity of writing data. If
> > > the snapshot image can be encrypted and authentication, then the
> > > uswsusp interface is avaiable for userspace. We do not need to lock
> > > it down.
> > Ok, I got your point. To be honest, I like your implementation to treat
> > the snapshot data seperately except that it might cause small overhead
> > when traversing large number of snapshot and make snapshot logic a little
> > coupling with crypto logic. Let me send our v2 using the crypto-before-blockio
> > for review and maybe change to your solution using the encapsulated APIs in
> > crypto_hibernate.c.
> 
> Because sometimes I stick on other topics...
> 
> If you are urgent for pushing your encryption solution. Please do not
> consider too much on signature check. Just complete your patches and push
> to kernel mainline. I will follow your result to respin my signature work.
> 
Thanks, I had just sent out another version before I noticed your
comment yesterday; let me address your concerns in v3,
but please feel free to comment on v2.
Thanks,
Yu

> Thanks
> Joey Lee
diff mbox

Patch

diff --git a/kernel/power/power.h b/kernel/power/power.h
index 660aac3..637695c 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -207,6 +207,7 @@  extern int swsusp_swap_in_use(void);
 #define SF_PLATFORM_MODE	1
 #define SF_NOCOMPRESS_MODE	2
 #define SF_CRC32_MODE	        4
+#define SF_ENCRYPT_MODE		8
 
 /* kernel/power/hibernate.c */
 extern int swsusp_check(void);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index c2bcf97..2b6b3d0 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -102,14 +102,16 @@  struct swap_map_handle {
 	unsigned int k;
 	unsigned long reqd_free_pages;
 	u32 crc32;
+	bool crypto;
 };
 
 struct swsusp_header {
 	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
-	              sizeof(u32)];
+			sizeof(u32) - HIBERNATE_SALT_BYTES];
 	u32	crc32;
 	sector_t image;
 	unsigned int flags;	/* Flags to pass to the "boot" kernel */
+	char salt[HIBERNATE_SALT_BYTES];
 	char	orig_sig[10];
 	char	sig[10];
 } __packed;
@@ -127,6 +129,53 @@  struct swsusp_extent {
 	unsigned long end;
 };
 
+/* For encryption/decryption. */
+static struct hibernation_crypto *hibernation_crypto_ops;
+
+void set_hibernation_ops(struct hibernation_crypto *ops)
+{
+	hibernation_crypto_ops = ops;
+}
+EXPORT_SYMBOL_GPL(set_hibernation_ops);
+
+static int crypto_data(const char *inbuf,
+			    int inlen,
+			    char *outbuf,
+			    int outlen,
+			    bool encrypt,
+			    int page_idx)
+{
+	if (hibernation_crypto_ops &&
+	    hibernation_crypto_ops->crypto_data)
+		return hibernation_crypto_ops->crypto_data(inbuf,
+			inlen, outbuf, outlen, encrypt, page_idx);
+	else
+		return -EINVAL;
+}
+
+static void crypto_save(void *outbuf)
+{
+	if (hibernation_crypto_ops &&
+	    hibernation_crypto_ops->save)
+		hibernation_crypto_ops->save(outbuf);
+}
+
+static void crypto_restore(void *inbuf)
+{
+	if (hibernation_crypto_ops &&
+	    hibernation_crypto_ops->restore)
+		hibernation_crypto_ops->restore(inbuf);
+}
+
+static int crypto_init(bool suspend)
+{
+	if (hibernation_crypto_ops &&
+	    hibernation_crypto_ops->init)
+		return hibernation_crypto_ops->init(suspend);
+	else
+		return -EINVAL;
+}
+
 static struct rb_root swsusp_extents = RB_ROOT;
 
 static int swsusp_extents_insert(unsigned long swap_offset)
@@ -318,6 +367,10 @@  static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
 		swsusp_header->flags = flags;
 		if (flags & SF_CRC32_MODE)
 			swsusp_header->crc32 = handle->crc32;
+		if (handle->crypto) {
+			swsusp_header->flags |= SF_ENCRYPT_MODE;
+			crypto_save((void *)swsusp_header->salt);
+		}
 		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
 				      swsusp_resume_block, swsusp_header, NULL);
 	} else {
@@ -535,11 +588,12 @@  static int save_image(struct swap_map_handle *handle,
 {
 	unsigned int m;
 	int ret;
-	int nr_pages;
+	int nr_pages, crypto_page_idx;
 	int err2;
 	struct hib_bio_batch hb;
 	ktime_t start;
 	ktime_t stop;
+	void *tmp = NULL, *crypt_buf = NULL;
 
 	hib_init_batch(&hb);
 
@@ -549,12 +603,33 @@  static int save_image(struct swap_map_handle *handle,
 	if (!m)
 		m = 1;
 	nr_pages = 0;
+	crypto_page_idx = 0;
+	if (handle->crypto) {
+		crypt_buf = (void *)get_zeroed_page(GFP_KERNEL);
+		if (!crypt_buf)
+			return -ENOMEM;
+	}
+
 	start = ktime_get();
 	while (1) {
 		ret = snapshot_read_next(snapshot);
 		if (ret <= 0)
 			break;
-		ret = swap_write_page(handle, data_of(*snapshot), &hb);
+		tmp = data_of(*snapshot);
+		if (handle->crypto) {
+			/* Encryption before submit_io.*/
+			ret = crypto_data(data_of(*snapshot),
+					  PAGE_SIZE,
+					  crypt_buf,
+					  PAGE_SIZE,
+					  true,
+					  crypto_page_idx);
+			if (ret)
+				goto out;
+			crypto_page_idx++;
+			tmp = crypt_buf;
+		}
+		ret = swap_write_page(handle, tmp, &hb);
 		if (ret)
 			break;
 		if (!(nr_pages % m))
@@ -569,6 +644,9 @@  static int save_image(struct swap_map_handle *handle,
 	if (!ret)
 		pr_info("Image saving done\n");
 	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
+ out:
+	if (crypt_buf)
+		free_page((unsigned long)crypt_buf);
 	return ret;
 }
 
@@ -671,7 +749,7 @@  static int save_image_lzo(struct swap_map_handle *handle,
 {
 	unsigned int m;
 	int ret = 0;
-	int nr_pages;
+	int nr_pages, crypto_page_idx;
 	int err2;
 	struct hib_bio_batch hb;
 	ktime_t start;
@@ -767,6 +845,7 @@  static int save_image_lzo(struct swap_map_handle *handle,
 	if (!m)
 		m = 1;
 	nr_pages = 0;
+	crypto_page_idx = 0;
 	start = ktime_get();
 	for (;;) {
 		for (thr = 0; thr < nr_threads; thr++) {
@@ -835,7 +914,25 @@  static int save_image_lzo(struct swap_map_handle *handle,
 			for (off = 0;
 			     off < LZO_HEADER + data[thr].cmp_len;
 			     off += PAGE_SIZE) {
-				memcpy(page, data[thr].cmp + off, PAGE_SIZE);
+				if (handle->crypto) {
+					/*
+					 * Encrypt the compressed data
+					 * before writing it to the
+					 * block device.
+					 */
+					ret = crypto_data(data[thr].cmp + off,
+							  PAGE_SIZE,
+							  page,
+							  PAGE_SIZE,
+							  true,
+							  crypto_page_idx);
+					if (ret)
+						goto out_finish;
+					crypto_page_idx++;
+				} else {
+					memcpy(page, data[thr].cmp + off,
+						PAGE_SIZE);
+				}
 
 				ret = swap_write_page(handle, page, &hb);
 				if (ret)
@@ -909,6 +1006,7 @@  int swsusp_write(unsigned int flags)
 	int error;
 
 	pages = snapshot_get_image_size();
+	memset(&handle, 0, sizeof(struct swap_map_handle));
 	error = get_swap_writer(&handle);
 	if (error) {
 		pr_err("Cannot get swap writer\n");
@@ -922,6 +1020,9 @@  int swsusp_write(unsigned int flags)
 		}
 	}
 	memset(&snapshot, 0, sizeof(struct snapshot_handle));
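+	/*
+	 * Try to set up encryption. If no cipher backend is registered,
+	 * or it fails to initialize, the image is written out unencrypted.
+	 */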
+	if (!crypto_init(true))
+		/* The image needs to be encrypted. */
+		handle.crypto = true;
 	error = snapshot_read_next(&snapshot);
 	if (error < PAGE_SIZE) {
 		if (error >= 0)
@@ -1059,7 +1160,8 @@  static int load_image(struct swap_map_handle *handle,
 	ktime_t stop;
 	struct hib_bio_batch hb;
 	int err2;
-	unsigned nr_pages;
+	unsigned nr_pages, crypto_page_idx;
+	void *crypt_buf = NULL;
 
 	hib_init_batch(&hb);
 
@@ -1069,18 +1171,42 @@  static int load_image(struct swap_map_handle *handle,
 	if (!m)
 		m = 1;
 	nr_pages = 0;
+	crypto_page_idx = 0;
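+	/* Bounce buffer for the ciphertext pages read back from swap. */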
+	if (handle->crypto) {
+		crypt_buf = (void *)get_zeroed_page(GFP_KERNEL);
+		if (!crypt_buf)
+			return -ENOMEM;
+	}
 	start = ktime_get();
 	for ( ; ; ) {
 		ret = snapshot_write_next(snapshot);
 		if (ret <= 0)
 			break;
-		ret = swap_read_page(handle, data_of(*snapshot), &hb);
+		if (handle->crypto)
+			ret = swap_read_page(handle, crypt_buf, &hb);
+		else
+			ret = swap_read_page(handle, data_of(*snapshot), &hb);
 		if (ret)
 			break;
 		if (snapshot->sync_read)
 			ret = hib_wait_io(&hb);
 		if (ret)
 			break;
+		if (handle->crypto) {
+			/*
+			 * Decrypt the data that was just
+			 * read from the block device.
+			 */
+			ret = crypto_data(crypt_buf, PAGE_SIZE,
+					  data_of(*snapshot),
+					  PAGE_SIZE,
+					  false,
+					  crypto_page_idx);
+			if (ret)
+				break;
+			crypto_page_idx++;
+		}
 		if (!(nr_pages % m))
 			pr_info("Image loading progress: %3d%%\n",
 				nr_pages / m * 10);
@@ -1097,6 +1223,8 @@  static int load_image(struct swap_map_handle *handle,
 			ret = -ENODATA;
 	}
 	swsusp_show_speed(start, stop, nr_to_read, "Read");
+	if (crypt_buf)
+		free_page((unsigned long)crypt_buf);
 	return ret;
 }
 
@@ -1164,7 +1292,7 @@  static int load_image_lzo(struct swap_map_handle *handle,
 	struct hib_bio_batch hb;
 	ktime_t start;
 	ktime_t stop;
-	unsigned nr_pages;
+	unsigned nr_pages, crypto_page_idx;
 	size_t off;
 	unsigned i, thr, run_threads, nr_threads;
 	unsigned ring = 0, pg = 0, ring_size = 0,
@@ -1173,6 +1301,7 @@  static int load_image_lzo(struct swap_map_handle *handle,
 	unsigned char **page = NULL;
 	struct dec_data *data = NULL;
 	struct crc_data *crc = NULL;
+	void *first_page = NULL;
 
 	hib_init_batch(&hb);
 
@@ -1278,6 +1407,18 @@  static int load_image_lzo(struct swap_map_handle *handle,
 	}
 	want = ring_size = i;
 
+	/*
+	 * The first page of each compressed block holds the length of
+	 * the compressed data. Decrypting it in place would clobber the
+	 * read ring buffer, so allocate a separate page to decrypt into.
+	 */
+	if (handle->crypto) {
+		first_page = (void *)get_zeroed_page(GFP_KERNEL);
+		if (!first_page) {
+			ret = -ENOMEM;
+			goto out_clean;
+		}
+	}
 	pr_info("Using %u thread(s) for decompression\n", nr_threads);
 	pr_info("Loading and decompressing image data (%u pages)...\n",
 		nr_to_read);
@@ -1285,6 +1426,7 @@  static int load_image_lzo(struct swap_map_handle *handle,
 	if (!m)
 		m = 1;
 	nr_pages = 0;
+	crypto_page_idx = 0;
 	start = ktime_get();
 
 	ret = snapshot_write_next(snapshot);
@@ -1336,7 +1478,24 @@  static int load_image_lzo(struct swap_map_handle *handle,
 		}
 
 		for (thr = 0; have && thr < nr_threads; thr++) {
-			data[thr].cmp_len = *(size_t *)page[pg];
+			if (handle->crypto) {
+				/*
+				 * Decrypt the first page of each
+				 * compressed block to recover the
+				 * compressed data length. Do not
+				 * advance crypto_page_idx here: the
+				 * same page is decrypted again, at
+				 * the same index, when it is copied
+				 * into data[thr].cmp below.
+				 */
+				ret = crypto_data(page[pg],
+						  PAGE_SIZE,
+						  first_page,
+						  PAGE_SIZE,
+						  false,
+						  crypto_page_idx);
+				if (ret)
+					goto out_finish;
+				data[thr].cmp_len = *(size_t *)first_page;
+			} else {
+				data[thr].cmp_len = *(size_t *)page[pg];
+			}
 			if (unlikely(!data[thr].cmp_len ||
 			             data[thr].cmp_len >
 			             lzo1x_worst_compress(LZO_UNC_SIZE))) {
@@ -1358,8 +1517,26 @@  static int load_image_lzo(struct swap_map_handle *handle,
 			for (off = 0;
 			     off < LZO_HEADER + data[thr].cmp_len;
 			     off += PAGE_SIZE) {
-				memcpy(data[thr].cmp + off,
-				       page[pg], PAGE_SIZE);
+				if (handle->crypto) {
+					/*
+					 * Decrypt the compressed data
+					 * before handing it to the
+					 * decompression threads.
+					 */
+					ret = crypto_data(page[pg],
+							  PAGE_SIZE,
+							  data[thr].cmp + off,
+							  PAGE_SIZE,
+							  false,
+							  crypto_page_idx);
+					if (ret)
+						goto out_finish;
+					crypto_page_idx++;
+				} else {
+					memcpy(data[thr].cmp + off,
+						page[pg], PAGE_SIZE);
+				}
 				have--;
 				want++;
 				if (++pg >= ring_size)
@@ -1452,6 +1629,8 @@  static int load_image_lzo(struct swap_map_handle *handle,
 out_clean:
 	for (i = 0; i < ring_size; i++)
 		free_page((unsigned long)page[i]);
+	if (first_page)
+		free_page((unsigned long)first_page);
 	if (crc) {
 		if (crc->thr)
 			kthread_stop(crc->thr);
@@ -1482,6 +1661,7 @@  int swsusp_read(unsigned int *flags_p)
 	struct swsusp_info *header;
 
 	memset(&snapshot, 0, sizeof(struct snapshot_handle));
+	memset(&handle, 0, sizeof(struct swap_map_handle));
 	error = snapshot_write_next(&snapshot);
 	if (error < PAGE_SIZE)
 		return error < 0 ? error : -EFAULT;
@@ -1489,6 +1669,16 @@  int swsusp_read(unsigned int *flags_p)
 	error = get_swap_reader(&handle, flags_p);
 	if (error)
 		goto end;
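+	/*
+	 * The header flags tell us whether the image was encrypted when
+	 * it was written; if so, the cipher must be ready before any
+	 * image data is read.
+	 */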
+	if (*flags_p & SF_ENCRYPT_MODE) {
+		error = crypto_init(false);
+		if (!error) {
+			/* The image has been encrypted. */
+			handle.crypto = true;
+		} else {
+			pr_err("Failed to init cipher during resume.\n");
+			goto end;
+		}
+	}
 	if (!error)
 		error = swap_read_page(&handle, header, NULL);
 	if (!error) {
@@ -1526,6 +1716,9 @@  int swsusp_check(void)
 
 		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
 			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
+			/* Read back the salt the image kernel stored in the header. */
+			if (swsusp_header->flags & SF_ENCRYPT_MODE)
+				crypto_restore((void *)&swsusp_header->salt);
 			/* Reset swap signature now */
 			error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
 						swsusp_resume_block,