[v3] btrfs-progs: fix page align issue for lzo compress in restore

Message ID 1411376306-10228-1-git-send-email-guihc.fnst@cn.fujitsu.com
State Not Applicable

Commit Message

Gui Hecheng Sept. 22, 2014, 8:58 a.m. UTC
When running restore under lzo compression, "bad compress length"
problems are encountered.
This is because @decompress_lzo has a page alignment problem,
as follows:
		|------| |----|-| |------|...|------|
		  page         ^    page       page
			       |
			  3 bytes left

	When lzo compresses pages in RAM, it makes sure that
	the 4-byte len field lies within a single page.
	So when 3 (or fewer) bytes are left at the end of a
	page, the 4-byte len is stored at the start of the
	next page.
	But @decompress_lzo does not skip to the start of the
	next page; it reads the 4 bytes at the current offset,
	which straddle the page boundary, so a garbage value
	is fetched as a "bad compress length".

So we check the page alignment every time after a piece of data is
decompressed and before the next @len is fetched.
If the current page has fewer than 4 bytes left,
we fetch the next @len at the start of the next page.
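
For example, with the 4096-byte page size used here: if the compressed
stream ends a segment at offset 4094 within a page, only 2 bytes remain,
so the next @len was written at the start of the next page. We therefore
add those 2 leftover bytes to @in_len (so @inbuf skips them) and round
@tot_in up to 4096 before reading @len again.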

Signed-off-by: Gui Hecheng <guihc.fnst@cn.fujitsu.com>
Reviewed-by: Marc Dietrich <marvin24@gmx.de>
---
changelog
	v1->v2: adopt alignment check method suggested by Marc
	v2->v3: make code more readable
---
 cmds-restore.c | 27 ++++++++++++++++++++++++++-
 1 file changed, 26 insertions(+), 1 deletion(-)

Comments

David Sterba Sept. 22, 2014, 1:41 p.m. UTC | #1
On Mon, Sep 22, 2014 at 04:58:26PM +0800, Gui Hecheng wrote:
> So we check page alignment every time before we are going to
> fetch the next @len and after the former piece of data is decompressed.
> If the current page that we reach has less than 4 bytes left,
> then we should fetch the next @len at the start of next page.

Thanks for the fix.

> --- a/cmds-restore.c
> +++ b/cmds-restore.c
> @@ -57,6 +57,9 @@ static int dry_run = 0;
>  
>  #define LZO_LEN 4
>  #define PAGE_CACHE_SIZE 4096
> +#define PAGE_CACHE_MASK (~(PAGE_CACHE_SIZE - 1))
> +#define PAGE_CACHE_ALIGN(addr) (((addr) + PAGE_CACHE_SIZE - 1)	\
> +							& PAGE_CACHE_MASK)

This is not type-safe; PAGE_CACHE_SIZE should be unsigned long.
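
E.g. something like the following (untested, just to illustrate the
unsigned long suggestion) keeps the whole computation unsigned:

	#define PAGE_CACHE_SIZE 4096UL
	#define PAGE_CACHE_MASK (~(PAGE_CACHE_SIZE - 1))
	#define PAGE_CACHE_ALIGN(addr) (((addr) + PAGE_CACHE_SIZE - 1)	\
						& PAGE_CACHE_MASK)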

>  #define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
>  
>  static int decompress_zlib(char *inbuf, char *outbuf, u64 compress_len,
> @@ -93,6 +96,28 @@ static inline size_t read_compress_length(unsigned char *buf)
>  	return le32_to_cpu(dlen);
>  }
>  
> +static void align_if_need(size_t *tot_in, size_t *in_len)
> +{
> +	int tot_in_aligned;
> +	int bytes_left;
> +
> +	tot_in_aligned = PAGE_CACHE_ALIGN(*tot_in);

size_t -> int, plus other tricks that happen inside the macro

> +	bytes_left = tot_in_aligned - *tot_in;

int = int - size_t

> +
> +	if (bytes_left >= LZO_LEN)
> +		return;
> +
> +	/*
> +	 * The LZO_LEN bytes is guaranteed to be
> +	 * in one page as a whole, so if a page
> +	 * has fewer than LZO_LEN bytes left,
> +	 * the LZO_LEN bytes should be fetched
> +	 * at the start of the next page
> +	 */

Nitpick, the comment can use the whole width of the line

	/*
	 * The LZO_LEN bytes is guaranteed to be in one page as a whole,
	 * so if a page has fewer than LZO_LEN bytes left, the LZO_LEN
	 * bytes should be fetched at the start of the next page
	 */

> +	*in_len += bytes_left;
> +	*tot_in = tot_in_aligned;
> +}
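
With PAGE_CACHE_SIZE made unsigned long as above, the helper could then
keep everything in size_t, e.g. (untested sketch, not necessarily what
gets resent):

	static void align_if_need(size_t *tot_in, size_t *in_len)
	{
		size_t tot_in_aligned = PAGE_CACHE_ALIGN(*tot_in);
		size_t bytes_left = tot_in_aligned - *tot_in;

		if (bytes_left >= LZO_LEN)
			return;

		/* the length field must not straddle a page boundary */
		*in_len += bytes_left;
		*tot_in = tot_in_aligned;
	}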
Gui Hecheng Sept. 23, 2014, 1:26 a.m. UTC | #2
On Mon, 2014-09-22 at 15:41 +0200, David Sterba wrote:
> On Mon, Sep 22, 2014 at 04:58:26PM +0800, Gui Hecheng wrote:
> > So we check page alignment every time before we are going to
> > fetch the next @len and after the former piece of data is decompressed.
> > If the current page that we reach has less than 4 bytes left,
> > then we should fetch the next @len at the start of next page.
> 
> Thanks for the fix.
> 
> > --- a/cmds-restore.c
> > +++ b/cmds-restore.c
> > @@ -57,6 +57,9 @@ static int dry_run = 0;
> >  
> >  #define LZO_LEN 4
> >  #define PAGE_CACHE_SIZE 4096
> > +#define PAGE_CACHE_MASK (~(PAGE_CACHE_SIZE - 1))
> > +#define PAGE_CACHE_ALIGN(addr) (((addr) + PAGE_CACHE_SIZE - 1)	\
> > +							& PAGE_CACHE_MASK)
> 
> This is not type-safe, the PAGE_CACHE_SIZE should be unsigned long.
> 
> >  #define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
> >  
> >  static int decompress_zlib(char *inbuf, char *outbuf, u64 compress_len,
> > @@ -93,6 +96,28 @@ static inline size_t read_compress_length(unsigned char *buf)
> >  	return le32_to_cpu(dlen);
> >  }
> >  
> > +static void align_if_need(size_t *tot_in, size_t *in_len)
> > +{
> > +	int tot_in_aligned;
> > +	int bytes_left;
> > +
> > +	tot_in_aligned = PAGE_CACHE_ALIGN(*tot_in);
> 
> size_t -> int, plus other tricks that happen inside the macro
> 
> > +	bytes_left = tot_in_aligned - *tot_in;
> 
> int = int - size_t
> 
> > +
> > +	if (bytes_left >= LZO_LEN)
> > +		return;
> > +
> > +	/*
> > +	 * The LZO_LEN bytes is guaranteed to be
> > +	 * in one page as a whole, so if a page
> > +	 * has fewer than LZO_LEN bytes left,
> > +	 * the LZO_LEN bytes should be fetched
> > +	 * at the start of the next page
> > +	 */
> 
> Nitpick, the comment can use the whole width of the line
> 
> 	/*
> 	 * The LZO_LEN bytes is guaranteed to be in one page as a whole,
> 	 * so if a page has fewer than LZO_LEN bytes left, the LZO_LEN
> 	 * bytes should be fetched at the start of the next page
> 	 */
> 
> > +	*in_len += bytes_left;
> > +	*tot_in = tot_in_aligned;
> > +}

Thanks David, I will pay more attention to the type-safety issues and
resend.

-Gui


Patch

diff --git a/cmds-restore.c b/cmds-restore.c
index 38a131e..5094b05 100644
--- a/cmds-restore.c
+++ b/cmds-restore.c
@@ -57,6 +57,9 @@  static int dry_run = 0;
 
 #define LZO_LEN 4
 #define PAGE_CACHE_SIZE 4096
+#define PAGE_CACHE_MASK (~(PAGE_CACHE_SIZE - 1))
+#define PAGE_CACHE_ALIGN(addr) (((addr) + PAGE_CACHE_SIZE - 1)	\
+							& PAGE_CACHE_MASK)
 #define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
 
 static int decompress_zlib(char *inbuf, char *outbuf, u64 compress_len,
@@ -93,6 +96,28 @@  static inline size_t read_compress_length(unsigned char *buf)
 	return le32_to_cpu(dlen);
 }
 
+static void align_if_need(size_t *tot_in, size_t *in_len)
+{
+	int tot_in_aligned;
+	int bytes_left;
+
+	tot_in_aligned = PAGE_CACHE_ALIGN(*tot_in);
+	bytes_left = tot_in_aligned - *tot_in;
+
+	if (bytes_left >= LZO_LEN)
+		return;
+
+	/*
+	 * The LZO_LEN bytes is guaranteed to be
+	 * in one page as a whole, so if a page
+	 * has fewer than LZO_LEN bytes left,
+	 * the LZO_LEN bytes should be fetched
+	 * at the start of the next page
+	 */
+	*in_len += bytes_left;
+	*tot_in = tot_in_aligned;
+}
+
 static int decompress_lzo(unsigned char *inbuf, char *outbuf, u64 compress_len,
 			  u64 *decompress_len)
 {
@@ -135,8 +160,8 @@  static int decompress_lzo(unsigned char *inbuf, char *outbuf, u64 compress_len,
 		}
 		out_len += new_len;
 		outbuf += new_len;
+		align_if_need(&tot_in, &in_len);
 		inbuf += in_len;
-		tot_in += in_len;
 	}
 
 	*decompress_len = out_len;