
[v4] btrfs-progs: fix page align issue for lzo compress in restore

Message ID 1411461294-24093-1-git-send-email-guihc.fnst@cn.fujitsu.com (mailing list archive)
State Not Applicable

Commit Message

Gui Hecheng Sept. 23, 2014, 8:34 a.m. UTC
When running restore on an lzo-compressed filesystem, "bad compress length"
errors are encountered.
This is caused by a page alignment problem in @decompress_lzo,
as follows:
		|------| |----|-| |------|...|------|
		  page         ^    page       page
			       |
			  3 bytes left

	When lzo compresses pages in RAM, it ensures that
	the 4-byte segment length is stored within a single page.
	So when 3 (or fewer) bytes are left at the end of a page,
	the next 4-byte length is stored at the start of the
	following page instead.
	But @decompress_lzo does not skip to the start of the
	next page; it reads the next 4 bytes straight across the
	page boundary, so it fetches a random value and reports a
	"bad compress length".

So we check page alignment each time after a piece of data has been
decompressed and before the next @len is fetched.
If the current page has fewer than 4 bytes left,
the next @len is fetched at the start of the next page.
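
To make the alignment check concrete, here is a minimal standalone sketch
(not part of the patch itself); it reuses the macro and helper shape that
the diff below introduces, and the numbers in main() are made up to
reproduce the 3-bytes-left case from the diagram:

	#include <stdio.h>
	#include <stddef.h>

	#define LZO_LEN 4
	#define PAGE_CACHE_SIZE 4096UL
	#define PAGE_CACHE_MASK (~(PAGE_CACHE_SIZE - 1))
	#define PAGE_CACHE_ALIGN(addr) (((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

	static void align_if_need(size_t *tot_in, size_t *in_len)
	{
		size_t tot_in_aligned = PAGE_CACHE_ALIGN(*tot_in);
		size_t bytes_left = tot_in_aligned - *tot_in;

		/* enough room left in this page for the next 4-byte length */
		if (bytes_left >= LZO_LEN)
			return;

		/* skip the padding: the next length starts on the next page */
		*in_len += bytes_left;
		*tot_in = tot_in_aligned;
	}

	int main(void)
	{
		/* illustrative values: 3 bytes are left in the current 4KiB page */
		size_t tot_in = 4093;
		size_t in_len = 0;

		align_if_need(&tot_in, &in_len);
		printf("tot_in=%zu in_len=%zu\n", tot_in, in_len);	/* 4096 and 3 */
		return 0;
	}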

Signed-off-by: Gui Hecheng <guihc.fnst@cn.fujitsu.com>
Reviewed-by: Marc Dietrich <marvin24@gmx.de>
---
changelog
	v1->v2: adopt alignment check method suggested by Marc
	v2->v3: make code more readable
	v3->v4: keep type safety & reformat comments
---
 cmds-restore.c | 27 +++++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)

Comments

Marc Dietrich Oct. 14, 2014, 8:06 a.m. UTC | #1
This hasn't landed in any btrfs-progs branch I could find. Any update?

Marc

On Tuesday, 23 September 2014, 16:34:54, Gui Hecheng wrote:
> When running restore on an lzo-compressed filesystem, "bad compress length"
> errors are encountered.
> This is caused by a page alignment problem in @decompress_lzo,
> 
> as follows:
> 		|------| |----|-| |------|...|------|
> 		  page         ^    page       page
> 			       |
> 			  3 bytes left
> 
> 	When lzo compresses pages in RAM, it ensures that
> 	the 4-byte segment length is stored within a single page.
> 	So when 3 (or fewer) bytes are left at the end of a page,
> 	the next 4-byte length is stored at the start of the
> 	following page instead.
> 	But @decompress_lzo does not skip to the start of the
> 	next page; it reads the next 4 bytes straight across the
> 	page boundary, so it fetches a random value and reports a
> 	"bad compress length".
> 
> So we check page alignment each time after a piece of data has been
> decompressed and before the next @len is fetched.
> If the current page has fewer than 4 bytes left,
> the next @len is fetched at the start of the next page.
> 
> Signed-off-by: Gui Hecheng <guihc.fnst@cn.fujitsu.com>
> Reviewed-by: Marc Dietrich <marvin24@gmx.de>
> ---
> changelog
> 	v1->v2: adopt alignment check method suggested by Marc
> 	v2->v3: make code more readable
> 	v3->v4: keep type safety & reformat comments
> ---
>  cmds-restore.c | 27 +++++++++++++++++++++++++--
>  1 file changed, 25 insertions(+), 2 deletions(-)
> 
> diff --git a/cmds-restore.c b/cmds-restore.c
> index e09acc4..1fe2df0 100644
> --- a/cmds-restore.c
> +++ b/cmds-restore.c
> @@ -56,7 +56,10 @@ static int get_xattrs = 0;
>  static int dry_run = 0;
> 
>  #define LZO_LEN 4
> -#define PAGE_CACHE_SIZE 4096
> +#define PAGE_CACHE_SIZE 4096UL
> +#define PAGE_CACHE_MASK (~(PAGE_CACHE_SIZE - 1))
> +#define PAGE_CACHE_ALIGN(addr) (((addr) + PAGE_CACHE_SIZE - 1)	\
> +							& PAGE_CACHE_MASK)
>  #define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
> 
>  static int decompress_zlib(char *inbuf, char *outbuf, u64 compress_len,
> @@ -93,6 +96,26 @@ static inline size_t read_compress_length(unsigned char *buf)
>  	return le32_to_cpu(dlen);
>  }
> 
> +static void align_if_need(size_t *tot_in, size_t *in_len)
> +{
> +	size_t tot_in_aligned;
> +	size_t bytes_left;
> +
> +	tot_in_aligned = PAGE_CACHE_ALIGN(*tot_in);
> +	bytes_left = tot_in_aligned - *tot_in;
> +
> +	if (bytes_left >= LZO_LEN)
> +		return;
> +
> +	/*
> +	 * The LZO_LEN bytes are guaranteed to be in one page as a whole,
> +	 * so if a page has fewer than LZO_LEN bytes left, the LZO_LEN bytes
> +	 * should be fetched at the start of the next page
> +	 */
> +	*in_len += bytes_left;
> +	*tot_in = tot_in_aligned;
> +}
> +
>  static int decompress_lzo(unsigned char *inbuf, char *outbuf, u64 compress_len,
>  			  u64 *decompress_len)
>  {
> @@ -135,8 +158,8 @@ static int decompress_lzo(unsigned char *inbuf, char *outbuf, u64 compress_len,
>  		}
>  		out_len += new_len;
>  		outbuf += new_len;
> +		align_if_need(&tot_in, &in_len);
>  		inbuf += in_len;
> -		tot_in += in_len;
>  	}
> 
>  	*decompress_len = out_len;
David Sterba Oct. 14, 2014, 9:32 a.m. UTC | #2
On Tue, Oct 14, 2014 at 10:06:16AM +0200, Marc Dietrich wrote:
> This hasn't landed in any btrfs-progs branch I could find. Any update?

I had it tagged for review and found something that needs fixing: the
PAGE_CACHE_SIZE is hardcoded to 4k, which will break on filesystems with
larger sectors (e.g. on powerpc machines). I'll schedule the patch for
after 3.17, with a fix.
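
For reference, one possible direction for such a fix is to align to the
filesystem's sector size instead of a hardcoded 4k. A rough sketch only,
not the actual follow-up patch; the sectorsize parameter, and how it would
be passed down from the superblock, are assumptions (LZO_LEN as in the
patch):

	static void align_if_need(size_t *tot_in, size_t *in_len, size_t sectorsize)
	{
		/* round *tot_in up to the next sectorsize boundary */
		size_t tot_in_aligned = ((*tot_in + sectorsize - 1) / sectorsize) * sectorsize;
		size_t bytes_left = tot_in_aligned - *tot_in;

		if (bytes_left >= LZO_LEN)
			return;

		/* the next 4-byte length starts at the next sector boundary */
		*in_len += bytes_left;
		*tot_in = tot_in_aligned;
	}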
Gui Hecheng Nov. 27, 2014, 3:02 a.m. UTC | #3
On Tue, 2014-10-14 at 11:32 +0200, David Sterba wrote:
> On Tue, Oct 14, 2014 at 10:06:16AM +0200, Marc Dietrich wrote:
> > This hasn't landed in any btrfs-progs branch I could find. Any update?
> 
> I had it tagged for review and found something that needs fixing: the
> PAGE_CACHE_SIZE is hardcoded to 4k, which will break on filesystems with
> larger sectors (e.g. on powerpc machines). I'll schedule the patch for
> after 3.17, with a fix.

Hi David,

I see that this patch is not yet in the latest integration branch. How is
the fix going?

-Gui

David Sterba Jan. 2, 2015, 3:09 p.m. UTC | #4
On Thu, Nov 27, 2014 at 11:02:38AM +0800, Gui Hecheng wrote:
> I see that this patch is not yet in the latest integration branch. How is
> the fix going?

The patch was still buggy; a fix will land in 3.18.1.

Patch

diff --git a/cmds-restore.c b/cmds-restore.c
index e09acc4..1fe2df0 100644
--- a/cmds-restore.c
+++ b/cmds-restore.c
@@ -56,7 +56,10 @@  static int get_xattrs = 0;
 static int dry_run = 0;
 
 #define LZO_LEN 4
-#define PAGE_CACHE_SIZE 4096
+#define PAGE_CACHE_SIZE 4096UL
+#define PAGE_CACHE_MASK (~(PAGE_CACHE_SIZE - 1))
+#define PAGE_CACHE_ALIGN(addr) (((addr) + PAGE_CACHE_SIZE - 1)	\
+							& PAGE_CACHE_MASK)
 #define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
 
 static int decompress_zlib(char *inbuf, char *outbuf, u64 compress_len,
@@ -93,6 +96,26 @@  static inline size_t read_compress_length(unsigned char *buf)
 	return le32_to_cpu(dlen);
 }
 
+static void align_if_need(size_t *tot_in, size_t *in_len)
+{
+	size_t tot_in_aligned;
+	size_t bytes_left;
+
+	tot_in_aligned = PAGE_CACHE_ALIGN(*tot_in);
+	bytes_left = tot_in_aligned - *tot_in;
+
+	if (bytes_left >= LZO_LEN)
+		return;
+
+	/*
+	 * The LZO_LEN bytes are guaranteed to be in one page as a whole,
+	 * so if a page has fewer than LZO_LEN bytes left, the LZO_LEN bytes
+	 * should be fetched at the start of the next page
+	 */
+	*in_len += bytes_left;
+	*tot_in = tot_in_aligned;
+}
+
 static int decompress_lzo(unsigned char *inbuf, char *outbuf, u64 compress_len,
 			  u64 *decompress_len)
 {
@@ -135,8 +158,8 @@  static int decompress_lzo(unsigned char *inbuf, char *outbuf, u64 compress_len,
 		}
 		out_len += new_len;
 		outbuf += new_len;
+		align_if_need(&tot_in, &in_len);
 		inbuf += in_len;
-		tot_in += in_len;
 	}
 
 	*decompress_len = out_len;
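
For completeness, the failure described in the commit message shows up when
restoring files from a filesystem that had been mounted with -o compress=lzo;
an illustrative invocation (device and target path are made up) looks like:

	btrfs restore /dev/sdb1 /mnt/recovered

With the unpatched tool, any compressed extent in which a segment ends within
LZO_LEN bytes of a page boundary produces the "bad compress length" error.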