
[04/10] libxfs: retain ifork_ops when flushing inode

Message ID 155594791533.115924.7540619376750686973.stgit@magnolia (mailing list archive)
State Accepted, archived
Series xfsprogs-5.0: fix various problems

Commit Message

Darrick J. Wong April 22, 2019, 3:45 p.m. UTC
From: Darrick J. Wong <darrick.wong@oracle.com>

Retain the ifork ops used to validate the inode so that we can use the
same one to iflush it.  xfs_repair phase 6 can use multiple transactions
to fix various inode problems, which means that the inode might not be
fully fixed when each transaction commits.

This can be a particular problem if there's a shortform directory with
both invalid directory entries and incorrect i8count.  Phase 3 will set
the parent inode to "0" to signal to phase 6 that it needs to reset the
parent and i8count, but phase 6 starts a transaction to junk the bad
entries, which fails to commit because the parent is invalid:

fixing i8count in inode 69022994673
Invalid inode number 0x0
xfs_dir_ino_validate: XFS_ERROR_REPORT
Metadata corruption detected at 0x464eb0, inode 0x10121750f1 data fork
xfs_repair: warning - iflush_int failed (-117)

And thus the inode fixes never get written out.

Reported-by: Arkadiusz Miskiewicz <arekm@maven.pl>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
---
 include/xfs_inode.h |    1 +
 libxfs/rdwr.c       |    1 +
 libxfs/util.c       |    2 +-
 3 files changed, 3 insertions(+), 1 deletion(-)
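
The mechanism is small: remember which fork verifier admitted the inode at
libxfs_iget() time and consult that same one again at iflush time, instead of
always re-checking with the strict xfs_default_ifork_ops.  Below is a minimal,
self-contained sketch of that "remember the verifier" pattern in plain C.
Every name in it (obj_ops, obj_load, obj_flush, and so on) is a hypothetical
stand-in, not the libxfs API; the rough equivalents in the patch are
xfs_ifork_ops, the new ip->i_fork_ops field, libxfs_iget() and
libxfs_iflush_int().

	/*
	 * Sketch of the "remember which verifier admitted this object"
	 * pattern.  All names are hypothetical; this is not libxfs code.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct obj;

	struct obj_ops {
		const char	*name;
		bool		(*verify)(const struct obj *o);
	};

	struct obj {
		int			payload;
		const struct obj_ops	*ops;	/* verifier used at load time */
	};

	/* Strict default: rejects anything that is still mid-repair. */
	static bool verify_strict(const struct obj *o)  { return o->payload > 0; }
	/* Relaxed verifier a repair tool might pass while fixing things up. */
	static bool verify_relaxed(const struct obj *o) { return o->payload >= 0; }

	static const struct obj_ops strict_ops  = { "strict",  verify_strict };
	static const struct obj_ops relaxed_ops = { "relaxed", verify_relaxed };

	/* Load: validate with the caller-chosen ops and remember them. */
	static int obj_load(struct obj *o, int payload, const struct obj_ops *ops)
	{
		o->payload = payload;
		o->ops = ops;		/* analogue of ip->i_fork_ops = ifork_ops */
		return ops->verify(o) ? 0 : -1;
	}

	/* Flush: re-verify with the *same* ops, not the strict default. */
	static int obj_flush(const struct obj *o)
	{
		if (!o->ops->verify(o)) {
			fprintf(stderr, "flush rejected by %s verifier\n",
				o->ops->name);
			return -1;
		}
		printf("flushed payload=%d (%s verifier)\n",
		       o->payload, o->ops->name);
		return 0;
	}

	int main(void)
	{
		struct obj o;

		/* A repair tool loads a half-fixed object with relaxed ops... */
		if (obj_load(&o, 0, &relaxed_ops) == 0)
			obj_flush(&o);	/* ...and the flush honours that choice. */

		/* Forcing the strict default at flush time rejects it instead. */
		o.ops = &strict_ops;
		obj_flush(&o);
		return 0;
	}

The last two calls in main() illustrate the behavioural difference the patch is
after: an object deliberately admitted by a relaxed verifier still flushes,
whereas re-verifying with the strict default at flush time rejects it, which
corresponds to the -EFSCORRUPTED (err 117) failures xfs_repair was hitting.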

Comments

Bill O'Donnell April 22, 2019, 7:40 p.m. UTC | #1
On Mon, Apr 22, 2019 at 08:45:15AM -0700, Darrick J. Wong wrote:
> From: Darrick J. Wong <darrick.wong@oracle.com>
> 
> Retain the ifork ops used to validate the inode so that we can use the
> same one to iflush it.  xfs_repair phase 6 can use multiple transactions
> to fix various inode problems, which means that the inode might not be
> fully fixed when each transaction commits.
> 
> This can be a particular problem if there's a shortform directory with
> both invalid directory entries and incorrect i8count.  Phase 3 will set
> the parent inode to "0" to signal to phase 6 that it needs to reset the
> parent and i8count, but phase 6 starts a transaction to junk the bad
> entries, which fails to commit because the parent is invalid:
> 
> fixing i8count in inode 69022994673
> Invalid inode number 0x0
> xfs_dir_ino_validate: XFS_ERROR_REPORT
> Metadata corruption detected at 0x464eb0, inode 0x10121750f1 data fork
> xfs_repair: warning - iflush_int failed (-117)
> 
> And thus the inode fixes never get written out.
> 
> Reported-by: Arkadiusz Miskiewicz <arekm@maven.pl>
> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>

Reviewed-by: Bill O'Donnell <billodo@redhat.com>

> ---
>  include/xfs_inode.h |    1 +
>  libxfs/rdwr.c       |    1 +
>  libxfs/util.c       |    2 +-
>  3 files changed, 3 insertions(+), 1 deletion(-)
> 
> 
> diff --git a/include/xfs_inode.h b/include/xfs_inode.h
> index 79ec3a2d..e1e8b430 100644
> --- a/include/xfs_inode.h
> +++ b/include/xfs_inode.h
> @@ -51,6 +51,7 @@ typedef struct xfs_inode {
>  
>  	xfs_fsize_t		i_size;		/* in-memory size */
>  	const struct xfs_dir_ops *d_ops;	/* directory ops vector */
> +	struct xfs_ifork_ops	*i_fork_ops;	/* fork verifiers */
>  	struct inode		i_vnode;
>  } xfs_inode_t;
>  
> diff --git a/libxfs/rdwr.c b/libxfs/rdwr.c
> index a00360e7..69d5abb2 100644
> --- a/libxfs/rdwr.c
> +++ b/libxfs/rdwr.c
> @@ -1391,6 +1391,7 @@ libxfs_iget(
>  		return error;
>  	}
>  
> +	ip->i_fork_ops = ifork_ops;
>  	if (!libxfs_inode_verify_forks(ip, ifork_ops)) {
>  		libxfs_irele(ip);
>  		return -EFSCORRUPTED;
> diff --git a/libxfs/util.c b/libxfs/util.c
> index 4ac151e6..2e3b9d51 100644
> --- a/libxfs/util.c
> +++ b/libxfs/util.c
> @@ -422,7 +422,7 @@ libxfs_iflush_int(xfs_inode_t *ip, xfs_buf_t *bp)
>  		VFS_I(ip)->i_version++;
>  
>  	/* Check the inline fork data before we write out. */
> -	if (!libxfs_inode_verify_forks(ip, &xfs_default_ifork_ops))
> +	if (!libxfs_inode_verify_forks(ip, ip->i_fork_ops))
>  		return -EFSCORRUPTED;
>  
>  	/*
>
Eric Sandeen April 22, 2019, 7:45 p.m. UTC | #2
On 4/22/19 10:45 AM, Darrick J. Wong wrote:
> From: Darrick J. Wong <darrick.wong@oracle.com>
> 
> Retain the ifork ops used to validate the inode so that we can use the
> same one to iflush it.  xfs_repair phase 6 can use multiple transactions
> to fix various inode problems, which means that the inode might not be
> fully fixed when each transaction commits.
> 
> This can be a particular problem if there's a shortform directory with
> both invalid directory entries and incorrect i8count.  Phase 3 will set
> the parent inode to "0" to signal to phase 6 that it needs to reset the
> parent and i8count, but phase 6 starts a transaction to junk the bad
> entries, which fails to commit because the parent is invalid:
> 
> fixing i8count in inode 69022994673
> Invalid inode number 0x0
> xfs_dir_ino_validate: XFS_ERROR_REPORT
> Metadata corruption detected at 0x464eb0, inode 0x10121750f1 data fork
> xfs_repair: warning - iflush_int failed (-117)
> 
> And thus the inode fixes never get written out.
> 
> Reported-by: Arkadiusz Miskiewicz <arekm@maven.pl>
> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>

I might have combined this with the next patch, but I guess
I can see the rationale either way.  :)

Reviewed-by: Eric Sandeen <sandeen@redhat.com>

> ---
>  include/xfs_inode.h |    1 +
>  libxfs/rdwr.c       |    1 +
>  libxfs/util.c       |    2 +-
>  3 files changed, 3 insertions(+), 1 deletion(-)
> 
> 
> diff --git a/include/xfs_inode.h b/include/xfs_inode.h
> index 79ec3a2d..e1e8b430 100644
> --- a/include/xfs_inode.h
> +++ b/include/xfs_inode.h
> @@ -51,6 +51,7 @@ typedef struct xfs_inode {
>  
>  	xfs_fsize_t		i_size;		/* in-memory size */
>  	const struct xfs_dir_ops *d_ops;	/* directory ops vector */
> +	struct xfs_ifork_ops	*i_fork_ops;	/* fork verifiers */
>  	struct inode		i_vnode;
>  } xfs_inode_t;
>  
> diff --git a/libxfs/rdwr.c b/libxfs/rdwr.c
> index a00360e7..69d5abb2 100644
> --- a/libxfs/rdwr.c
> +++ b/libxfs/rdwr.c
> @@ -1391,6 +1391,7 @@ libxfs_iget(
>  		return error;
>  	}
>  
> +	ip->i_fork_ops = ifork_ops;
>  	if (!libxfs_inode_verify_forks(ip, ifork_ops)) {
>  		libxfs_irele(ip);
>  		return -EFSCORRUPTED;
> diff --git a/libxfs/util.c b/libxfs/util.c
> index 4ac151e6..2e3b9d51 100644
> --- a/libxfs/util.c
> +++ b/libxfs/util.c
> @@ -422,7 +422,7 @@ libxfs_iflush_int(xfs_inode_t *ip, xfs_buf_t *bp)
>  		VFS_I(ip)->i_version++;
>  
>  	/* Check the inline fork data before we write out. */
> -	if (!libxfs_inode_verify_forks(ip, &xfs_default_ifork_ops))
> +	if (!libxfs_inode_verify_forks(ip, ip->i_fork_ops))
>  		return -EFSCORRUPTED;
>  
>  	/*
>
Arkadiusz Miƛkiewicz Oct. 2, 2019, 6 a.m. UTC | #3
On 22/04/2019 17:45, Darrick J. Wong wrote:
> From: Darrick J. Wong <darrick.wong@oracle.com>
> 
> Retain the ifork ops used to validate the inode so that we can use the
> same one to iflush it.  xfs_repair phase 6 can use multiple transactions
> to fix various inode problems, which means that the inode might not be
> fully fixed when each transaction commits.
> 
> This can be a particular problem if there's a shortform directory with
> both invalid directory entries and incorrect i8count.  Phase 3 will set
> the parent inode to "0" to signal to phase 6 that it needs to reset the
> parent and i8count, but phase 6 starts a transaction to junk the bad
> entries, which fails to commit because the parent is invalid:
> 
> fixing i8count in inode 69022994673
> Invalid inode number 0x0
> xfs_dir_ino_validate: XFS_ERROR_REPORT
> Metadata corruption detected at 0x464eb0, inode 0x10121750f1 data fork
> xfs_repair: warning - iflush_int failed (-117)
> 
> And thus the inode fixes never get written out.


I just hit something similar again, with xfsprogs built from the current
for-next branch (b94a69ac being the latest commit in it):

[...]
bogus .. inode number (0) in directory inode 36510278809, clearing inode number
[...]
entry "langs" in dir ino 34365014856 doesn't have a .. entry, will set it in ino 36510278809.
[...]
setting .. in sf dir inode 36510278809 to 34365014856
Metadata corruption detected at 0x460e1c, inode 0x8802ea499 data fork
xfs_repair: warning - iflush_int failed (-117)
[...]
Phase 7 - verify and correct link counts...
Invalid inode number 0x0
xfs_dir_ino_validate: XFS_ERROR_REPORT
Metadata corruption detected at 0x460da8, inode 0x8802ea499 data fork

fatal error -- couldn't map inode 36510278809, err = 117


Full log output below:

> # /tmp/qq/sbin/xfs_repair -vvv -o bhash=256000 /dev/sdd1
> Phase 1 - find and verify superblock...
>         - reporting progress in intervals of 15 minutes
> Phase 2 - using internal log
>         - zero log...
> zero_log: head block 3022391 tail block 3022391
>         - scan filesystem freespace and inode maps...
>         - 14:26:44: scanning filesystem freespace - 39 of 39 allocation groups done
>         - found root inode chunk
> libxfs_bcache: 0xdb0900
> Max supported entries = 2048000
> Max utilized entries = 40516
> Active entries = 40516
> Hash table size = 256000
> Hits = 0
> Misses = 40517
> Hit ratio =  0.00
> MRU 0 entries =  40516 (100%)
> MRU 1 entries =      0 (  0%)
> MRU 2 entries =      0 (  0%)
> MRU 3 entries =      0 (  0%)
> MRU 4 entries =      0 (  0%)
> MRU 5 entries =      0 (  0%)
> MRU 6 entries =      0 (  0%)
> MRU 7 entries =      0 (  0%)
> MRU 8 entries =      0 (  0%)
> MRU 9 entries =      0 (  0%)
> MRU 10 entries =      0 (  0%)
> MRU 11 entries =      0 (  0%)
> MRU 12 entries =      0 (  0%)
> MRU 13 entries =      0 (  0%)
> MRU 14 entries =      0 (  0%)
> MRU 15 entries =      0 (  0%)
> Dirty MRU 16 entries =      0 (  0%)
> Hash buckets with   0 entries 218473 (  0%)
> Hash buckets with   1 entries  34691 ( 85%)
> Hash buckets with   2 entries   2690 ( 13%)
> Hash buckets with   3 entries    139 (  1%)
> Hash buckets with   4 entries      7 (  0%)
> Phase 3 - for each AG...
>         - scan and clear agi unlinked lists...
>         - 14:26:44: scanning agi unlinked lists - 39 of 39 allocation groups done
>         - process known inodes and perform inode discovery...
>         - agno = 15
>         - agno = 0
>         - agno = 30
>         - agno = 16
>         - agno = 31
>         - agno = 1
>         - agno = 17
>         - agno = 32
> bogus .. inode number (0) in directory inode 36510278809, clearing inode number
> bogus .. inode number (0) in directory inode 36512379414, clearing inode number
>         - agno = 18
>         - agno = 19
>         - agno = 33
>         - agno = 20
>         - agno = 34
>         - agno = 21
>         - agno = 35
>         - agno = 2
>         - agno = 22
>         - agno = 36
>         - agno = 23
>         - agno = 37
>         - agno = 24
>         - agno = 3
>         - agno = 38
>         - agno = 25
>         - agno = 4
>         - agno = 26
>         - agno = 5
>         - agno = 27
>         - agno = 6
>         - agno = 28
>         - agno = 7
>         - agno = 29
>         - agno = 8
>         - agno = 9
>         - agno = 10
>         - agno = 11
>         - agno = 12
>         - agno = 13
>         - agno = 14
>         - 17:27:19: process known inodes and inode discovery - 173158784 of 173158784 inodes done
>         - process newly discovered inodes...
>         - 17:27:19: process newly discovered inodes - 39 of 39 allocation groups done
> libxfs_bcache: 0xdb0900
> Max supported entries = 2048000
> Max utilized entries = 2048000
> Active entries = 2047963
> Hash table size = 256000
> Hits = 33258462
> Misses = 39986202
> Hit ratio = 45.41
> MRU 0 entries =  21826 (  1%)
> MRU 1 entries =      0 (  0%)
> MRU 2 entries =  43908 (  2%)
> MRU 3 entries = 218917 ( 10%)
> MRU 4 entries =  17304 (  0%)
> MRU 5 entries =  45429 (  2%)
> MRU 6 entries = 1097133 ( 53%)
> MRU 7 entries =  68828 (  3%)
> MRU 8 entries =      0 (  0%)
> MRU 9 entries =      0 (  0%)
> MRU 10 entries =      0 (  0%)
> MRU 11 entries = 318994 ( 15%)
> MRU 12 entries = 104225 (  5%)
> MRU 13 entries = 110173 (  5%)
> MRU 14 entries =   1226 (  0%)
> MRU 15 entries =      0 (  0%)
> Dirty MRU 16 entries =      0 (  0%)
> Hash buckets with   0 entries     75 (  0%)
> Hash buckets with   1 entries    669 (  0%)
> Hash buckets with   2 entries   2673 (  0%)
> Hash buckets with   3 entries   7083 (  1%)
> Hash buckets with   4 entries  14564 (  2%)
> Hash buckets with   5 entries  23510 (  5%)
> Hash buckets with   6 entries  31292 (  9%)
> Hash buckets with   7 entries  35847 ( 12%)
> Hash buckets with   8 entries  35808 ( 13%)
> Hash buckets with   9 entries  32245 ( 14%)
> Hash buckets with  10 entries  25460 ( 12%)
> Hash buckets with  11 entries  18399 (  9%)
> Hash buckets with  12 entries  12437 (  7%)
> Hash buckets with  13 entries   7502 (  4%)
> Hash buckets with  14 entries   4257 (  2%)
> Hash buckets with  15 entries   2172 (  1%)
> Hash buckets with  16 entries   1114 (  0%)
> Hash buckets with  17 entries    542 (  0%)
> Hash buckets with  18 entries    200 (  0%)
> Hash buckets with  19 entries     88 (  0%)
> Hash buckets with  20 entries     39 (  0%)
> Hash buckets with  21 entries     14 (  0%)
> Hash buckets with  22 entries      8 (  0%)
> Hash buckets with  23 entries      2 (  0%)
> Phase 4 - check for duplicate blocks...
>         - setting up duplicate extent list...
>         - 17:27:21: setting up duplicate extent list - 39 of 39 allocation groups done
>         - check for inodes claiming duplicate blocks...
>         - agno = 30
>         - agno = 15
>         - agno = 0
>         - agno = 16
>         - agno = 1
>         - agno = 31
>         - agno = 17
> bogus .. inode number (0) in directory inode 36510278809, clearing inode number
> bogus .. inode number (0) in directory inode 36512379414, clearing inode number
>         - agno = 18
>         - agno = 32
>         - agno = 19
>         - agno = 20
>         - agno = 33
>         - agno = 21
>         - agno = 34
>         - agno = 22
>         - agno = 35
>         - agno = 23
>         - agno = 36
>         - agno = 2
>         - agno = 37
>         - agno = 24
>         - agno = 25
>         - agno = 38
>         - agno = 26
>         - agno = 27
>         - agno = 28
>         - agno = 3
>         - agno = 29
>         - agno = 4
>         - agno = 5
>         - agno = 6
>         - agno = 7
>         - agno = 8
>         - agno = 9
>         - agno = 10
>         - agno = 11
>         - agno = 12
>         - agno = 13
>         - agno = 14
>         - 20:44:23: check for inodes claiming duplicate blocks - 173158784 of 173158784 inodes done
> libxfs_bcache: 0xdb0900
> Max supported entries = 2048000
> Max utilized entries = 2048000
> Active entries = 2047999
> Hash table size = 256000
> Hits = 64756246
> Misses = 82886582
> Hit ratio = 43.86
> MRU 0 entries =  19324 (  0%)
> MRU 1 entries =      0 (  0%)
> MRU 2 entries =  47524 (  2%)
> MRU 3 entries = 217973 ( 10%)
> MRU 4 entries =  16829 (  0%)
> MRU 5 entries =  45729 (  2%)
> MRU 6 entries = 1071173 ( 52%)
> MRU 7 entries =      0 (  0%)
> MRU 8 entries =      0 (  0%)
> MRU 9 entries =      0 (  0%)
> MRU 10 entries =      0 (  0%)
> MRU 11 entries = 400426 ( 19%)
> MRU 12 entries =  97317 (  4%)
> MRU 13 entries = 130545 (  6%)
> MRU 14 entries =   1159 (  0%)
> MRU 15 entries =      0 (  0%)
> Dirty MRU 16 entries =      0 (  0%)
> Hash buckets with   0 entries     73 (  0%)
> Hash buckets with   1 entries    664 (  0%)
> Hash buckets with   2 entries   2642 (  0%)
> Hash buckets with   3 entries   7175 (  1%)
> Hash buckets with   4 entries  14586 (  2%)
> Hash buckets with   5 entries  23505 (  5%)
> Hash buckets with   6 entries  31069 (  9%)
> Hash buckets with   7 entries  35990 ( 12%)
> Hash buckets with   8 entries  36151 ( 14%)
> Hash buckets with   9 entries  31999 ( 14%)
> Hash buckets with  10 entries  25381 ( 12%)
> Hash buckets with  11 entries  18361 (  9%)
> Hash buckets with  12 entries  12288 (  7%)
> Hash buckets with  13 entries   7494 (  4%)
> Hash buckets with  14 entries   4346 (  2%)
> Hash buckets with  15 entries   2285 (  1%)
> Hash buckets with  16 entries   1126 (  0%)
> Hash buckets with  17 entries    502 (  0%)
> Hash buckets with  18 entries    211 (  0%)
> Hash buckets with  19 entries     95 (  0%)
> Hash buckets with  20 entries     28 (  0%)
> Hash buckets with  21 entries     16 (  0%)
> Hash buckets with  22 entries     12 (  0%)
> Hash buckets with  23 entries      1 (  0%)
> Phase 5 - rebuild AG headers and trees...
>         - agno = 0
>         - agno = 1
>         - agno = 2
>         - agno = 3
>         - agno = 4
>         - agno = 5
>         - agno = 6
>         - agno = 7
>         - agno = 8
>         - agno = 9
>         - agno = 10
>         - agno = 11
>         - agno = 12
>         - agno = 13
>         - agno = 14
>         - agno = 15
>         - agno = 16
>         - agno = 17
>         - agno = 18
>         - agno = 19
>         - agno = 20
>         - agno = 21
>         - agno = 22
>         - agno = 23
>         - agno = 24
>         - agno = 25
>         - agno = 26
>         - agno = 27
>         - agno = 28
>         - agno = 29
>         - agno = 30
>         - agno = 31
>         - agno = 32
>         - agno = 33
>         - agno = 34
>         - agno = 35
>         - agno = 36
>         - agno = 37
>         - agno = 38
>         - 20:44:35: rebuild AG headers and trees - 39 of 39 allocation groups done
>         - reset superblock...
> libxfs_bcache: 0xdb0900
> Max supported entries = 2048000
> Max utilized entries = 2048000
> Active entries = 2047979
> Hash table size = 256000
> Hits = 64756472
> Misses = 82918282
> Hit ratio = 43.85
> MRU 0 entries =  19304 (  0%)
> MRU 1 entries =      0 (  0%)
> MRU 2 entries =  47524 (  2%)
> MRU 3 entries = 217973 ( 10%)
> MRU 4 entries =  16829 (  0%)
> MRU 5 entries =  45729 (  2%)
> MRU 6 entries = 1071173 ( 52%)
> MRU 7 entries =      0 (  0%)
> MRU 8 entries =      0 (  0%)
> MRU 9 entries =      0 (  0%)
> MRU 10 entries =      0 (  0%)
> MRU 11 entries = 400426 ( 19%)
> MRU 12 entries =  97317 (  4%)
> MRU 13 entries = 130545 (  6%)
> MRU 14 entries =   1159 (  0%)
> MRU 15 entries =      0 (  0%)
> Dirty MRU 16 entries =      0 (  0%)
> Hash buckets with   0 entries     73 (  0%)
> Hash buckets with   1 entries    678 (  0%)
> Hash buckets with   2 entries   2631 (  0%)
> Hash buckets with   3 entries   7189 (  1%)
> Hash buckets with   4 entries  14612 (  2%)
> Hash buckets with   5 entries  23535 (  5%)
> Hash buckets with   6 entries  31083 (  9%)
> Hash buckets with   7 entries  35948 ( 12%)
> Hash buckets with   8 entries  35995 ( 14%)
> Hash buckets with   9 entries  32122 ( 14%)
> Hash buckets with  10 entries  25412 ( 12%)
> Hash buckets with  11 entries  18278 (  9%)
> Hash buckets with  12 entries  12245 (  7%)
> Hash buckets with  13 entries   7570 (  4%)
> Hash buckets with  14 entries   4320 (  2%)
> Hash buckets with  15 entries   2299 (  1%)
> Hash buckets with  16 entries   1131 (  0%)
> Hash buckets with  17 entries    497 (  0%)
> Hash buckets with  18 entries    231 (  0%)
> Hash buckets with  19 entries     95 (  0%)
> Hash buckets with  20 entries     31 (  0%)
> Hash buckets with  21 entries     15 (  0%)
> Hash buckets with  22 entries      9 (  0%)
> Hash buckets with  23 entries      1 (  0%)
> Phase 6 - check inode connectivity...
>         - resetting contents of realtime bitmap and summary inodes
>         - traversing filesystem ...
>         - agno = 0
>         - agno = 1
>         - agno = 2
>         - agno = 3
>         - agno = 4
>         - agno = 5
>         - agno = 6
>         - agno = 7
>         - agno = 8
>         - agno = 9
>         - agno = 10
>         - agno = 11
>         - agno = 12
> entry "servmask" in dir ino 25777842447 doesn't have a .. entry, will set it in ino 36512379414.
>         - agno = 13
>         - agno = 14
>         - agno = 15
>         - agno = 16
> entry "langs" in dir ino 34365014856 doesn't have a .. entry, will set it in ino 36510278809.
>         - agno = 17
>         - agno = 18
>         - agno = 19
>         - agno = 20
>         - agno = 21
>         - agno = 22
>         - agno = 23
>         - agno = 24
>         - agno = 25
>         - agno = 26
>         - agno = 27
>         - agno = 28
>         - agno = 29
>         - agno = 30
>         - agno = 31
>         - agno = 32
>         - agno = 33
>         - agno = 34
>         - agno = 35
>         - agno = 36
>         - agno = 37
>         - agno = 38
> setting .. in sf dir inode 36512379414 to 25777842447
> Metadata corruption detected at 0x460e1c, inode 0x8804eb216 data fork
> xfs_repair: warning - iflush_int failed (-117)
> setting .. in sf dir inode 36510278809 to 34365014856
> Metadata corruption detected at 0x460e1c, inode 0x8802ea499 data fork
> xfs_repair: warning - iflush_int failed (-117)
>         - traversal finished ...
>         - moving disconnected inodes to lost+found ...
> libxfs_bcache: 0xdb0900
> Max supported entries = 2048000
> Max utilized entries = 2048000
> Active entries = 2047971
> Hash table size = 256000
> Hits = 167418187
> Misses = 119244070
> Hit ratio = 58.40
> MRU 0 entries =  80752 (  3%)
> MRU 1 entries =      0 (  0%)
> MRU 2 entries =   1830 (  0%)
> MRU 3 entries = 500840 ( 24%)
> MRU 4 entries =  18369 (  0%)
> MRU 5 entries =      0 (  0%)
> MRU 6 entries = 1110024 ( 54%)
> MRU 7 entries = 133410 (  6%)
> MRU 8 entries =      0 (  0%)
> MRU 9 entries =      0 (  0%)
> MRU 10 entries = 202746 (  9%)
> MRU 11 entries =      0 (  0%)
> MRU 12 entries =      0 (  0%)
> MRU 13 entries =      0 (  0%)
> MRU 14 entries =      0 (  0%)
> MRU 15 entries =      0 (  0%)
> Dirty MRU 16 entries =      0 (  0%)
> Hash buckets with   0 entries     72 (  0%)
> Hash buckets with   1 entries    655 (  0%)
> Hash buckets with   2 entries   2512 (  0%)
> Hash buckets with   3 entries   7015 (  1%)
> Hash buckets with   4 entries  14124 (  2%)
> Hash buckets with   5 entries  23498 (  5%)
> Hash buckets with   6 entries  31301 (  9%)
> Hash buckets with   7 entries  36059 ( 12%)
> Hash buckets with   8 entries  36530 ( 14%)
> Hash buckets with   9 entries  32219 ( 14%)
> Hash buckets with  10 entries  25732 ( 12%)
> Hash buckets with  11 entries  18590 (  9%)
> Hash buckets with  12 entries  12175 (  7%)
> Hash buckets with  13 entries   7484 (  4%)
> Hash buckets with  14 entries   4068 (  2%)
> Hash buckets with  15 entries   2087 (  1%)
> Hash buckets with  16 entries   1073 (  0%)
> Hash buckets with  17 entries    487 (  0%)
> Hash buckets with  18 entries    194 (  0%)
> Hash buckets with  19 entries     81 (  0%)
> Hash buckets with  20 entries     34 (  0%)
> Hash buckets with  21 entries      7 (  0%)
> Hash buckets with  22 entries      2 (  0%)
> Hash buckets with  23 entries      1 (  0%)
> Phase 7 - verify and correct link counts...
> Invalid inode number 0x0
> xfs_dir_ino_validate: XFS_ERROR_REPORT
> Metadata corruption detected at 0x460da8, inode 0x8802ea499 data fork
> 
> fatal error -- couldn't map inode 36510278809, err = 117
>

Patch

diff --git a/include/xfs_inode.h b/include/xfs_inode.h
index 79ec3a2d..e1e8b430 100644
--- a/include/xfs_inode.h
+++ b/include/xfs_inode.h
@@ -51,6 +51,7 @@ typedef struct xfs_inode {
 
 	xfs_fsize_t		i_size;		/* in-memory size */
 	const struct xfs_dir_ops *d_ops;	/* directory ops vector */
+	struct xfs_ifork_ops	*i_fork_ops;	/* fork verifiers */
 	struct inode		i_vnode;
 } xfs_inode_t;
 
diff --git a/libxfs/rdwr.c b/libxfs/rdwr.c
index a00360e7..69d5abb2 100644
--- a/libxfs/rdwr.c
+++ b/libxfs/rdwr.c
@@ -1391,6 +1391,7 @@ libxfs_iget(
 		return error;
 	}
 
+	ip->i_fork_ops = ifork_ops;
 	if (!libxfs_inode_verify_forks(ip, ifork_ops)) {
 		libxfs_irele(ip);
 		return -EFSCORRUPTED;
diff --git a/libxfs/util.c b/libxfs/util.c
index 4ac151e6..2e3b9d51 100644
--- a/libxfs/util.c
+++ b/libxfs/util.c
@@ -422,7 +422,7 @@ libxfs_iflush_int(xfs_inode_t *ip, xfs_buf_t *bp)
 		VFS_I(ip)->i_version++;
 
 	/* Check the inline fork data before we write out. */
-	if (!libxfs_inode_verify_forks(ip, &xfs_default_ifork_ops))
+	if (!libxfs_inode_verify_forks(ip, ip->i_fork_ops))
 		return -EFSCORRUPTED;
 
 	/*