@@ -590,7 +590,7 @@ cmp(const void *s1, const void *s2)
(bs1->bs_version == XFS_BULKSTAT_VERSION_V5 &&
bs2->bs_version == XFS_BULKSTAT_VERSION_V5));
- return (bs2->bs_extents - bs1->bs_extents);
+ return (bs2->bs_extents64 - bs1->bs_extents64);
}
/*
@@ -655,7 +655,7 @@ fsrfs(char *mntdir, xfs_ino_t startino, int targetrange)
for (p = buf, endp = (buf + buflenout); p < endp ; p++) {
/* Do some obvious checks now */
if (((p->bs_mode & S_IFMT) != S_IFREG) ||
- (p->bs_extents < 2))
+ (p->bs_extents64 < 2))
continue;
ret = -xfrog_bulkstat_v5_to_v1(&fsxfd, &bs1, p);
@@ -57,6 +57,7 @@ dump_bulkstat(
printf("\tbs_sick = 0x%"PRIx16"\n", bstat->bs_sick);
printf("\tbs_checked = 0x%"PRIx16"\n", bstat->bs_checked);
printf("\tbs_mode = 0%"PRIo16"\n", bstat->bs_mode);
+ printf("\tbs_extents64 = %"PRIu64"\n", bstat->bs_extents64);
};
static void
@@ -56,6 +56,9 @@ xfrog_bulkstat_single5(
if (flags & ~(XFS_BULK_IREQ_SPECIAL))
return -EINVAL;
+ if (xfd->fsgeom.flags & XFS_FSOP_GEOM_FLAGS_NREXT64)
+ flags |= XFS_BULK_IREQ_NREXT64;
+
ret = xfrog_bulkstat_alloc_req(1, ino, &req);
if (ret)
return ret;
@@ -73,6 +76,12 @@ xfrog_bulkstat_single5(
}
memcpy(bulkstat, req->bulkstat, sizeof(struct xfs_bulkstat));
+
+ if (!(xfd->fsgeom.flags & XFS_FSOP_GEOM_FLAGS_NREXT64)) {
+ bulkstat->bs_extents64 = bulkstat->bs_extents;
+ bulkstat->bs_extents = 0;
+ }
+
free:
free(req);
return ret;
@@ -129,6 +138,7 @@ xfrog_bulkstat_single(
switch (error) {
case -EOPNOTSUPP:
case -ENOTTY:
+ assert(!(xfd->fsgeom.flags & XFS_FSOP_GEOM_FLAGS_NREXT64));
xfd->flags |= XFROG_FLAG_BULKSTAT_FORCE_V1;
break;
}
@@ -259,10 +269,23 @@ xfrog_bulkstat5(
struct xfs_bulkstat_req *req)
{
int ret;
+ int i;
+
+ if (xfd->fsgeom.flags & XFS_FSOP_GEOM_FLAGS_NREXT64)
+ req->hdr.flags |= XFS_BULK_IREQ_NREXT64;
ret = ioctl(xfd->fd, XFS_IOC_BULKSTAT, req);
if (ret)
return -errno;
+
+ if (!(xfd->fsgeom.flags & XFS_FSOP_GEOM_FLAGS_NREXT64)) {
+ for (i = 0; i < req->hdr.ocount; i++) {
+ req->bulkstat[i].bs_extents64 =
+ req->bulkstat[i].bs_extents;
+ req->bulkstat[i].bs_extents = 0;
+ }
+ }
+
return 0;
}
@@ -316,6 +339,7 @@ xfrog_bulkstat(
switch (error) {
case -EOPNOTSUPP:
case -ENOTTY:
+ assert(!(xfd->fsgeom.flags & XFS_FSOP_GEOM_FLAGS_NREXT64));
xfd->flags |= XFROG_FLAG_BULKSTAT_FORCE_V1;
break;
}
@@ -342,6 +366,7 @@ xfrog_bulkstat_v5_to_v1(
const struct xfs_bulkstat *bs5)
{
if (bs5->bs_aextents > UINT16_MAX ||
+ bs5->bs_extents64 > INT32_MAX ||
cvt_off_fsb_to_b(xfd, bs5->bs_extsize_blks) > UINT32_MAX ||
cvt_off_fsb_to_b(xfd, bs5->bs_cowextsize_blks) > UINT32_MAX ||
time_too_big(bs5->bs_atime) ||
@@ -366,7 +391,7 @@ xfrog_bulkstat_v5_to_v1(
bs1->bs_blocks = bs5->bs_blocks;
bs1->bs_xflags = bs5->bs_xflags;
bs1->bs_extsize = cvt_off_fsb_to_b(xfd, bs5->bs_extsize_blks);
- bs1->bs_extents = bs5->bs_extents;
+ bs1->bs_extents = bs5->bs_extents64;
bs1->bs_gen = bs5->bs_gen;
bs1->bs_projid_lo = bs5->bs_projectid & 0xFFFF;
bs1->bs_forkoff = bs5->bs_forkoff;
@@ -407,7 +432,6 @@ xfrog_bulkstat_v1_to_v5(
bs5->bs_blocks = bs1->bs_blocks;
bs5->bs_xflags = bs1->bs_xflags;
bs5->bs_extsize_blks = cvt_b_to_off_fsbt(xfd, bs1->bs_extsize);
- bs5->bs_extents = bs1->bs_extents;
bs5->bs_gen = bs1->bs_gen;
bs5->bs_projectid = bstat_get_projid(bs1);
bs5->bs_forkoff = bs1->bs_forkoff;
@@ -415,6 +439,7 @@ xfrog_bulkstat_v1_to_v5(
bs5->bs_checked = bs1->bs_checked;
bs5->bs_cowextsize_blks = cvt_b_to_off_fsbt(xfd, bs1->bs_cowextsize);
bs5->bs_aextents = bs1->bs_aextents;
+ bs5->bs_extents64 = bs1->bs_extents;
}
/* Allocate a bulkstat request. Returns zero or a negative error code. */
@@ -393,7 +393,7 @@ struct xfs_bulkstat {
uint32_t bs_extsize_blks; /* extent size hint, blocks */
uint32_t bs_nlink; /* number of links */
- uint32_t bs_extents; /* number of extents */
+ uint32_t bs_extents; /* 32-bit data fork extent counter */
uint32_t bs_aextents; /* attribute number of extents */
uint16_t bs_version; /* structure version */
uint16_t bs_forkoff; /* inode fork offset in bytes */
@@ -402,8 +402,9 @@ struct xfs_bulkstat {
uint16_t bs_checked; /* checked inode metadata */
uint16_t bs_mode; /* type and mode */
uint16_t bs_pad2; /* zeroed */
+ uint64_t bs_extents64; /* 64-bit data fork extent counter */
- uint64_t bs_pad[7]; /* zeroed */
+ uint64_t bs_pad[6]; /* zeroed */
};
#define XFS_BULKSTAT_VERSION_V1 (1)
@@ -484,8 +485,19 @@ struct xfs_bulk_ireq {
*/
#define XFS_BULK_IREQ_SPECIAL (1 << 1)
-#define XFS_BULK_IREQ_FLAGS_ALL (XFS_BULK_IREQ_AGNO | \
- XFS_BULK_IREQ_SPECIAL)
+/*
+ * Return data fork extent count via xfs_bulkstat->bs_extents64 field and assign
+ * 0 to xfs_bulkstat->bs_extents when the flag is set. Otherwise, use
+ * xfs_bulkstat->bs_extents for returning data fork extent count and set
+ * xfs_bulkstat->bs_extents64 to 0. In the second case, return -EOVERFLOW and
+ * assign 0 to xfs_bulkstat->bs_extents if data fork extent count is larger than
+ * XFS_MAX_EXTCNT_DATA_FORK_OLD.
+ */
+#define XFS_BULK_IREQ_NREXT64 (1 << 3)
+
+#define XFS_BULK_IREQ_FLAGS_ALL (XFS_BULK_IREQ_AGNO | \
+ XFS_BULK_IREQ_SPECIAL | \
+ XFS_BULK_IREQ_NREXT64)
/* Operate on the root directory inode. */
#define XFS_BULK_IREQ_SPECIAL_ROOT (1)
@@ -94,6 +94,14 @@ field.
This flag may not be set at the same time as the
.B XFS_BULK_IREQ_AGNO
flag.
+.TP
+.B XFS_BULK_IREQ_NREXT64
+If this flag is set, the data fork extent count is returned via the
+bs_extents64 field and 0 is assigned to bs_extents. Otherwise, the data fork
+extent count is returned via the bs_extents field and 0 is assigned to
+bs_extents64. In the second case, -EOVERFLOW is returned and 0 is assigned to
+bs_extents if the data fork extent count is larger than 2^31 - 1. This flag
+may be set independently of whether other flags have been set.
.RE
.PP
.I hdr.icount
@@ -161,8 +169,9 @@ struct xfs_bulkstat {
uint16_t bs_checked;
uint16_t bs_mode;
uint16_t bs_pad2;
+ uint64_t bs_extents64;
- uint64_t bs_pad[7];
+ uint64_t bs_pad[6];
};
.fi
.in