@@ -189,8 +189,14 @@ void xfrog_bstat_to_bulkstat(struct xfs_fd *xfd, struct xfs_bulkstat *bstat,
const struct xfs_bstat *bs1);
struct xfs_inogrp;
-int xfrog_inumbers(struct xfs_fd *xfd, uint64_t *lastino, uint32_t icount,
- struct xfs_inogrp *ubuffer, uint32_t *ocount);
+int xfrog_inumbers(struct xfs_fd *xfd, struct xfs_inumbers_req *req);
+
+struct xfs_inumbers_req *xfrog_inumbers_alloc_req(uint32_t nr,
+ uint64_t startino);
+void xfrog_inumbers_to_inogrp(struct xfs_inogrp *ig1,
+ const struct xfs_inumbers *ig);
+void xfrog_inogrp_to_inumbers(struct xfs_inumbers *ig,
+ const struct xfs_inogrp *ig1);
int xfrog_ag_geometry(int fd, unsigned int agno, struct xfs_ag_geometry *ageo);
@@ -16,9 +16,7 @@ static int
imap_f(int argc, char **argv)
{
struct xfs_fd xfd = XFS_FD_INIT(file->fd);
- struct xfs_inogrp *t;
- uint64_t last = 0;
- uint32_t count;
+ struct xfs_inumbers_req *ireq;
uint32_t nent;
int i;
int error;
@@ -28,17 +26,19 @@ imap_f(int argc, char **argv)
else
nent = atoi(argv[1]);
- t = malloc(nent * sizeof(*t));
- if (!t)
+ ireq = xfrog_inumbers_alloc_req(nent, 0);
+ if (!ireq) {
+ perror("alloc req");
return 0;
+ }
- while ((error = xfrog_inumbers(&xfd, &last, nent, t, &count)) == 0 &&
- count > 0) {
- for (i = 0; i < count; i++) {
- printf(_("ino %10llu count %2d mask %016llx\n"),
- (unsigned long long)t[i].xi_startino,
- t[i].xi_alloccount,
- (unsigned long long)t[i].xi_allocmask);
+ while ((error = xfrog_inumbers(&xfd, ireq)) == 0 &&
+ ireq->hdr.ocount > 0) {
+ for (i = 0; i < ireq->hdr.ocount; i++) {
+ printf(_("ino %10"PRIu64" count %2d mask %016"PRIx64"\n"),
+ ireq->inumbers[i].xi_startino,
+ ireq->inumbers[i].xi_alloccount,
+ ireq->inumbers[i].xi_allocmask);
}
}
@@ -46,7 +46,7 @@ imap_f(int argc, char **argv)
perror("xfsctl(XFS_IOC_FSINUMBERS)");
exitcode = 1;
}
- free(t);
+ free(ireq);
return 0;
}
@@ -674,35 +674,42 @@ static __u64
get_last_inode(void)
{
struct xfs_fd xfd = XFS_FD_INIT(file->fd);
- uint64_t lastip = 0;
+ struct xfs_inumbers_req *ireq;
uint32_t lastgrp = 0;
- uint32_t ocount = 0;
__u64 last_ino;
- struct xfs_inogrp igroup[IGROUP_NR];
+
+ ireq = xfrog_inumbers_alloc_req(IGROUP_NR, 0);
+ if (!ireq) {
+ perror("alloc req");
+ return 0;
+ }
for (;;) {
- if (xfrog_inumbers(&xfd, &lastip, IGROUP_NR, igroup,
- &ocount)) {
+ if (xfrog_inumbers(&xfd, ireq)) {
perror("XFS_IOC_FSINUMBERS");
+ free(ireq);
return 0;
}
/* Did we reach the last inode? */
- if (ocount == 0)
+ if (ireq->hdr.ocount == 0)
break;
/* last inode in igroup table */
- lastgrp = ocount;
+ lastgrp = ireq->hdr.ocount;
}
- if (lastgrp == 0)
+ if (lastgrp == 0) {
+ free(ireq);
return 0;
+ }
lastgrp--;
/* The last inode number in use */
- last_ino = igroup[lastgrp].xi_startino +
- libxfs_highbit64(igroup[lastgrp].xi_allocmask);
+ last_ino = ireq->inumbers[lastgrp].xi_startino +
+ libxfs_highbit64(ireq->inumbers[lastgrp].xi_allocmask);
+ free(ireq);
return last_ino;
}
@@ -384,21 +384,120 @@ xfrog_bulkstat_alloc_req(
return breq;
}
+/* Convert an inumbers (v5) struct to an inogrp (v1) struct. */
+void
+xfrog_inumbers_to_inogrp(
+ struct xfs_inogrp *ig1,
+ const struct xfs_inumbers *ig)
+{
+ ig1->xi_startino = ig->xi_startino;
+ ig1->xi_alloccount = ig->xi_alloccount;
+ ig1->xi_allocmask = ig->xi_allocmask;
+}
+
+/* Convert an inogrp (v1) struct to an inumbers (v5) struct. */
+void
+xfrog_inogrp_to_inumbers(
+ struct xfs_inumbers *ig,
+ const struct xfs_inogrp *ig1)
+{
+ memset(ig, 0, sizeof(*ig));
+ ig->xi_version = XFS_INUMBERS_VERSION_V1;
+
+ ig->xi_startino = ig1->xi_startino;
+ ig->xi_alloccount = ig1->xi_alloccount;
+ ig->xi_allocmask = ig1->xi_allocmask;
+}
+
+static uint64_t xfrog_inum_ino(void *v1_rec)
+{
+ return ((struct xfs_inogrp *)v1_rec)->xi_startino;
+}
+
+static void xfrog_inum_cvt(struct xfs_fd *xfd, void *v5, void *v1)
+{
+ xfrog_inogrp_to_inumbers(v5, v1);
+}
+
+/* Query inode allocation bitmask information using v5 ioctl. */
+static int
+xfrog_inumbers5(
+ struct xfs_fd *xfd,
+ struct xfs_inumbers_req *req)
+{
+ return ioctl(xfd->fd, XFS_IOC_INUMBERS, req);
+}
+
+/* Query inode allocation bitmask information using v1 ioctl. */
+static int
+xfrog_inumbers1(
+ struct xfs_fd *xfd,
+ struct xfs_inumbers_req *req)
+{
+ struct xfs_fsop_bulkreq bulkreq = { 0 };
+ int error;
+
+ error = xfrog_bulkstat_prep_v1_emulation(xfd);
+ if (error)
+ return error;
+
+ error = xfrog_bulk_req_setup(xfd, &req->hdr, &bulkreq,
+ sizeof(struct xfs_inogrp));
+ if (error == XFROG_ITER_ABORT)
+ goto out_teardown;
+ if (error < 0)
+ return error;
+
+ error = ioctl(xfd->fd, XFS_IOC_FSINUMBERS, &bulkreq);
+
+out_teardown:
+ return xfrog_bulk_req_teardown(xfd, &req->hdr, &bulkreq,
+ sizeof(struct xfs_inogrp), xfrog_inum_ino,
+ &req->inumbers, sizeof(struct xfs_inumbers),
+ xfrog_inum_cvt, 64, error);
+}
+
/* Query inode allocation bitmask information. */
int
xfrog_inumbers(
struct xfs_fd *xfd,
- uint64_t *lastino,
- uint32_t icount,
- struct xfs_inogrp *ubuffer,
- uint32_t *ocount)
+ struct xfs_inumbers_req *req)
{
- struct xfs_fsop_bulkreq bulkreq = {
- .lastip = (__u64 *)lastino,
- .icount = icount,
- .ubuffer = ubuffer,
- .ocount = (__s32 *)ocount,
- };
-
- return ioctl(xfd->fd, XFS_IOC_FSINUMBERS, &bulkreq);
+ int error;
+
+ if (xfd->flags & XFROG_FLAG_BULKSTAT_FORCE_V1)
+ goto try_v1;
+
+ error = xfrog_inumbers5(xfd, req);
+ if (error == 0 || (xfd->flags & XFROG_FLAG_BULKSTAT_FORCE_V5))
+		return error;
+
+ /* If the v5 ioctl wasn't found, we punt to v1. */
+ switch (errno) {
+ case EOPNOTSUPP:
+ case ENOTTY:
+ xfd->flags |= XFROG_FLAG_BULKSTAT_FORCE_V1;
+ break;
+ }
+
+try_v1:
+ return xfrog_inumbers1(xfd, req);
+}
+
+/* Allocate an inumbers request. */
+struct xfs_inumbers_req *
+xfrog_inumbers_alloc_req(
+ uint32_t nr,
+ uint64_t startino)
+{
+ struct xfs_inumbers_req *ireq;
+
+ ireq = calloc(1, XFS_INUMBERS_REQ_SIZE(nr));
+ if (!ireq)
+ return NULL;
+
+ ireq->hdr.icount = nr;
+ ireq->hdr.ino = startino;
+
+ return ireq;
}
@@ -42,23 +42,28 @@ xfs_count_inodes_range(
uint64_t last_ino,
uint64_t *count)
{
- struct xfs_inogrp inogrp;
- uint64_t igrp_ino;
+ struct xfs_inumbers_req *ireq;
uint64_t nr = 0;
- uint32_t igrplen = 0;
int error;
ASSERT(!(first_ino & (XFS_INODES_PER_CHUNK - 1)));
ASSERT((last_ino & (XFS_INODES_PER_CHUNK - 1)));
- igrp_ino = first_ino;
- while (!(error = xfrog_inumbers(&ctx->mnt, &igrp_ino, 1, &inogrp,
- &igrplen))) {
- if (igrplen == 0 || inogrp.xi_startino >= last_ino)
+ ireq = xfrog_inumbers_alloc_req(1, first_ino);
+ if (!ireq) {
+ str_info(ctx, descr, _("Insufficient memory; giving up."));
+ return false;
+ }
+
+ while (!(error = xfrog_inumbers(&ctx->mnt, ireq))) {
+ if (ireq->hdr.ocount == 0 ||
+ ireq->inumbers[0].xi_startino >= last_ino)
break;
- nr += inogrp.xi_alloccount;
+ nr += ireq->inumbers[0].xi_alloccount;
}
+ free(ireq);
+
if (error) {
str_errno(ctx, descr);
return false;
@@ -48,7 +48,7 @@
static void
xfs_iterate_inodes_range_check(
struct scrub_ctx *ctx,
- struct xfs_inogrp *inogrp,
+ struct xfs_inumbers *inogrp,
struct xfs_bulkstat *bstat)
{
struct xfs_bulkstat *bs;
@@ -91,13 +91,12 @@ xfs_iterate_inodes_range(
void *arg)
{
struct xfs_handle handle;
- struct xfs_inogrp inogrp;
+ struct xfs_inumbers_req *ireq;
struct xfs_bulkstat_req *breq;
char idescr[DESCR_BUFSZ];
char buf[DESCR_BUFSZ];
struct xfs_bulkstat *bs;
- uint64_t igrp_ino;
- uint32_t igrplen = 0;
+ struct xfs_inumbers *inogrp;
bool moveon = true;
int i;
int error;
@@ -114,29 +113,36 @@ xfs_iterate_inodes_range(
return false;
}
+ ireq = xfrog_inumbers_alloc_req(1, first_ino);
+ if (!ireq) {
+ str_info(ctx, descr, _("Insufficient memory; giving up."));
+ free(breq);
+ return false;
+ }
+ inogrp = &ireq->inumbers[0];
+
/* Find the inode chunk & alloc mask */
- igrp_ino = first_ino;
- error = xfrog_inumbers(&ctx->mnt, &igrp_ino, 1, &inogrp, &igrplen);
- while (!error && igrplen) {
+ error = xfrog_inumbers(&ctx->mnt, ireq);
+ while (!error && ireq->hdr.ocount > 0) {
/*
* We can have totally empty inode chunks on filesystems where
* there are more than 64 inodes per block. Skip these.
*/
- if (inogrp.xi_alloccount == 0)
+ if (inogrp->xi_alloccount == 0)
goto igrp_retry;
- breq->hdr.ino = inogrp.xi_startino;
- breq->hdr.icount = inogrp.xi_alloccount;
+ breq->hdr.ino = inogrp->xi_startino;
+ breq->hdr.icount = inogrp->xi_alloccount;
error = xfrog_bulkstat(&ctx->mnt, breq);
if (error)
str_info(ctx, descr, "%s", strerror_r(errno,
buf, DESCR_BUFSZ));
- xfs_iterate_inodes_range_check(ctx, &inogrp, breq->bulkstat);
+ xfs_iterate_inodes_range_check(ctx, inogrp, breq->bulkstat);
/* Iterate all the inodes. */
for (i = 0, bs = breq->bulkstat;
- i < inogrp.xi_alloccount;
+ i < inogrp->xi_alloccount;
i++, bs++) {
if (bs->bs_ino > last_ino)
goto out;
@@ -150,7 +156,7 @@ xfs_iterate_inodes_range(
case ESTALE:
stale_count++;
if (stale_count < 30) {
- igrp_ino = inogrp.xi_startino;
+ ireq->hdr.ino = inogrp->xi_startino;
goto igrp_retry;
}
snprintf(idescr, DESCR_BUFSZ, "inode %"PRIu64,
@@ -174,8 +180,7 @@ _("Changed too many times during scan; giving up."));
stale_count = 0;
igrp_retry:
- error = xfrog_inumbers(&ctx->mnt, &igrp_ino, 1, &inogrp,
- &igrplen);
+ error = xfrog_inumbers(&ctx->mnt, ireq);
}
err:
@@ -183,6 +188,7 @@ _("Changed too many times during scan; giving up."));
str_errno(ctx, descr);
moveon = false;
}
+ free(ireq);
free(breq);
out:
return moveon;