@@ -258,6 +258,7 @@ AC_HAVE_MEMFD_CLOEXEC
AC_HAVE_MEMFD_NOEXEC_SEAL
AC_HAVE_O_TMPFILE
AC_HAVE_MKOSTEMP_CLOEXEC
+AC_USE_RADIX_TREE_FOR_INUMS
AC_CONFIG_FILES([include/builddefs])
AC_OUTPUT
@@ -138,6 +138,7 @@ HAVE_MEMFD_CLOEXEC = @have_memfd_cloexec@
HAVE_MEMFD_NOEXEC_SEAL = @have_memfd_noexec_seal@
HAVE_O_TMPFILE = @have_o_tmpfile@
HAVE_MKOSTEMP_CLOEXEC = @have_mkostemp_cloexec@
+USE_RADIX_TREE_FOR_INUMS = @use_radix_tree_for_inums@
GCCFLAGS = -funsigned-char -fno-strict-aliasing -Wall
# -Wbitwise -Wno-transparent-union -Wno-old-initializer -Wno-decl
@@ -612,3 +612,23 @@ AC_DEFUN([AC_HAVE_MKOSTEMP_CLOEXEC],
AC_MSG_RESULT(yes)],[AC_MSG_RESULT(no)])
AC_SUBST(have_mkostemp_cloexec)
])
+
+#
+# Check if the radix tree index (unsigned long) is large enough to hold a
+# 64-bit inode number
+#
+AC_DEFUN([AC_USE_RADIX_TREE_FOR_INUMS],
+ [ AC_MSG_CHECKING([if radix tree can store XFS inums])
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([[
+#include <sys/param.h>
+#include <stdint.h>
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+ ]], [[
+ typedef uint64_t xfs_ino_t;
+
+ BUILD_BUG_ON(sizeof(unsigned long) < sizeof(xfs_ino_t));
+ return 0;
+ ]])],[use_radix_tree_for_inums=yes
+ AC_MSG_RESULT(yes)],[AC_MSG_RESULT(no)])
+ AC_SUBST(use_radix_tree_for_inums)
+ ])
@@ -22,6 +22,10 @@ ifeq ($(HAVE_GETFSMAP),yes)
CFILES += freesp.c clearfree.c
endif
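+# Tell the relocation code it can use the radix tree when configure found
+# that unsigned long can hold a 64-bit inode number.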
+ifeq ($(USE_RADIX_TREE_FOR_INUMS),yes)
+LCFLAGS += -DUSE_RADIX_TREE_FOR_INUMS
+endif
+
default: depend $(LTCOMMAND)
include $(BUILDRULES)
@@ -6,7 +6,11 @@
#include "libxfs.h"
#include "libfrog/fsgeom.h"
+#ifdef USE_RADIX_TREE_FOR_INUMS
#include "libfrog/radix-tree.h"
+#else
+#include "libfrog/avl64.h"
+#endif /* USE_RADIX_TREE_FOR_INUMS */
#include "libfrog/paths.h"
#include "command.h"
#include "init.h"
@@ -24,6 +28,7 @@ get_reloc_count(void)
return inode_count;
}
+#ifdef USE_RADIX_TREE_FOR_INUMS
static RADIX_TREE(relocation_data, 0);
bool
@@ -112,3 +117,201 @@ forget_reloc_ino(
{
radix_tree_delete(&relocation_data, ino);
}
+#else
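+/*
+ * Fallback for platforms where the radix tree index (unsigned long) cannot
+ * hold a 64-bit inode number: relocation state is tracked in an avl64 tree
+ * keyed by inode number instead.
+ */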
+struct reloc_node {
+ struct avl64node node;
+ uint64_t ino;
+ struct inode_path *ipath;
+ unsigned int flags;
+};
+
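+/*
+ * reloc_start()/reloc_end() report the key range [ino, ino + 1) covered by
+ * a node, so each tree entry represents exactly one inode.
+ */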
+static uint64_t
+reloc_start(
+ struct avl64node *node)
+{
+ struct reloc_node *rln;
+
+ rln = container_of(node, struct reloc_node, node);
+ return rln->ino;
+}
+
+static uint64_t
+reloc_end(
+ struct avl64node *node)
+{
+ struct reloc_node *rln;
+
+ rln = container_of(node, struct reloc_node, node);
+ return rln->ino + 1;
+}
+
+static struct avl64ops reloc_ops = {
+ reloc_start,
+ reloc_end,
+};
+
+static struct avl64tree_desc relocation_data = {
+ .avl_ops = &reloc_ops,
+};
+
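+/* Have any inodes been added to the relocation data? */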
+bool
+is_reloc_populated(void)
+{
+ return relocation_data.avl_firstino != NULL;
+}
+
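+/* Find the node tracking @ino, or NULL if the inode is not tracked. */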
+static inline struct reloc_node *
+reloc_lookup(
+ uint64_t ino)
+{
+ avl64node_t *node;
+
+ node = avl64_find(&relocation_data, ino);
+ if (!node)
+ return NULL;
+
+ return container_of(node, struct reloc_node, node);
+}
+
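+/*
+ * Allocate and insert a new tracking node for @ino.  Returns NULL if the
+ * allocation or the avl64 insert fails.
+ */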
+static inline struct reloc_node *
+reloc_insert(
+ uint64_t ino)
+{
+ struct reloc_node *rln;
+ avl64node_t *node;
+
+ rln = malloc(sizeof(struct reloc_node));
+ if (!rln)
+ return NULL;
+
+ rln->node.avl_nextino = NULL;
+ rln->ino = ino;
+ rln->ipath = UNLINKED_IPATH;
+ rln->flags = 0;
+
+ node = avl64_insert(&relocation_data, &rln->node);
+ if (node == NULL) {
+ free(rln);
+ return NULL;
+ }
+
+ return rln;
+}
+
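+/* Check whether the given relocation state flag is set on @ino. */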
+bool
+test_reloc_iflag(
+ uint64_t ino,
+ unsigned int flag)
+{
+ struct reloc_node *rln;
+
+ rln = reloc_lookup(ino);
+ if (!rln)
+ return false;
+
+ return rln->flags & flag;
+}
+
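+/*
+ * Set a relocation state flag on @ino, allocating a tracking node the first
+ * time the inode is seen.  Aborts if a new node cannot be created.
+ */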
+void
+set_reloc_iflag(
+ uint64_t ino,
+ unsigned int flag)
+{
+ struct reloc_node *rln;
+
+ rln = reloc_lookup(ino);
+ if (!rln) {
+ rln = reloc_insert(ino);
+ if (!rln)
+ abort();
+ if (flag != INODE_PATH)
+ inode_count++;
+ }
+ if (flag == INODE_PATH)
+ inode_paths++;
+
+ rln->flags |= flag;
+}
+
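+/*
+ * Walk the nodes between @first and @last as returned by avl64_findranges(),
+ * caching the next pointer so the node being visited can be removed safely.
+ */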
+#define avl_for_each_range_safe(pos, n, l, first, last) \
+ for (pos = (first), n = pos->avl_nextino, l = (last)->avl_nextino; \
+ pos != (l); \
+ pos = n, n = pos ? pos->avl_nextino : NULL)
+
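+/*
+ * Search forward through the relocation data for the next inode with a
+ * recorded inode path.  Returns the path, or NULL if there is none.
+ */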
+struct inode_path *
+get_next_reloc_ipath(
+ uint64_t ino)
+{
+ struct avl64node *firstn;
+ struct avl64node *lastn;
+ struct avl64node *pos;
+ struct avl64node *n;
+ struct avl64node *l;
+ struct reloc_node *rln;
+
+ avl64_findranges(&relocation_data, ino - 1, -1ULL, &firstn, &lastn);
+ if (firstn == NULL && lastn == NULL)
+ return NULL;
+
+ avl_for_each_range_safe(pos, n, l, firstn, lastn) {
+ rln = container_of(pos, struct reloc_node, node);
+
+ if (rln->flags & INODE_PATH)
+ return rln->ipath;
+ }
+
+ return NULL;
+}
+
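+/*
+ * Search forward through the relocation data for the next tracked inode
+ * that has no inode path recorded yet.  Returns 0 if there is none.
+ */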
+uint64_t
+get_next_reloc_unlinked(
+ uint64_t ino)
+{
+ struct avl64node *firstn;
+ struct avl64node *lastn;
+ struct avl64node *pos;
+ struct avl64node *n;
+ struct avl64node *l;
+ struct reloc_node *rln;
+
+ avl64_findranges(&relocation_data, ino - 1, -1ULL, &firstn, &lastn);
+ if (firstn == NULL && lastn == NULL)
+ return 0;
+
+ avl_for_each_range_safe(pos, n, l, firstn, lastn) {
+ rln = container_of(pos, struct reloc_node, node);
+
+ if (!(rln->flags & INODE_PATH))
+ return rln->ino;
+ }
+
+ return 0;
+}
+
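+/*
+ * Return the address of the ipath slot for @ino so the caller can update it
+ * in place, or NULL if the inode is not tracked.
+ */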
+struct inode_path **
+get_reloc_ipath_slot(
+ uint64_t ino)
+{
+ struct reloc_node *rln;
+
+ rln = reloc_lookup(ino);
+ if (!rln)
+ return NULL;
+
+ return &rln->ipath;
+}
+
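+/* Stop tracking @ino and free its node. */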
+void
+forget_reloc_ino(
+ uint64_t ino)
+{
+ struct reloc_node *rln;
+
+ rln = reloc_lookup(ino);
+ if (!rln)
+ return;
+
+ avl64_delete(&relocation_data, &rln->node);
+ free(rln);
+}
+#endif /* USE_RADIX_TREE_FOR_INUMS */