
[RFC,5/5] Accelerate ensure_skip_worktree_means_skip_worktree by caching

Message ID: 20220109045732.2497526-6-newren@gmail.com
State: New, archived
Series: Remove the present-despite-SKIP_WORKTREE class of bugs

Commit Message

Elijah Newren Jan. 9, 2022, 4:57 a.m. UTC
Rather than lstat()'ing every SKIP_WORKTREE path, take advantage of the
fact that entire directories will often be missing, especially for cone
mode and even more so ever since commit 55dfcf9591 ("sparse-checkout:
clear tracked sparse dirs", 2021-09-08).  If we have already determined
that the parent directory of a file (or any other previous ancestor)
does not exist, then we already know the file cannot exist and do not
need to lstat() it separately.
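
In miniature, the idea is the memoized existence check sketched
below.  This is a hypothetical, self-contained illustration with a
toy linked-list cache and made-up helper names (exists(), lookup(),
remember()); the patch itself uses Git's hashmap and mem_pool APIs
instead:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/stat.h>

    /* Toy cache: linked list mapping a path to whether it exists. */
    struct cache_node {
        char *path;
        int is_present;
        struct cache_node *next;
    };
    static struct cache_node *cache;

    static struct cache_node *lookup(const char *path)
    {
        struct cache_node *n;
        for (n = cache; n; n = n->next)
            if (!strcmp(n->path, path))
                return n;
        return NULL;
    }

    static void remember(const char *path, int is_present)
    {
        struct cache_node *n = malloc(sizeof(*n));
        n->path = strdup(path);
        n->is_present = is_present;
        n->next = cache;
        cache = n;
    }

    /* Does 'path' exist?  Consult cached ancestor results first. */
    static int exists(const char *path)
    {
        struct cache_node *n = lookup(path);
        const char *slash;
        struct stat st;
        int found;

        if (n)
            return n->is_present;

        slash = strrchr(path, '/');
        if (slash && slash > path) {
            char *parent = malloc(slash - path + 1);
            memcpy(parent, path, slash - path);
            parent[slash - path] = '\0';
            found = exists(parent);
            free(parent);
            if (!found) {
                /* Parent missing => path cannot exist; skip lstat(). */
                remember(path, 0);
                return 0;
            }
        }
        found = !lstat(path, &st);
        remember(path, found);
        return found;
    }

    int main(int argc, char **argv)
    {
        int i;
        for (i = 1; i < argc; i++)
            printf("%s: %s\n", argv[i],
                   exists(argv[i]) ? "present" : "absent");
        return 0;
    }

With SKIP_WORKTREE entries a/b/x, a/b/y, and a/b/z and directory "a"
missing, this performs a single lstat("a") and answers everything
else from the cache.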

Granted, the cost of ensure_skip_worktree_means_skip_worktree() might
be considered a bit high for non-cone mode since it might now lstat()
every SKIP_WORKTREE path when the index is loaded (an O(N) cost, with
N the number of SKIP_WORKTREE paths), but non-cone mode users already
have to deal with the O(N*M) cost (with N=the number of tracked files
and M=the number of sparsity patterns), so this should be reasonable.

Signed-off-by: Elijah Newren <newren@gmail.com>
---
 sparse-index.c | 105 ++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 103 insertions(+), 2 deletions(-)

Comments

Victoria Dye Jan. 11, 2022, 6:30 p.m. UTC | #1
Elijah Newren wrote:
> Rather than lstat()'ing every SKIP_WORKTREE path, take advantage of the
> fact that entire directories will often be missing, especially for cone
> mode and even more so ever since commit 55dfcf9591 ("sparse-checkout:
> clear tracked sparse dirs", 2021-09-08).  If we have already determined
> that the parent directory of a file (or any other previous ancestor)
> does not exist, then we already know the file cannot exist and do not
> need to lstat() it separately.
> 
> Granted, the cost of ensure_skip_worktree_means_skip_worktree() might
> be considered a bit high for non-cone mode since it might now lstat()
> every SKIP_WORKTREE path when the index is loaded (an O(N) cost, with
> N the number of SKIP_WORKTREE paths), but non-cone mode users already
> have to deal with the O(N*M) cost (with N=the number of tracked files
> and M=the number of sparsity patterns), so this should be reasonable.
> 

Did you write/run any performance tests to see how this optimization changed
the execution time? If not, running the `p2000` performance tests against
the patch series base, [3/5], and [5/5] would provide some really helpful
insight into the cost of `ensure_skip_worktree_means_skip_worktree`, and
then into how much this optimization improves it.
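
For example, something like this (the revision placeholders are
hypothetical; `t/perf/run` and `p2000-sparse-operations.sh` already
exist in-tree):

    cd t/perf
    ./run <series-base> <rev-of-3/5> <rev-of-5/5> -- p2000-sparse-operations.sh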

> [...]
Elijah Newren Jan. 11, 2022, 10:04 p.m. UTC | #2
On Tue, Jan 11, 2022 at 10:30 AM Victoria Dye <vdye@github.com> wrote:
>
> Elijah Newren wrote:
> > [...]
>
> Did you write/run any performance tests to see how this optimization changed
> the execution time? If not, running the `p2000` performance tests against
> the patch series base, [3/5], and [5/5] would provide some really helpful
> insight into the cost of `ensure_skip_worktree_means_skip_worktree`, and
> then into how much this optimization improves it.

I haven't[1].  You bring up a very good point; I'll add it for the next round.

[1] Long, probably irrelevant story about why: My original patches
were actually going to go further and just remove the
present-despite-SKIP_WORKTREE files in _all_ cases, sparse-checkout or
not. It had not occurred to me while writing the patches to make it
specific to sparse-checkouts.  Because of that, I figured it was
better to get feedback on if the idea was acceptable and spent a lot
more time concentrating on making the case.  Then I realized near the
end that folks who don't use sparse-checkout or SKIP_WORKTREE might be
annoyed at the overhead also being added for them, for a feature
they'll never even use.  I decided to back off a bit, and make it
sparse-checkout specific.  Then I realized that backing off might just
keep all users happy anyway (the folks who intentionally use
present-despite-SKIP_WORKTREE paths, despite their many warts, can
keep doing so) and edited a lot of my commit messages, docs, and
cover letter.  And by then it was late Saturday night and I had
promised to send out my series on Friday.  Since I had already marked
my cover letter as RFC anyway, I just decided to temporarily punt on
getting performance numbers...

Patch

diff --git a/sparse-index.c b/sparse-index.c
index 79d50e444c..608782e255 100644
--- a/sparse-index.c
+++ b/sparse-index.c
@@ -341,18 +341,117 @@ void ensure_correct_sparsity(struct index_state *istate)
 		ensure_full_index(istate);
 }
 
+struct path_cache_entry {
+	struct hashmap_entry ent;
+	const char *path;
+	int path_length;
+	int is_present;
+};
+
+static int path_cache_cmp(const void *unused,
+			  const struct hashmap_entry *entry1,
+			  const struct hashmap_entry *entry2,
+			  const void *also_unused)
+{
+	const struct path_cache_entry *e1, *e2;
+
+	e1 = container_of(entry1, const struct path_cache_entry, ent);
+	e2 = container_of(entry2, const struct path_cache_entry, ent);
+	if (e1->path_length != e2->path_length)
+		return e1->path_length - e2->path_length;
+	return memcmp(e1->path, e2->path, e1->path_length);
+}
+
+static struct path_cache_entry *find_path_cache_entry(struct hashmap *map,
+						      const char *str,
+						      int str_length)
+{
+	struct path_cache_entry entry;
+	hashmap_entry_init(&entry.ent, memhash(str, str_length));
+	entry.path = str;
+	entry.path_length = str_length;
+	return hashmap_get_entry(map, &entry, ent, NULL);
+}
+
+static void record(struct hashmap *path_cache,
+		   struct mem_pool *pool,
+		   const char *path,
+		   int path_length,
+		   int found)
+{
+	struct path_cache_entry *entry;
+
+	entry = mem_pool_alloc(pool, sizeof(*entry));
+	hashmap_entry_init(&entry->ent, memhash(path, path_length));
+	entry->path = path;
+	entry->path_length = path_length;
+	entry->is_present = found;
+	hashmap_add(path_cache, &entry->ent);
+}
+
+static int path_found(struct hashmap *path_cache, struct mem_pool *pool,
+		      const char *path, int path_length)
+{
+	struct stat st;
+	int found;
+	const char *dirsep = path + path_length - 1;
+	const char *tmp;
+
+	/* Find directory separator; memrchr is sadly glibc-specific */
+	while (dirsep > path && *dirsep != '/')
+		dirsep--;
+
+	/* If parent of path doesn't exist, no point lstat'ing path... */
+	if (dirsep > path) {
+		struct path_cache_entry *entry;
+		int new_length, parent_found;
+
+		/* First, check if path's parent's existence was cached */
+		new_length = dirsep - path;
+		entry = find_path_cache_entry(path_cache, path, new_length);
+		if (entry)
+			parent_found = entry->is_present;
+		else
+			parent_found = path_found(path_cache, pool,
+						  path, new_length);
+
+		if (!parent_found) {
+			/* path can't exist if parent dir doesn't */
+			record(path_cache, pool, path, path_length, 0);
+			return 0;
+		} /* else parent was found so must check path itself too... */
+	}
+
+	/* Okay, parent dir exists, so we have to check original path */
+
+	/* Make sure we have a NUL-terminated string to pass to lstat */
+	tmp = path;
+	if (path[path_length])
+		tmp = mem_pool_strndup(pool, path, path_length);
+	/* Determine if path exists */
+	found = !lstat(tmp, &st);
+
+	record(path_cache, pool, path, path_length, found);
+	return found;
+}
+
 void ensure_skip_worktree_means_skip_worktree(struct index_state *istate)
 {
+	struct hashmap path_cache = HASHMAP_INIT(path_cache_cmp, NULL);
+	struct mem_pool pool;
+
 	int i;
+
 	if (!core_apply_sparse_checkout)
 		return;
 
+	mem_pool_init(&pool, 32*1024);
 restart:
 	for (i = 0; i < istate->cache_nr; i++) {
 		struct cache_entry *ce = istate->cache[i];
-		struct stat st;
 
-		if (ce_skip_worktree(ce) && !lstat(ce->name, &st)) {
+		if (ce_skip_worktree(ce) &&
+		    path_found(&path_cache, &pool, ce->name, strlen(ce->name))) {
 			if (S_ISSPARSEDIR(ce->ce_mode)) {
 				ensure_full_index(istate);
 				goto restart;
@@ -360,6 +459,8 @@ void ensure_skip_worktree_means_skip_worktree(struct index_state *istate)
 			ce->ce_flags &= ~CE_SKIP_WORKTREE;
 		}
 	}
+	hashmap_clear(&path_cache);
+	mem_pool_discard(&pool, 0);
 }