[v3,07/10] object-file: drop `index_blob_stream()`

Message ID 20250415-pks-split-object-file-v3-7-6aa7db7ad7b0@pks.im (mailing list archive)
State New
Series Split up "object-file.c"

Commit Message

Patrick Steinhardt April 15, 2025, 9:38 a.m. UTC
The `index_blob_stream()` function is a mere wrapper around
`index_blob_bulk_checkin()`. This has been the case since 568508e7657
(bulk-checkin: replace fast-import based implementation, 2011-10-28),
which moved the implementation from `index_blob_stream()` (then still
called `index_stream()`) into `index_bulk_checkin()` (since renamed to
`index_blob_bulk_checkin()`).

Remove the redirection by dropping the wrapper. Move the comment to
`index_blob_bulk_checkin()` to retain its context.

Signed-off-by: Patrick Steinhardt <ps@pks.im>
---
 bulk-checkin.h | 15 +++++++++++++++
 object-file.c  | 26 ++------------------------
 2 files changed, 17 insertions(+), 24 deletions(-)

Patch

diff --git a/bulk-checkin.h b/bulk-checkin.h
index aa7286a7b3e..7246ea58dcf 100644
--- a/bulk-checkin.h
+++ b/bulk-checkin.h
@@ -9,6 +9,21 @@ 
 void prepare_loose_object_bulk_checkin(void);
 void fsync_loose_object_bulk_checkin(int fd, const char *filename);
 
+/*
+ * This creates one packfile per large blob unless bulk-checkin
+ * machinery is "plugged".
+ *
+ * This also bypasses the usual "convert-to-git" dance, and that is on
+ * purpose. We could write a streaming version of the converting
+ * functions and insert that before feeding the data to fast-import
+ * (or equivalent in-core API described above). However, that is
+ * somewhat complicated, as we do not know the size of the filter
+ * result, which we need to know beforehand when writing a git object.
+ * Since the primary motivation for trying to stream from the working
+ * tree file and to avoid mmaping it in core is to deal with large
+ * binary blobs, they generally do not want to get any conversion, and
+ * callers should avoid this code path when filters are requested.
+ */
 int index_blob_bulk_checkin(struct object_id *oid,
 			    int fd, size_t size,
 			    const char *path, unsigned flags);
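
[Editor's note: the "plugged" behaviour mentioned in the moved comment
refers to the ODB transaction machinery also declared in
"bulk-checkin.h". A minimal sketch of the intended usage pattern,
assuming the `begin_odb_transaction()`/`end_odb_transaction()` pair and
the `HASH_WRITE_OBJECT` flag from "object-file.h"; the helper below is
hypothetical and error handling is abridged:

	#include "git-compat-util.h"
	#include "bulk-checkin.h"
	#include "object-file.h"

	/*
	 * Hypothetical helper: index several large blobs. While the
	 * transaction is open ("plugged"), the blobs are appended to a
	 * single packfile instead of producing one packfile per blob.
	 */
	static void index_large_blobs(const char **paths, size_t nr)
	{
		begin_odb_transaction();
		for (size_t i = 0; i < nr; i++) {
			struct object_id oid;
			struct stat st;
			int fd = xopen(paths[i], O_RDONLY);

			if (fstat(fd, &st) < 0)
				die_errno("unable to stat %s", paths[i]);
			if (index_blob_bulk_checkin(&oid, fd,
						    xsize_t(st.st_size),
						    paths[i],
						    HASH_WRITE_OBJECT) < 0)
				die("failed to index %s", paths[i]);
			close(fd);
		}
		end_odb_transaction();
	}
]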
diff --git a/object-file.c b/object-file.c
index 2051991f4de..6084d603136 100644
--- a/object-file.c
+++ b/object-file.c
@@ -1356,28 +1356,6 @@  static int index_core(struct index_state *istate,
 	return ret;
 }
 
-/*
- * This creates one packfile per large blob unless bulk-checkin
- * machinery is "plugged".
- *
- * This also bypasses the usual "convert-to-git" dance, and that is on
- * purpose. We could write a streaming version of the converting
- * functions and insert that before feeding the data to fast-import
- * (or equivalent in-core API described above). However, that is
- * somewhat complicated, as we do not know the size of the filter
- * result, which we need to know beforehand when writing a git object.
- * Since the primary motivation for trying to stream from the working
- * tree file and to avoid mmaping it in core is to deal with large
- * binary blobs, they generally do not want to get any conversion, and
- * callers should avoid this code path when filters are requested.
- */
-static int index_blob_stream(struct object_id *oid, int fd, size_t size,
-			     const char *path,
-			     unsigned flags)
-{
-	return index_blob_bulk_checkin(oid, fd, size, path, flags);
-}
-
 int index_fd(struct index_state *istate, struct object_id *oid,
 	     int fd, struct stat *st,
 	     enum object_type type, const char *path, unsigned flags)
@@ -1398,8 +1376,8 @@  int index_fd(struct index_state *istate, struct object_id *oid,
 		ret = index_core(istate, oid, fd, xsize_t(st->st_size),
 				 type, path, flags);
 	else
-		ret = index_blob_stream(oid, fd, xsize_t(st->st_size), path,
-					flags);
+		ret = index_blob_bulk_checkin(oid, fd, xsize_t(st->st_size), path,
+					     flags);
 	close(fd);
 	return ret;
 }
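
[Editor's note: for reviewers tracing the call path, after this change
`index_fd()` calls `index_blob_bulk_checkin()` directly, and it takes
that route only for large regular-file blobs that need no conversion,
matching the "callers should avoid this code path when filters are
requested" remark in the moved comment. A condensed sketch of the
dispatch in "object-file.c"; the helpers shown exist in the tree, but
the size check against the `core.bigFileThreshold` limit is abridged,
as the exact accessor varies across versions:

	int index_fd(struct index_state *istate, struct object_id *oid,
		     int fd, struct stat *st,
		     enum object_type type, const char *path, unsigned flags)
	{
		int ret;

		if (type == OBJ_BLOB && path &&
		    would_convert_to_git_filter_fd(istate, path))
			/* filters requested: stream through the conversion path */
			ret = index_stream_convert_blob(istate, oid, fd, path, flags);
		else if (!S_ISREG(st->st_mode))
			/* not a regular file: read everything into memory */
			ret = index_pipe(istate, oid, fd, type, path, flags);
		else if (st->st_size <= big_file_threshold /* abridged */ ||
			 type != OBJ_BLOB ||
			 (path && would_convert_to_git(istate, path)))
			/* small or convertible contents: in-core code path */
			ret = index_core(istate, oid, fd, xsize_t(st->st_size),
					 type, path, flags);
		else
			/* large plain blob: stream straight into a packfile */
			ret = index_blob_bulk_checkin(oid, fd, xsize_t(st->st_size),
						      path, flags);
		close(fd);
		return ret;
	}
]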