@@ -9,24 +9,11 @@
#include <linux/iomap.h>
#include "trace.h"
-/*
- * Execute a iomap write on a segment of the mapping that spans a
- * contiguous range of pages that have identical block mapping state.
- *
- * This avoids the need to map pages individually, do individual allocations
- * for each page and most importantly avoid the need for filesystem specific
- * locking per page. Instead, all the operations are amortised over the entire
- * range of pages. It is assumed that the filesystems will lock whatever
- * resources they require in the iomap_begin call, and release them in the
- * iomap_end call.
- */
-loff_t
-iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
- const struct iomap_ops *ops, void *data, iomap_actor_t actor)
+loff_t __iomap_apply(struct inode *inode, loff_t pos, loff_t length,
+ unsigned flags, struct iomap *iomap, struct iomap *srcmap,
+ const struct iomap_ops *ops, iomap_actor_t actor)
{
- struct iomap iomap = { .type = IOMAP_HOLE };
- struct iomap srcmap = { .type = IOMAP_HOLE };
- loff_t written = 0, ret;
+ loff_t ret;
u64 end;
trace_iomap_apply(inode, pos, length, flags, ops, actor, _RET_IP_);
@@ -43,52 +30,26 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
* expose transient stale data. If the reserve fails, we can safely
* back out at this point as there is nothing to undo.
*/
- ret = ops->iomap_begin(inode, pos, length, flags, &iomap, &srcmap);
+ ret = ops->iomap_begin(inode, pos, length, flags, iomap, srcmap);
if (ret)
return ret;
- if (WARN_ON(iomap.offset > pos))
+ if (WARN_ON(iomap->offset > pos))
return -EIO;
- if (WARN_ON(iomap.length == 0))
+ if (WARN_ON(iomap->length == 0))
return -EIO;
- trace_iomap_apply_dstmap(inode, &iomap);
- if (srcmap.type != IOMAP_HOLE)
- trace_iomap_apply_srcmap(inode, &srcmap);
+ trace_iomap_apply_dstmap(inode, iomap);
+ if (srcmap->type != IOMAP_HOLE)
+ trace_iomap_apply_srcmap(inode, srcmap);
/*
* Cut down the length to the one actually provided by the filesystem,
* as it might not be able to give us the whole size that we requested.
*/
- end = iomap.offset + iomap.length;
- if (srcmap.type != IOMAP_HOLE)
- end = min(end, srcmap.offset + srcmap.length);
+ end = iomap->offset + iomap->length;
+ if (srcmap->type != IOMAP_HOLE)
+ end = min(end, srcmap->offset + srcmap->length);
if (pos + length > end)
length = end - pos;
-
- /*
- * Now that we have guaranteed that the space allocation will succeed,
- * we can do the copy-in page by page without having to worry about
- * failures exposing transient data.
- *
- * To support COW operations, we read in data for partially blocks from
- * the srcmap if the file system filled it in. In that case we the
- * length needs to be limited to the earlier of the ends of the iomaps.
- * If the file system did not provide a srcmap we pass in the normal
- * iomap into the actors so that they don't need to have special
- * handling for the two cases.
- */
- written = actor(inode, pos, length, data, &iomap,
- srcmap.type != IOMAP_HOLE ? &srcmap : &iomap);
-
- /*
- * Now the data has been copied, commit the range we've copied. This
- * should not fail unless the filesystem has had a fatal error.
- */
- if (ops->iomap_end) {
- ret = ops->iomap_end(inode, pos, length,
- written > 0 ? written : 0,
- flags, &iomap);
- }
-
- return written ? written : ret;
+ return length;
}
@@ -148,9 +148,62 @@ struct iomap_ops {
typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
void *data, struct iomap *iomap, struct iomap *srcmap);
-loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
- unsigned flags, const struct iomap_ops *ops, void *data,
- iomap_actor_t actor);
+loff_t __iomap_apply(struct inode *inode, loff_t pos, loff_t length,
+ unsigned flags, struct iomap *iomap, struct iomap *srcmap,
+ const struct iomap_ops *ops, iomap_actor_t actor);
+
+/*
+ * Execute an iomap write on a segment of the mapping that spans a
+ * contiguous range of pages that have identical block mapping state.
+ *
+ * This avoids the need to map pages individually, do individual allocations
+ * for each page and most importantly avoid the need for filesystem specific
+ * locking per page. Instead, all the operations are amortised over the entire
+ * range of pages. It is assumed that the filesystems will lock whatever
+ * resources they require in the iomap_begin call, and release them in the
+ * iomap_end call.
+ */
+static inline loff_t
+iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
+ const struct iomap_ops *ops, void *data, iomap_actor_t actor)
+{
+ struct iomap iomap = { .type = IOMAP_HOLE };
+ struct iomap srcmap = { .type = IOMAP_HOLE };
+ loff_t written;
+
+ length = __iomap_apply(inode, pos, length, flags, &iomap, &srcmap,
+ ops, actor);
+ /*
+ * Now that we have guaranteed that the space allocation will succeed,
+ * we can do the copy-in page by page without having to worry about
+ * failures exposing transient data.
+ *
+ * To support COW operations, we read in data for partial blocks from
+ * the srcmap if the file system filled it in. In that case the
+ * length needs to be limited to the earlier of the ends of the iomaps.
+ * If the file system did not provide a srcmap we pass in the normal
+ * iomap into the actors so that they don't need to have special
+ * handling for the two cases.
+ */
+ if (length < 0)
+ return length;
+
+ written = actor(inode, pos, length, data, &iomap,
+ srcmap.type != IOMAP_HOLE ? &srcmap : &iomap);
+ /*
+ * Now the data has been copied, commit the range we've copied. This
+ * should not fail unless the filesystem has had a fatal error.
+ */
+ if (ops->iomap_end) {
+ loff_t ret = ops->iomap_end(inode, pos, length,
+ written > 0 ? written : 0,
+ flags, &iomap);
+ if (!written)
+ written = ret;
+ }
+
+ return written;
+}
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops);