@@ -4442,28 +4442,6 @@ static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned
return distance < len;
}
-static void copy_pages(struct page *dst_page, struct page *src_page,
- unsigned long dst_off, unsigned long src_off,
- unsigned long len)
-{
- char *dst_kaddr = page_address(dst_page);
- char *src_kaddr;
- int must_memmove = 0;
-
- if (dst_page != src_page) {
- src_kaddr = page_address(src_page);
- } else {
- src_kaddr = dst_kaddr;
- if (areas_overlap(src_off, dst_off, len))
- must_memmove = 1;
- }
-
- if (must_memmove)
- memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
- else
- memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
-}
-
void memcpy_extent_buffer(const struct extent_buffer *dst,
unsigned long dst_offset, unsigned long src_offset,
unsigned long len)
@@ -4494,23 +4472,26 @@ void memmove_extent_buffer(const struct extent_buffer *dst,
unsigned long dst_offset, unsigned long src_offset,
unsigned long len)
{
- size_t cur;
- size_t dst_off_in_page;
- size_t src_off_in_page;
unsigned long dst_end = dst_offset + len - 1;
unsigned long src_end = src_offset + len - 1;
- unsigned long dst_i;
- unsigned long src_i;
if (check_eb_range(dst, dst_offset, len) ||
check_eb_range(dst, src_offset, len))
return;
+
if (dst_offset < src_offset) {
memcpy_extent_buffer(dst, dst_offset, src_offset, len);
return;
}
+
while (len > 0) {
- dst_i = get_eb_page_index(dst_end);
+ unsigned long src_i;
+ size_t cur;
+ size_t dst_off_in_page;
+ size_t src_off_in_page;
+ void *src_addr;
+ bool use_memmove;
+
src_i = get_eb_page_index(src_end);
dst_off_in_page = get_eb_offset_in_page(dst, dst_end);
@@ -4518,9 +4499,14 @@ void memmove_extent_buffer(const struct extent_buffer *dst,
cur = min_t(unsigned long, len, src_off_in_page + 1);
cur = min(cur, dst_off_in_page + 1);
- copy_pages(dst->pages[dst_i], dst->pages[src_i],
- dst_off_in_page - cur + 1,
- src_off_in_page - cur + 1, cur);
+
+ src_addr = page_address(dst->pages[src_i]) + src_off_in_page -
+ cur + 1;
+ use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
+ cur);
+
+ __write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
+ use_memmove);
dst_end -= cur;
src_end -= cur;
[BACKGROUND]
Currently memmove_extent_buffer() does a loop where it stops at any page
boundary inside [dst_offset, dst_offset + len) or [src_offset, src_offset + len).

This is mostly to allow us to do copy_pages(), but if we're going to use
folios we will need to handle multi-page (the old behavior) or single
folio (the new optimization).  The current code would be a burden for
future changes.

[ENHANCEMENT]
Instead of sticking with copy_pages(), here we utilize the new
__write_extent_buffer() helper to handle the writes.

Unlike the refactoring of memcpy_extent_buffer(), we can not just rely on
write_extent_buffer() and only handle page boundaries inside the src
range.  The function write_extent_buffer() itself only writes forward,
thus it can not handle the following case (already covered by the extent
buffer memory operation tests, cross page overlapping run 2):

	Src     Page boundary
	|///////|
	    |///|////|
	         Dst

In the above case, if we just follow the page boundaries in the src range,
there is no need to do any split: just one __write_extent_buffer() call
with @use_memmove = true.

But __write_extent_buffer() would split the dst range into two, so it
first copies the beginning part of the src range into the first half of
the dst range.  After this operation, the beginning of the dst range is
already updated, causing corruption.

So we have to follow the old behavior of handling both page boundaries.

And since we're the last caller of copy_pages(), we can remove it
completely.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/extent_io.c | 48 ++++++++++++++++----------------------------
 1 file changed, 17 insertions(+), 31 deletions(-)
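
To make the overlap corruption above concrete, here is a minimal user-space
sketch, not kernel code: it models the extent buffer as a flat char array
with a hypothetical 8-byte page size, and the helpers forward_chunked_copy()
and backward_chunked_copy() are illustrative stand-ins for a forward-only
writer split at the dst page boundary versus the loop this patch uses, which
splits at both src and dst boundaries and copies from the tail.

/*
 * Minimal user-space sketch (not btrfs code) of the corruption described
 * above.  PAGE_SZ, forward_chunked_copy() and backward_chunked_copy() are
 * illustrative names only, not kernel APIs.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 8	/* hypothetical page size for the model */

/* Split only at dst page boundaries and write front-to-back. */
static void forward_chunked_copy(char *buf, size_t dst_off, size_t src_off,
				 size_t len)
{
	while (len > 0) {
		size_t cur = PAGE_SZ - dst_off % PAGE_SZ;

		if (cur > len)
			cur = len;
		/*
		 * Even with memmove() per chunk, the second chunk reads
		 * src bytes that the first chunk already overwrote.
		 */
		memmove(buf + dst_off, buf + src_off, cur);
		dst_off += cur;
		src_off += cur;
		len -= cur;
	}
}

/* Split at both src and dst page boundaries and write back-to-front. */
static void backward_chunked_copy(char *buf, size_t dst_off, size_t src_off,
				  size_t len)
{
	size_t dst_end = dst_off + len - 1;
	size_t src_end = src_off + len - 1;

	while (len > 0) {
		size_t cur = len;

		if (cur > dst_end % PAGE_SZ + 1)
			cur = dst_end % PAGE_SZ + 1;
		if (cur > src_end % PAGE_SZ + 1)
			cur = src_end % PAGE_SZ + 1;
		/* The tail of the dst range is written first. */
		memmove(buf + dst_end - cur + 1, buf + src_end - cur + 1, cur);
		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}

int main(void)
{
	/* src = bytes [0, 7), dst = bytes [3, 10): dst crosses a "page". */
	char a[17] = "ABCDEFGH--------";
	char b[17] = "ABCDEFGH--------";

	forward_chunked_copy(a, 3, 0, 7);
	backward_chunked_copy(b, 3, 0, 7);

	printf("forward : %s\n", a);	/* ABCABCDECD------ (corrupted) */
	printf("backward: %s\n", b);	/* ABCABCDEFG------ (== memmove) */
	return 0;
}

With the inputs in main() (src bytes [0, 7) copied onto dst bytes [3, 10)),
the forward variant ends up with "ABCABCDECD" because its second chunk reads
bytes the first chunk already overwrote, while the backward variant matches
a plain memmove() and yields "ABCABCDEFG".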