diff mbox

btrfs: file.c: file cleanup

Message ID 1481897853-13232-1-git-send-email-phil@philippeloctaux.com (mailing list archive)
State Superseded
Headers show

Commit Message

Philippe Loctaux Dec. 16, 2016, 2:17 p.m. UTC
cleaned up the file with checkpatch

Signed-off-by: Philippe Loctaux <phil@philippeloctaux.com>
---
 fs/btrfs/file.c | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

Comments

David Sterba Jan. 3, 2017, 4:02 p.m. UTC | #1
On Fri, Dec 16, 2016 at 03:17:33PM +0100, Philippe Loctaux wrote:
> cleaned up the file with checkpatch
                      ^^^^^^^^^^^^^^^

Sorry, this is an example of what should not be done. Checkpatch can
detect lots of things that once were valid or tolerated but are not
today. There are minor coding style violations that have accumulated
over the years, missing newline here and there or the extra { } around
statements. We can live with that and change it eventually when
modifying the surrounding code. I'm happy to apply cleanup patches that
are a byproduct of actually reading the code, but just running
checkpatch and generating patches is below the threshold.
--
To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 3a14c87..d131b8d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -464,6 +464,7 @@  static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
 static void btrfs_drop_pages(struct page **pages, size_t num_pages)
 {
 	size_t i;
+
 	for (i = 0; i < num_pages; i++) {
 		/* page checked is some magic around finding pages that
 		 * have been modified without going through btrfs_set_page_dirty
@@ -509,6 +510,7 @@  int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
 
 	for (i = 0; i < num_pages; i++) {
 		struct page *p = pages[i];
+
 		SetPageUptodate(p);
 		ClearPageChecked(p);
 		set_page_dirty(p);
@@ -1421,6 +1423,7 @@  lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
 
 	if (start_pos < inode->i_size) {
 		struct btrfs_ordered_extent *ordered;
+
 		lock_extent_bits(&BTRFS_I(inode)->io_tree,
 				 start_pos, last_pos, cached_state);
 		ordered = btrfs_lookup_ordered_range(inode, start_pos,
@@ -1481,9 +1484,8 @@  static noinline int check_can_nocow(struct inode *inode, loff_t pos,
 		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
 		ordered = btrfs_lookup_ordered_range(inode, lockstart,
 						     lockend - lockstart + 1);
-		if (!ordered) {
+		if (!ordered)
 			break;
-		}
 		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
 		btrfs_start_ordered_extent(inode, ordered, 1);
 		btrfs_put_ordered_extent(ordered);
@@ -1495,7 +1497,7 @@  static noinline int check_can_nocow(struct inode *inode, loff_t pos,
 		ret = 0;
 		btrfs_end_write_no_snapshoting(root);
 	} else {
-		*write_bytes = min_t(size_t, *write_bytes ,
+		*write_bytes = min_t(size_t, *write_bytes,
 				     num_bytes - pos + lockstart);
 	}
 
@@ -1727,8 +1729,8 @@  static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			btrfs_delalloc_release_metadata(inode, release_bytes);
 		} else {
 			btrfs_delalloc_release_space(inode,
-						round_down(pos, root->sectorsize),
-						release_bytes);
+					round_down(pos, root->sectorsize),
+					release_bytes);
 		}
 	}
 
@@ -1904,8 +1906,8 @@  int btrfs_release_file(struct inode *inode, struct file *filp)
 	 * application were using truncate to replace a file in place.
 	 */
 	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
-			       &BTRFS_I(inode)->runtime_flags))
-			filemap_flush(inode->i_mapping);
+		&BTRFS_I(inode)->runtime_flags))
+		filemap_flush(inode->i_mapping);
 	return 0;
 }
 
@@ -1943,8 +1945,9 @@  int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	u64 len;
 
 	/*
-	 * The range length can be represented by u64, we have to do the typecasts
-	 * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
+	 * The range length can be represented by u64, we have to do the
+	 * typecasts to avoid signed overflow if it's [0, LLONG_MAX]
+	 * eg. from fsync()
 	 */
 	len = (u64)end - (u64)start + 1;
 	trace_btrfs_sync_file(file, datasync);
@@ -2409,7 +2412,8 @@  static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	/* Check the aligned pages after the first unaligned page,
 	 * if offset != orig_start, which means the first unaligned page
 	 * including several following pages are already in holes,
-	 * the extra check can be skipped */
+	 * the extra check can be skipped
+	 */
 	if (offset == orig_start) {
 		/* after truncate page, check hole again */
 		len = offset + len - lockstart;