
[v3,1/4] generic/020: move MAX_ATTRS and MAX_ATTRVAL_SIZE logic

Message ID: 20220413164422.30077-2-ddiss@suse.de
State: New, archived
Series: generic/020: fix MAX_ATTRVAL_SIZE values

Commit Message

David Disseldorp April 13, 2022, 4:44 p.m. UTC
No functional change. MAX_ATTRS and MAX_ATTRVAL_SIZE are only used
within generic/020, so move the logic for determining these values
over there.

Signed-off-by: David Disseldorp <ddiss@suse.de>
---
 common/attr       | 75 ----------------------------------------------
 tests/generic/020 | 76 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 76 insertions(+), 75 deletions(-)
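
Since this is intended as pure code motion, one quick sanity check is to run the affected test on its own from a configured fstests checkout (TEST_DEV and TEST_DIR set up in local.config). A minimal sketch, assuming the tree lives in a directory named xfstests-dev:

    cd xfstests-dev
    sudo ./check generic/020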

Patch

diff --git a/common/attr b/common/attr
index dae8a1bb..cce4d1b2 100644
--- a/common/attr
+++ b/common/attr
@@ -264,80 +264,5 @@  _getfattr()
 	return ${PIPESTATUS[0]}
 }
 
-# set maximum total attr space based on fs type
-case "$FSTYP" in
-xfs|udf|pvfs2|9p|ceph|nfs)
-	MAX_ATTRS=1000
-	;;
-ext2|ext3|ext4)
-	# For 4k blocksizes, most of the attributes have an attr_name of
-	# "attribute_NN" (12 characters) and a value of "value_NN" (8 characters).
-	# But for larger block sizes, we start having extended attributes of the
-	# form "attribute_NNN" or "attribute_NNNN", and "value_NNN" and
-	# "value_NNNN", which causes round_up(len(..), 4) to jump up by 4
-	# bytes.  So round_up(len(attr_name), 4) becomes 16 instead of 12, and
-	# round_up(len(value), 4) becomes 12 instead of 8.
-	#
-	# For 64K blocksize the calculation becomes
-	# 	max_attrs = (block_size - 32) / (16 + 12 + 16)
-	# or
-	# 	max_attrs = (block_size - 32) / 44
-	#
-	# For 4K blocksize:
-	# 	max_attrs = (block_size - 32) / (16 + 8 + 12)
-	# or
-	# 	max_attrs = (block_size - 32) / 36
-	#
-	# Note that (for a 4K block size) the above are exact calculations for
-	# attrs of type attribute_NN with values of type value_NN.
-	# With the above calculation, max_attrs for a 4K block size becomes 112.
-	# This means a few of the attrs will be of type attribute_NNN with
-	# values of type value_NNN. To handle this we need to add an extra 4
-	# bytes of headroom.
-	#
-	# So for 4K, the calculation becomes:
-	# 	max_attrs = (block_size - 32) / (16 + 8 + 12 + 4)
-	# or
-	# 	max_attrs = (block_size - 32) / 40
-	#
-	# Assume max ~1 block of attrs
-	BLOCK_SIZE=`_get_block_size $TEST_DIR`
-	if [ $BLOCK_SIZE -le 4096 ]; then
-		let MAX_ATTRS=$((($BLOCK_SIZE - 32) / (16 + 8 + 12 + 4)))
-	else
-		let MAX_ATTRS=$((($BLOCK_SIZE - 32) / (16 + 12 + 16 )))
-	fi
-	;;
-*)
-	# Assume max ~1 block of attrs
-	BLOCK_SIZE=`_get_block_size $TEST_DIR`
-	# user.attribute_XXX="value.XXX" is about 32 bytes; leave some overhead
-	let MAX_ATTRS=$BLOCK_SIZE/40
-esac
-
-export MAX_ATTRS
-
-# Set max attr value size based on fs type
-case "$FSTYP" in
-xfs|udf|btrfs)
-	MAX_ATTRVAL_SIZE=64
-	;;
-pvfs2)
-	MAX_ATTRVAL_SIZE=8192
-	;;
-9p|ceph|nfs)
-	MAX_ATTRVAL_SIZE=65536
-	;;
-bcachefs)
-	MAX_ATTRVAL_SIZE=1024
-	;;
-*)
-	# Assume max ~1 block of attrs
-	BLOCK_SIZE=`_get_block_size $TEST_DIR`
-	# leave a little overhead
-	let MAX_ATTRVAL_SIZE=$BLOCK_SIZE-256
-esac
-
-export MAX_ATTRVAL_SIZE
 # make sure this script returns success
 /bin/true
diff --git a/tests/generic/020 b/tests/generic/020
index 29ef853c..c2c285f6 100755
--- a/tests/generic/020
+++ b/tests/generic/020
@@ -51,6 +51,82 @@  _attr_list()
     fi
 }
 
+# set maximum total attr space based on fs type
+case "$FSTYP" in
+xfs|udf|pvfs2|9p|ceph|nfs)
+	MAX_ATTRS=1000
+	;;
+ext2|ext3|ext4)
+	# For 4k blocksizes, most of the attributes have an attr_name of
+	# "attribute_NN" (12 characters) and a value of "value_NN" (8 characters).
+	# But for larger block sizes, we start having extended attributes of the
+	# form "attribute_NNN" or "attribute_NNNN", and "value_NNN" and
+	# "value_NNNN", which causes round_up(len(..), 4) to jump up by 4
+	# bytes.  So round_up(len(attr_name), 4) becomes 16 instead of 12, and
+	# round_up(len(value), 4) becomes 12 instead of 8.
+	#
+	# For 64K blocksize the calculation becomes
+	# 	max_attrs = (block_size - 32) / (16 + 12 + 16)
+	# or
+	# 	max_attrs = (block_size - 32) / 44
+	#
+	# For 4K blocksize:
+	# 	max_attrs = (block_size - 32) / (16 + 8 + 12)
+	# or
+	# 	max_attrs = (block_size - 32) / 36
+	#
+	# Note that (for a 4K block size) the above are exact calculations for
+	# attrs of type attribute_NN with values of type value_NN.
+	# With the above calculation, max_attrs for a 4K block size becomes 112.
+	# This means a few of the attrs will be of type attribute_NNN with
+	# values of type value_NNN. To handle this we need to add an extra 4
+	# bytes of headroom.
+	#
+	# So for 4K, the calculation becomes:
+	# 	max_attrs = (block_size - 32) / (16 + 8 + 12 + 4)
+	# or
+	# 	max_attrs = (block_size - 32) / 40
+	#
+	# Assume max ~1 block of attrs
+	BLOCK_SIZE=`_get_block_size $TEST_DIR`
+	if [ $BLOCK_SIZE -le 4096 ]; then
+		let MAX_ATTRS=$((($BLOCK_SIZE - 32) / (16 + 8 + 12 + 4)))
+	else
+		let MAX_ATTRS=$((($BLOCK_SIZE - 32) / (16 + 12 + 16 )))
+	fi
+	;;
+*)
+	# Assume max ~1 block of attrs
+	BLOCK_SIZE=`_get_block_size $TEST_DIR`
+	# user.attribute_XXX="value.XXX" is about 32 bytes; leave some overhead
+	let MAX_ATTRS=$BLOCK_SIZE/40
+esac
+
+export MAX_ATTRS
+
+# Set max attr value size based on fs type
+case "$FSTYP" in
+xfs|udf|btrfs)
+	MAX_ATTRVAL_SIZE=64
+	;;
+pvfs2)
+	MAX_ATTRVAL_SIZE=8192
+	;;
+9p|ceph|nfs)
+	MAX_ATTRVAL_SIZE=65536
+	;;
+bcachefs)
+	MAX_ATTRVAL_SIZE=1024
+	;;
+*)
+	# Assume max ~1 block of attrs
+	BLOCK_SIZE=`_get_block_size $TEST_DIR`
+	# leave a little overhead
+	let MAX_ATTRVAL_SIZE=$BLOCK_SIZE-256
+esac
+
+export MAX_ATTRVAL_SIZE
+
 # real QA test starts here
 _supported_fs generic
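
For reference, the moved expressions work out as follows for two common block sizes. This is a standalone illustration of the arithmetic in the hunk above, not part of the patch; the variable names are just for illustration.

#!/bin/bash
# Illustrative only: plug common block sizes into the ext2/3/4 expressions
# and the generic fallback expressions moved into tests/generic/020 above.
for bs in 4096 65536; do
	if [ "$bs" -le 4096 ]; then
		ext_max_attrs=$(( (bs - 32) / (16 + 8 + 12 + 4) ))
	else
		ext_max_attrs=$(( (bs - 32) / (16 + 12 + 16) ))
	fi
	fallback_max_attrs=$(( bs / 40 ))
	fallback_max_attrval_size=$(( bs - 256 ))
	echo "bs=$bs: ext MAX_ATTRS=$ext_max_attrs," \
		"fallback MAX_ATTRS=$fallback_max_attrs," \
		"fallback MAX_ATTRVAL_SIZE=$fallback_max_attrval_size"
done

For a 4K block size this prints 101 (ext2/3/4), 102, and 3840 respectively; for 64K it prints 1488, 1638, and 65280.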