[v9,3/3] drm/tests: Add a test case for drm buddy clear allocation

Message ID: 20240318214058.2014-3-Arunpravin.PaneerSelvam@amd.com
Series: [v9,1/3] drm/buddy: Implement tracking clear page feature

Commit Message

Paneer Selvam, Arunpravin March 18, 2024, 9:40 p.m. UTC
Add a new test case for the drm buddy clear and dirty
allocation.

Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
Suggested-by: Matthew Auld <matthew.auld@intel.com>
---
 drivers/gpu/drm/tests/drm_buddy_test.c | 127 +++++++++++++++++++++++++
 1 file changed, 127 insertions(+)

Comments

Matthew Auld March 26, 2024, 5:46 p.m. UTC | #1
On 18/03/2024 21:40, Arunpravin Paneer Selvam wrote:
> Add a new test case for the drm buddy clear and dirty
> allocation.
> 
> Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
> Suggested-by: Matthew Auld <matthew.auld@intel.com>
> ---
>   drivers/gpu/drm/tests/drm_buddy_test.c | 127 +++++++++++++++++++++++++
>   1 file changed, 127 insertions(+)
> 
> diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
> index 454ad9952f56..d355a6e61893 100644
> --- a/drivers/gpu/drm/tests/drm_buddy_test.c
> +++ b/drivers/gpu/drm/tests/drm_buddy_test.c
> @@ -19,6 +19,132 @@ static inline u64 get_size(int order, u64 chunk_size)
>   	return (1 << order) * chunk_size;
>   }
>   
> +static void drm_test_buddy_alloc_clear(struct kunit *test)
> +{
> +	unsigned long n_pages, total, i = 0;
> +	const unsigned long ps = SZ_4K;
> +	struct drm_buddy_block *block;
> +	const int max_order = 12;
> +	LIST_HEAD(allocated);
> +	struct drm_buddy mm;
> +	unsigned int order;
> +	u64 mm_size, size;

Maybe just make these two u32 or unsigned long. That should be big 
enough, and it avoids any kind of 32-bit compilation bugs below.
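
For example (untested sketch), the declarations could become:

	u32 mm_size, size;

which also lets the "size=%u" format specifiers further down stay 
correct as-is.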

> +	LIST_HEAD(dirty);
> +	LIST_HEAD(clean);
> +
> +	mm_size = PAGE_SIZE << max_order;

s/PAGE_SIZE/SZ_4K/ below also.
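
i.e.:

	mm_size = SZ_4K << max_order;

and the same for the size calculation in the second half of the test.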

> +	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
> +
> +	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
> +
> +	/**

Drop the extra *, since it's not actual kernel-doc. Below also.
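
i.e. just a plain block comment:

	/*
	 * Idea is to allocate and free some random portion of the address space,
	 * ...
	 */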

> +	 * Idea is to allocate and free some random portion of the address space,
> +	 * returning those pages as non-dirty and randomly alternate between
> +	 * requesting dirty and non-dirty pages (not going over the limit
> +	 * we freed as non-dirty), putting that into two separate lists.
> +	 * Loop over both lists at the end checking that the dirty list
> +	 * is indeed all dirty pages and vice versa. Free it all again,
> +	 * keeping the dirty/clear status.
> +	 */
> +	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
> +							    5 * ps, ps, &allocated,
> +							    DRM_BUDDY_TOPDOWN_ALLOCATION),
> +				"buddy_alloc hit an error size=%u\n", 5 * ps);
> +	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
> +
> +	n_pages = 10;
> +	do {
> +		unsigned long flags;
> +		struct list_head *list;
> +		int slot = i % 2;
> +
> +		if (slot == 0) {
> +			list = &dirty;
> +			flags = 0;
> +		} else if (slot == 1) {

Could just be else {
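
e.g. (untested):

	if (slot == 0) {
		list = &dirty;
		flags = 0;
	} else {
		list = &clean;
		flags = DRM_BUDDY_CLEAR_ALLOCATION;
	}

which also sidesteps any maybe-uninitialised warnings for list/flags.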

> +			list = &clean;
> +			flags = DRM_BUDDY_CLEAR_ALLOCATION;
> +		}
> +
> +		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
> +								    ps, ps, list,
> +								    flags),
> +					"buddy_alloc hit an error size=%u\n", ps);
> +	} while (++i < n_pages);
> +
> +	list_for_each_entry(block, &clean, link)
> +		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), true);
> +
> +	list_for_each_entry(block, &dirty, link)
> +		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
> +
> +	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
> +
> +	/**
> +	 * Trying to go over the clear limit for some allocation.
> +	 * The allocation should never fail with reasonable page-size.
> +	 */
> +	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
> +							    10 * ps, ps, &clean,
> +							    DRM_BUDDY_CLEAR_ALLOCATION),
> +				"buddy_alloc hit an error size=%u\n", 10 * ps);
> +
> +	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
> +	drm_buddy_free_list(&mm, &dirty, 0);
> +	drm_buddy_fini(&mm);
> +
> +	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
> +
> +	/**
> +	 * Create a new mm. Intentionally fragment the address space by creating
> +	 * two alternating lists. Free both lists, one as dirty the other as clean.
> +	 * Try to allocate double the previous size with matching min_page_size. The
> +	 * allocation should never fail as it calls the force_merge. Also check that
> +	 * the page is always dirty after force_merge. Free the page as dirty, then
> +	 * repeat the whole thing, increment the order until we hit the max_order.
> +	 */
> +
> +	order = 1;
> +	do {
> +		size = PAGE_SIZE << order;
> +		i = 0;
> +		n_pages = mm_size / ps;
> +		do {
> +			struct list_head *list;
> +			int slot = i % 2;
> +
> +			if (slot == 0)
> +				list = &dirty;
> +			else if (slot == 1)

else

> +				list = &clean;
> +
> +			KUNIT_ASSERT_FALSE_MSG(test,
> +					       drm_buddy_alloc_blocks(&mm, 0, mm_size,
> +								      ps, ps, list, 0),
> +					       "buddy_alloc hit an error size=%u\n",
> +					       ps);
> +		} while (++i < n_pages);

I think we only need to do this once at the beginning, and then just 
loop over each order starting from one? Otherwise on the first iteration 
here we fragment the entire address space, but then only allocate a 
single order=1 block. And then we repeat the whole fragmentation again, 
which seems unnecessary.
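
Roughly something like this (untested sketch, and assuming the u32 size 
and SZ_4K changes from above), i.e. fragment the whole space once up 
front and then only re-run the force-merge allocation per order:

	i = 0;
	n_pages = mm_size / ps;
	do {
		struct list_head *list = i % 2 ? &clean : &dirty;

		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, 0, mm_size,
							      ps, ps, list, 0),
				       "buddy_alloc hit an error size=%lu\n",
				       ps);
	} while (++i < n_pages);

	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
	drm_buddy_free_list(&mm, &dirty, 0);

	order = 1;
	do {
		size = SZ_4K << order;

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    size, size, &allocated,
								    DRM_BUDDY_CLEAR_ALLOCATION),
				       "buddy_alloc hit an error size=%u\n", size);

		total = 0;
		list_for_each_entry(block, &allocated, link) {
			KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
			total += drm_buddy_block_size(&mm, block);
		}
		KUNIT_EXPECT_EQ(test, total, size);

		drm_buddy_free_list(&mm, &allocated, 0);
	} while (++order <= max_order);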

> +
> +		drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
> +		drm_buddy_free_list(&mm, &dirty, 0);
> +
> +		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
> +								    size, size, &allocated,
> +								    DRM_BUDDY_CLEAR_ALLOCATION),
> +					"buddy_alloc hit an error size=%u\n", size);

size=%llu or better just make size u32.

> +		total = 0;
> +		list_for_each_entry(block, &allocated, link) {
> +			KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
> +			total += drm_buddy_block_size(&mm, block);
> +		}
> +		KUNIT_EXPECT_EQ(test, total, size);
> +
> +		drm_buddy_free_list(&mm, &allocated, 0);
> +	} while (++order <= max_order);

I think it would be good to also do some non-power-of-two mm size, just 
to ensure we get some coverage for the multi-root force_merge during 
fini. Something simple like: create a new mm here, allocate a random 
size, free as cleared, then call fini.
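
e.g. something like the below (untested; a fixed size rather than a 
random one, just for illustration), appended after the final 
drm_buddy_fini():

	mm_size = 12 * SZ_4K; /* non-power-of-two -> multiple roots */
	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
	KUNIT_ASSERT_FALSE(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							4 * ps, ps, &allocated, 0));
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
	drm_buddy_fini(&mm); /* exercises the multi-root force_merge */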

Looks good otherwise.

> +
> +	drm_buddy_fini(&mm);
> +}
> +
>   static void drm_test_buddy_alloc_contiguous(struct kunit *test)
>   {
>   	const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
> @@ -368,6 +494,7 @@ static struct kunit_case drm_buddy_tests[] = {
>   	KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
>   	KUNIT_CASE(drm_test_buddy_alloc_pathological),
>   	KUNIT_CASE(drm_test_buddy_alloc_contiguous),
> +	KUNIT_CASE(drm_test_buddy_alloc_clear),
>   	{}
>   };
>