diff mbox

[i-g-t] tests/gem_mmap_gtt: add huge BO test

Message ID 1428409388.22682.3.camel@jlahtine-mobl1 (mailing list archive)
State New, archived
Headers show

Commit Message

Joonas Lahtinen April 7, 2015, 12:23 p.m. UTC
Add a straightforward test that allocates a BO that is bigger than
(by 1 page currently) the mappable aperture, tests mmap access to it
by CPU directly and through GTT in sequence.

Currently it is expected for the GTT access to gracefully fail as
all objects are attempted to get pinned to GTT completely for mmap
access. Once the partial view support is merged to kernel, the test
should pass for all parts.

Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 tests/gem_mmap_gtt.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 68 insertions(+)

Comments

Chris Wilson April 7, 2015, 2:08 p.m. UTC | #1
On Tue, Apr 07, 2015 at 03:23:08PM +0300, Joonas Lahtinen wrote:
>  static void
> +test_huge_bo(int fd)
> +{
> +	uint32_t bo;
> +	char *ptr_cpu;
> +	char *ptr_gtt;
> +	char *cpu_pattern;
> +	uint64_t mappable_aperture_pages = gem_mappable_aperture_size() /
> +					   PAGE_SIZE;
> +	uint64_t huge_object_size = (mappable_aperture_pages + 1) * PAGE_SIZE;
> +	uint64_t last_offset = huge_object_size - PAGE_SIZE;
> +
> +	cpu_pattern = malloc(PAGE_SIZE);
> +	igt_assert(cpu_pattern);
> +	memset(cpu_pattern, 0xaa, PAGE_SIZE);
> +
> +	bo = gem_create(fd, huge_object_size);
> +
> +	ptr_cpu = gem_mmap__cpu(fd, bo, 0, huge_object_size,
> +				PROT_READ | PROT_WRITE);
> +	if (!ptr_cpu) {
> +		igt_warn("Not enough free memory for huge BO test!\n");
> +		goto out;
> +	}

Should be a set-domain(CPU, CPU) here.

> +	/* Test read/write to first/last page with CPU. */
> +	memcpy(ptr_cpu, cpu_pattern, PAGE_SIZE);
> +	igt_assert(memcmp(ptr_cpu, cpu_pattern, PAGE_SIZE) == 0);
> +
> +	memcpy(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE);
> +	igt_assert(memcmp(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE) == 0);
> +
> +	igt_assert(memcmp(ptr_cpu, ptr_cpu + last_offset, PAGE_SIZE) == 0);
> +
> +	munmap(ptr_cpu, huge_object_size);
> +	ptr_cpu = NULL;
> +
> +	ptr_gtt = gem_mmap__gtt(fd, bo, huge_object_size,
> +			        PROT_READ | PROT_WRITE);
> +	if (!ptr_gtt) {
> +		igt_debug("Huge BO GTT mapping not supported!\n");
> +		goto out;
> +	}
> +
> +	/* Test read/write to first/last page through GTT. */
> +	set_domain(fd, bo);
> +
> +	igt_assert(memcmp(ptr_gtt, cpu_pattern, PAGE_SIZE) == 0);
> +	igt_assert(memcmp(ptr_gtt + last_offset, cpu_pattern, PAGE_SIZE) == 0);
> +
> +	memset(ptr_gtt, 0x55, PAGE_SIZE);
> +	igt_assert(memcmp(ptr_gtt + last_offset, cpu_pattern, PAGE_SIZE) == 0);
> +
> +	memset(ptr_gtt + last_offset, 0x55, PAGE_SIZE);
> +	igt_assert(memcmp(ptr_gtt, ptr_gtt + last_offset, PAGE_SIZE) == 0);
> +
> +	munmap(ptr_gtt, huge_object_size);

And repeat the CPU sanity check (for 0x55). Perhaps using pread this time.

And tiling checks.
-Chris
Joonas Lahtinen April 8, 2015, 10:45 a.m. UTC | #2
Hi,

On ti, 2015-04-07 at 15:08 +0100, Chris Wilson wrote:
> On Tue, Apr 07, 2015 at 03:23:08PM +0300, Joonas Lahtinen wrote:
[snip]
> > +
> > +	bo = gem_create(fd, huge_object_size);
> > +
> > +	ptr_cpu = gem_mmap__cpu(fd, bo, 0, huge_object_size,
> > +				PROT_READ | PROT_WRITE);
> > +	if (!ptr_cpu) {
> > +		igt_warn("Not enough free memory for huge BO test!\n");
> > +		goto out;
> > +	}
> 
> Should be a set-domain(CPU, CPU) here.

Corrected, that went unnoticed when I moved the test to be last test.

> 
> > +	/* Test read/write to first/last page with CPU. */
> > +	memcpy(ptr_cpu, cpu_pattern, PAGE_SIZE);
> > +	igt_assert(memcmp(ptr_cpu, cpu_pattern, PAGE_SIZE) == 0);
> > +
> > +	memcpy(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE);
> > +	igt_assert(memcmp(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE) == 0);
> > +
> > +	igt_assert(memcmp(ptr_cpu, ptr_cpu + last_offset, PAGE_SIZE) == 0);
> > +
> > +	munmap(ptr_cpu, huge_object_size);
> > +	ptr_cpu = NULL;
> > +
> > +	ptr_gtt = gem_mmap__gtt(fd, bo, huge_object_size,
> > +			        PROT_READ | PROT_WRITE);
> > +	if (!ptr_gtt) {
> > +		igt_debug("Huge BO GTT mapping not supported!\n");
> > +		goto out;
> > +	}
> > +
> > +	/* Test read/write to first/last page through GTT. */
> > +	set_domain(fd, bo);
> > +
> > +	igt_assert(memcmp(ptr_gtt, cpu_pattern, PAGE_SIZE) == 0);
> > +	igt_assert(memcmp(ptr_gtt + last_offset, cpu_pattern, PAGE_SIZE) == 0);
> > +
> > +	memset(ptr_gtt, 0x55, PAGE_SIZE);
> > +	igt_assert(memcmp(ptr_gtt + last_offset, cpu_pattern, PAGE_SIZE) == 0);
> > +
> > +	memset(ptr_gtt + last_offset, 0x55, PAGE_SIZE);
> > +	igt_assert(memcmp(ptr_gtt, ptr_gtt + last_offset, PAGE_SIZE) == 0);
> > +
> > +	munmap(ptr_gtt, huge_object_size);
> 
> And repeat the CPU sanity check (for 0x55). Perhaps using pread this time.
> 

I stuck to mmap in this test. Initial partial views revision will not
have fencing as agreed with Daniel, just checks to bail out if somebody
attempts to partially map a tiled buffer because it's going to
require reworking all the fence calculation functions to calculate the
stride based on view size and not buffer size.

Will send a new patch (two actually) shortly.

Regards, Joonas

> And tiling checks.
> -Chris
>
Tvrtko Ursulin April 13, 2015, 11:32 a.m. UTC | #3
Hi,

On 04/07/2015 01:23 PM, Joonas Lahtinen wrote:
> Add a straightforward test that allocates a BO that is bigger than
> (by 1 page currently) the mappable aperture, tests mmap access to it
> by CPU directly and through GTT in sequence.
>
> Currently it is expected for the GTT access to gracefully fail as
> all objects are attempted to get pinned to GTT completely for mmap
> access. Once the partial view support is merged to kernel, the test
> should pass for all parts.
>
> Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> ---
>   tests/gem_mmap_gtt.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>   1 file changed, 68 insertions(+)
>
> diff --git a/tests/gem_mmap_gtt.c b/tests/gem_mmap_gtt.c
> index 55c66a2..bf3627c 100644
> --- a/tests/gem_mmap_gtt.c
> +++ b/tests/gem_mmap_gtt.c
> @@ -41,6 +41,10 @@
>   #include "drmtest.h"
>   #include "igt_debugfs.h"
>
> +#ifndef PAGE_SIZE
> +#define PAGE_SIZE 4096
> +#endif
> +
>   static int OBJECT_SIZE = 16*1024*1024;
>
>   static void set_domain(int fd, uint32_t handle)
> @@ -258,6 +262,68 @@ test_write_gtt(int fd)
>   }
>
>   static void
> +test_huge_bo(int fd)
> +{
> +	uint32_t bo;
> +	char *ptr_cpu;
> +	char *ptr_gtt;
> +	char *cpu_pattern;
> +	uint64_t mappable_aperture_pages = gem_mappable_aperture_size() /
> +					   PAGE_SIZE;
> +	uint64_t huge_object_size = (mappable_aperture_pages + 1) * PAGE_SIZE;
> +	uint64_t last_offset = huge_object_size - PAGE_SIZE;
> +
> +	cpu_pattern = malloc(PAGE_SIZE);
> +	igt_assert(cpu_pattern);

I'd be tempted to use 4k from the stack for simplicity.

> +	memset(cpu_pattern, 0xaa, PAGE_SIZE);
> +
> +	bo = gem_create(fd, huge_object_size);
> +
> +	ptr_cpu = gem_mmap__cpu(fd, bo, 0, huge_object_size,
> +				PROT_READ | PROT_WRITE);
> +	if (!ptr_cpu) {
> +		igt_warn("Not enough free memory for huge BO test!\n");
> +		goto out;

Free address space or free memory?

Also, igt_require so test skips in that case?

> +	}
> +
> +	/* Test read/write to first/last page with CPU. */
> +	memcpy(ptr_cpu, cpu_pattern, PAGE_SIZE);
> +	igt_assert(memcmp(ptr_cpu, cpu_pattern, PAGE_SIZE) == 0);
> +
> +	memcpy(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE);
> +	igt_assert(memcmp(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE) == 0);
> +
> +	igt_assert(memcmp(ptr_cpu, ptr_cpu + last_offset, PAGE_SIZE) == 0);
> +
> +	munmap(ptr_cpu, huge_object_size);
> +	ptr_cpu = NULL;
> +
> +	ptr_gtt = gem_mmap__gtt(fd, bo, huge_object_size,
> +			        PROT_READ | PROT_WRITE);
> +	if (!ptr_gtt) {
> +		igt_debug("Huge BO GTT mapping not supported!\n");
> +		goto out;

igt_require as above? Hm, although ideally test would be able to detect 
the feature (once it is added to the kernel) so it could assert here.

> +	}
> +
> +	/* Test read/write to first/last page through GTT. */
> +	set_domain(fd, bo);
> +
> +	igt_assert(memcmp(ptr_gtt, cpu_pattern, PAGE_SIZE) == 0);
> +	igt_assert(memcmp(ptr_gtt + last_offset, cpu_pattern, PAGE_SIZE) == 0);
> +
> +	memset(ptr_gtt, 0x55, PAGE_SIZE);
> +	igt_assert(memcmp(ptr_gtt + last_offset, cpu_pattern, PAGE_SIZE) == 0);
> +
> +	memset(ptr_gtt + last_offset, 0x55, PAGE_SIZE);
> +	igt_assert(memcmp(ptr_gtt, ptr_gtt + last_offset, PAGE_SIZE) == 0);

Comments for the above would be nice just to explain what is being 
tested and how.

Won't the last test have side effects with partial views since it is 
accessing beginning and end of the object? Would it be better to memcmp 
against a pattern on stack or in heap like cpu_pattern?

Will you support two simultaneous partial views or the last memcmp will 
cause a lot of partial view creation/destruction?

> +
> +	munmap(ptr_gtt, huge_object_size);
> +out:
> +	gem_close(fd, bo);
> +	free(cpu_pattern);
> +}
> +
> +static void
>   test_read(int fd)
>   {
>   	void *dst;
> @@ -395,6 +461,8 @@ igt_main
>   		run_without_prefault(fd, test_write_gtt);
>   	igt_subtest("write-cpu-read-gtt")
>   		test_write_cpu_read_gtt(fd);
> +	igt_subtest("huge-bo")
> +		test_huge_bo(fd);
>
>   	igt_fixture
>   		close(fd);
>

Regards,

Tvrtko
Joonas Lahtinen April 13, 2015, 2:22 p.m. UTC | #4
On ma, 2015-04-13 at 12:32 +0100, Tvrtko Ursulin wrote:
> Hi,
> 
> On 04/07/2015 01:23 PM, Joonas Lahtinen wrote:
> > Add a straightforward test that allocates a BO that is bigger than
> > (by 1 page currently) the mappable aperture, tests mmap access to it
> > by CPU directly and through GTT in sequence.
> >
> > Currently it is expected for the GTT access to gracefully fail as
> > all objects are attempted to get pinned to GTT completely for mmap
> > access. Once the partial view support is merged to kernel, the test
> > should pass for all parts.
> >
> > Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> > ---
> >   tests/gem_mmap_gtt.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++++
> >   1 file changed, 68 insertions(+)
> >
> > diff --git a/tests/gem_mmap_gtt.c b/tests/gem_mmap_gtt.c
> > index 55c66a2..bf3627c 100644
> > --- a/tests/gem_mmap_gtt.c
> > +++ b/tests/gem_mmap_gtt.c
> > @@ -41,6 +41,10 @@
> >   #include "drmtest.h"
> >   #include "igt_debugfs.h"
> >
> > +#ifndef PAGE_SIZE
> > +#define PAGE_SIZE 4096
> > +#endif
> > +
> >   static int OBJECT_SIZE = 16*1024*1024;
> >
> >   static void set_domain(int fd, uint32_t handle)
> > @@ -258,6 +262,68 @@ test_write_gtt(int fd)
> >   }
> >
> >   static void
> > +test_huge_bo(int fd)
> > +{
> > +	uint32_t bo;
> > +	char *ptr_cpu;
> > +	char *ptr_gtt;
> > +	char *cpu_pattern;
> > +	uint64_t mappable_aperture_pages = gem_mappable_aperture_size() /
> > +					   PAGE_SIZE;
> > +	uint64_t huge_object_size = (mappable_aperture_pages + 1) * PAGE_SIZE;
> > +	uint64_t last_offset = huge_object_size - PAGE_SIZE;
> > +
> > +	cpu_pattern = malloc(PAGE_SIZE);
> > +	igt_assert(cpu_pattern);
> 
> I'd be tempted to use 4k from the stack for simplicity.

It's not a nice thing to allocate two 4k objects from stack. So lets
just not.

> > +	memset(cpu_pattern, 0xaa, PAGE_SIZE);
> > +
> > +	bo = gem_create(fd, huge_object_size);
> > +
> > +	ptr_cpu = gem_mmap__cpu(fd, bo, 0, huge_object_size,
> > +				PROT_READ | PROT_WRITE);
> > +	if (!ptr_cpu) {
> > +		igt_warn("Not enough free memory for huge BO test!\n");
> > +		goto out;
> 
> Free address space or free memory?
> 

It is not really relevant to the test which condition caused it. But
yeah, correcting the error message into 'Can not allocate memory'.

> Also, igt_require so test skips in that case?
> 

Ack using igt_require_f. Because the condition is a bit unclear without
the text.

> > +	}
> > +
> > +	/* Test read/write to first/last page with CPU. */
> > +	memcpy(ptr_cpu, cpu_pattern, PAGE_SIZE);
> > +	igt_assert(memcmp(ptr_cpu, cpu_pattern, PAGE_SIZE) == 0);
> > +
> > +	memcpy(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE);
> > +	igt_assert(memcmp(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE) == 0);
> > +
> > +	igt_assert(memcmp(ptr_cpu, ptr_cpu + last_offset, PAGE_SIZE) == 0);
> > +
> > +	munmap(ptr_cpu, huge_object_size);
> > +	ptr_cpu = NULL;
> > +
> > +	ptr_gtt = gem_mmap__gtt(fd, bo, huge_object_size,
> > +			        PROT_READ | PROT_WRITE);
> > +	if (!ptr_gtt) {
> > +		igt_debug("Huge BO GTT mapping not supported!\n");
> > +		goto out;
> 
> igt_require as above? Hm, although ideally test would be able to detect 
> the feature (once it is added to the kernel) so it could assert here.
> 

I think the point is somewhat that UMP should not need to know/care
about it. Before introducing the feature the above will always fail, and
after introducing it, it will always succeed (unless there is less than
1MB aperture space available). So I think it should be good as it is.

> > +	}
> > +
> > +	/* Test read/write to first/last page through GTT. */
> > +	set_domain(fd, bo);
> > +
> > +	igt_assert(memcmp(ptr_gtt, cpu_pattern, PAGE_SIZE) == 0);
> > +	igt_assert(memcmp(ptr_gtt + last_offset, cpu_pattern, PAGE_SIZE) == 0);
> > +
> > +	memset(ptr_gtt, 0x55, PAGE_SIZE);
> > +	igt_assert(memcmp(ptr_gtt + last_offset, cpu_pattern, PAGE_SIZE) == 0);
> > +
> > +	memset(ptr_gtt + last_offset, 0x55, PAGE_SIZE);
> > +	igt_assert(memcmp(ptr_gtt, ptr_gtt + last_offset, PAGE_SIZE) == 0);
> 
> Comments for the above would be nice just to explain what is being 
> tested and how.
> 

The level of commenting was higher already than I noticed to be in other
tests, but I'll add a few more.

> Won't the last test have side effects with partial views since it is 
> accessing beginning and end of the object? Would it be better to memcmp 
> against a pattern on stack or in heap like cpu_pattern?
> 
> Will you support two simultaneous partial views or the last memcmp will 
> cause a lot of partial view creation/destruction?

Yes, there will be multiple partial views, but it's all internal to the
kernel implementation. Above access pattern should be supported.

Regards, Joonas

> 
> > +
> > +	munmap(ptr_gtt, huge_object_size);
> > +out:
> > +	gem_close(fd, bo);
> > +	free(cpu_pattern);
> > +}
> > +
> > +static void
> >   test_read(int fd)
> >   {
> >   	void *dst;
> > @@ -395,6 +461,8 @@ igt_main
> >   		run_without_prefault(fd, test_write_gtt);
> >   	igt_subtest("write-cpu-read-gtt")
> >   		test_write_cpu_read_gtt(fd);
> > +	igt_subtest("huge-bo")
> > +		test_huge_bo(fd);
> >
> >   	igt_fixture
> >   		close(fd);
> >
> 
> Regards,
> 
> Tvrtko
Tvrtko Ursulin April 13, 2015, 2:49 p.m. UTC | #5
On 04/13/2015 03:22 PM, Joonas Lahtinen wrote:
> On ma, 2015-04-13 at 12:32 +0100, Tvrtko Ursulin wrote:
>> Hi,
>>
>> On 04/07/2015 01:23 PM, Joonas Lahtinen wrote:
>>> Add a straightforward test that allocates a BO that is bigger than
>>> (by 1 page currently) the mappable aperture, tests mmap access to it
>>> by CPU directly and through GTT in sequence.
>>>
>>> Currently it is expected for the GTT access to gracefully fail as
>>> all objects are attempted to get pinned to GTT completely for mmap
>>> access. Once the partial view support is merged to kernel, the test
>>> should pass for all parts.
>>>
>>> Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
>>> ---
>>>    tests/gem_mmap_gtt.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>>>    1 file changed, 68 insertions(+)
>>>
>>> diff --git a/tests/gem_mmap_gtt.c b/tests/gem_mmap_gtt.c
>>> index 55c66a2..bf3627c 100644
>>> --- a/tests/gem_mmap_gtt.c
>>> +++ b/tests/gem_mmap_gtt.c
>>> @@ -41,6 +41,10 @@
>>>    #include "drmtest.h"
>>>    #include "igt_debugfs.h"
>>>
>>> +#ifndef PAGE_SIZE
>>> +#define PAGE_SIZE 4096
>>> +#endif
>>> +
>>>    static int OBJECT_SIZE = 16*1024*1024;
>>>
>>>    static void set_domain(int fd, uint32_t handle)
>>> @@ -258,6 +262,68 @@ test_write_gtt(int fd)
>>>    }
>>>
>>>    static void
>>> +test_huge_bo(int fd)
>>> +{
>>> +	uint32_t bo;
>>> +	char *ptr_cpu;
>>> +	char *ptr_gtt;
>>> +	char *cpu_pattern;
>>> +	uint64_t mappable_aperture_pages = gem_mappable_aperture_size() /
>>> +					   PAGE_SIZE;
>>> +	uint64_t huge_object_size = (mappable_aperture_pages + 1) * PAGE_SIZE;
>>> +	uint64_t last_offset = huge_object_size - PAGE_SIZE;
>>> +
>>> +	cpu_pattern = malloc(PAGE_SIZE);
>>> +	igt_assert(cpu_pattern);
>>
>> I'd be tempted to use 4k from the stack for simplicity.
>
> It's not a nice thing to allocate two 4k objects from stack. So lets
> just not.

Why not? It's not kernel stack but 8MiB default for a simple IGT... and 
changelog to v3 says otherwise. ;) But ok..

>>> +	memset(cpu_pattern, 0xaa, PAGE_SIZE);
>>> +
>>> +	bo = gem_create(fd, huge_object_size);
>>> +
>>> +	ptr_cpu = gem_mmap__cpu(fd, bo, 0, huge_object_size,
>>> +				PROT_READ | PROT_WRITE);
>>> +	if (!ptr_cpu) {
>>> +		igt_warn("Not enough free memory for huge BO test!\n");
>>> +		goto out;
>>
>> Free address space or free memory?
>>
>
> It is not really relevant to the test which condition caused it. But
> yeah, correcting the error message into 'Can not allocate memory'.

Is it really memory and not address space?

>> Also, igt_require so test skips in that case?
>>
>
> Ack using igt_require_f. Because the condition is bit unclear without
> the text.
>
>>> +	}
>>> +
>>> +	/* Test read/write to first/last page with CPU. */
>>> +	memcpy(ptr_cpu, cpu_pattern, PAGE_SIZE);
>>> +	igt_assert(memcmp(ptr_cpu, cpu_pattern, PAGE_SIZE) == 0);
>>> +
>>> +	memcpy(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE);
>>> +	igt_assert(memcmp(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE) == 0);
>>> +
>>> +	igt_assert(memcmp(ptr_cpu, ptr_cpu + last_offset, PAGE_SIZE) == 0);
>>> +
>>> +	munmap(ptr_cpu, huge_object_size);
>>> +	ptr_cpu = NULL;
>>> +
>>> +	ptr_gtt = gem_mmap__gtt(fd, bo, huge_object_size,
>>> +			        PROT_READ | PROT_WRITE);
>>> +	if (!ptr_gtt) {
>>> +		igt_debug("Huge BO GTT mapping not supported!\n");
>>> +		goto out;
>>
>> igt_require as above? Hm, although ideally test would be able to detect
>> the feature (once it is added to the kernel) so it could assert here.
>>
>
> I think the point is somewhat that UMP should not need to know/care
> about it. Before introducing the feature the above will always fail, and
> after introducing it, it will always succeed (unless there is less than
> 1MB aperture space available). So I think it should be good as it is.

I suppose there isn't really a way to be smarter in this case.

>>> +	}
>>> +
>>> +	/* Test read/write to first/last page through GTT. */
>>> +	set_domain(fd, bo);
>>> +
>>> +	igt_assert(memcmp(ptr_gtt, cpu_pattern, PAGE_SIZE) == 0);
>>> +	igt_assert(memcmp(ptr_gtt + last_offset, cpu_pattern, PAGE_SIZE) == 0);
>>> +
>>> +	memset(ptr_gtt, 0x55, PAGE_SIZE);
>>> +	igt_assert(memcmp(ptr_gtt + last_offset, cpu_pattern, PAGE_SIZE) == 0);
>>> +
>>> +	memset(ptr_gtt + last_offset, 0x55, PAGE_SIZE);
>>> +	igt_assert(memcmp(ptr_gtt, ptr_gtt + last_offset, PAGE_SIZE) == 0);
>>
>> Comments for the above would be nice just to explain what is being
>> tested and how.
>>
>
> The level of commenting was higher already than I noticed to be in other
> tests, but I'll add a few more.

Thanks, it's best to lead by example!

>> Won't the last test have side effects with partial views since it is
>> accessing beginning and end of the object? Would it be better to memcmp
>> against a pattern on stack or in heap like cpu_pattern?
>>
>> Will you support two simultaneous partial views or the last memcmp will
>> cause a lot of partial view creation/destruction?
>
> Yes, there will be multiple partial views, but it's all internal to the
> kernel implementation. Above access pattern should be supported.

Very well then!

Regards,

Tvrtko
diff mbox

Patch

diff --git a/tests/gem_mmap_gtt.c b/tests/gem_mmap_gtt.c
index 55c66a2..bf3627c 100644
--- a/tests/gem_mmap_gtt.c
+++ b/tests/gem_mmap_gtt.c
@@ -41,6 +41,10 @@ 
 #include "drmtest.h"
 #include "igt_debugfs.h"
 
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
 static int OBJECT_SIZE = 16*1024*1024;
 
 static void set_domain(int fd, uint32_t handle)
@@ -258,6 +262,68 @@  test_write_gtt(int fd)
 }
 
 static void
+test_huge_bo(int fd)
+{
+	uint32_t bo;
+	char *ptr_cpu;
+	char *ptr_gtt;
+	char *cpu_pattern;
+	uint64_t mappable_aperture_pages = gem_mappable_aperture_size() /
+					   PAGE_SIZE;
+	uint64_t huge_object_size = (mappable_aperture_pages + 1) * PAGE_SIZE;
+	uint64_t last_offset = huge_object_size - PAGE_SIZE;
+
+	cpu_pattern = malloc(PAGE_SIZE);
+	igt_assert(cpu_pattern);
+	memset(cpu_pattern, 0xaa, PAGE_SIZE);
+
+	bo = gem_create(fd, huge_object_size);
+
+	ptr_cpu = gem_mmap__cpu(fd, bo, 0, huge_object_size,
+				PROT_READ | PROT_WRITE);
+	if (!ptr_cpu) {
+		igt_warn("Not enough free memory for huge BO test!\n");
+		goto out;
+	}
+
+	/* Test read/write to first/last page with CPU. */
+	memcpy(ptr_cpu, cpu_pattern, PAGE_SIZE);
+	igt_assert(memcmp(ptr_cpu, cpu_pattern, PAGE_SIZE) == 0);
+
+	memcpy(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE);
+	igt_assert(memcmp(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE) == 0);
+
+	igt_assert(memcmp(ptr_cpu, ptr_cpu + last_offset, PAGE_SIZE) == 0);
+
+	munmap(ptr_cpu, huge_object_size);
+	ptr_cpu = NULL;
+
+	ptr_gtt = gem_mmap__gtt(fd, bo, huge_object_size,
+			        PROT_READ | PROT_WRITE);
+	if (!ptr_gtt) {
+		igt_debug("Huge BO GTT mapping not supported!\n");
+		goto out;
+	}
+
+	/* Test read/write to first/last page through GTT. */
+	set_domain(fd, bo);
+
+	igt_assert(memcmp(ptr_gtt, cpu_pattern, PAGE_SIZE) == 0);
+	igt_assert(memcmp(ptr_gtt + last_offset, cpu_pattern, PAGE_SIZE) == 0);
+
+	memset(ptr_gtt, 0x55, PAGE_SIZE);
+	igt_assert(memcmp(ptr_gtt + last_offset, cpu_pattern, PAGE_SIZE) == 0);
+
+	memset(ptr_gtt + last_offset, 0x55, PAGE_SIZE);
+	igt_assert(memcmp(ptr_gtt, ptr_gtt + last_offset, PAGE_SIZE) == 0);
+
+	munmap(ptr_gtt, huge_object_size);
+out:
+	gem_close(fd, bo);
+	free(cpu_pattern);
+}
+
+static void
 test_read(int fd)
 {
 	void *dst;
@@ -395,6 +461,8 @@  igt_main
 		run_without_prefault(fd, test_write_gtt);
 	igt_subtest("write-cpu-read-gtt")
 		test_write_cpu_read_gtt(fd);
+	igt_subtest("huge-bo")
+		test_huge_bo(fd);
 
 	igt_fixture
 		close(fd);