diff mbox series

[i-g-t,2/2] i915/sysfs_clients: Check that client ids are cyclic

Message ID 20210126130505.2938391-2-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Headers show
Series [i-g-t,1/2] i915/sysfs_client: Ignore clients being closed as we read their sysfs | expand

Commit Message

Chris Wilson Jan. 26, 2021, 1:05 p.m. UTC
The client id used is a cyclic allocator as that reduces the likelihood
of userspace seeing the same id used again (and so confusing the new
client as the old). Verify that each new client has an id greater than
the last.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
---
 tests/i915/sysfs_clients.c | 129 +++++++++++++++++++++++++++++++------
 1 file changed, 108 insertions(+), 21 deletions(-)

Comments

Tvrtko Ursulin Jan. 26, 2021, 4:47 p.m. UTC | #1
On 26/01/2021 13:05, Chris Wilson wrote:
> The client id used is a cyclic allocator as that reduces the likelihood
> of userspace seeing the same id used again (and so confusing the new
> client as the old). Verify that each new client has an id greater than
> the last.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
> ---
>   tests/i915/sysfs_clients.c | 129 +++++++++++++++++++++++++++++++------
>   1 file changed, 108 insertions(+), 21 deletions(-)
> 
> diff --git a/tests/i915/sysfs_clients.c b/tests/i915/sysfs_clients.c
> index a3a1f81e1..d2c1ebc5f 100644
> --- a/tests/i915/sysfs_clients.c
> +++ b/tests/i915/sysfs_clients.c
> @@ -8,6 +8,7 @@
>   #include <errno.h>
>   #include <fcntl.h>
>   #include <inttypes.h>
> +#include <limits.h>
>   #include <math.h>
>   #include <sched.h>
>   #include <sys/socket.h>
> @@ -47,6 +48,8 @@
>   #define assert_within_epsilon(x, ref, tolerance) \
>   	__assert_within_epsilon(x, ref, tolerance / 100., tolerance / 100.)
>   
> +#define BUFSZ 280
> +
>   #define MI_BATCH_BUFFER_START (0x31 << 23)
>   #define MI_BATCH_BUFFER_END (0xa << 23)
>   #define MI_ARB_CHECK (0x5 << 23)
> @@ -75,7 +78,7 @@ static void pidname(int i915, int clients)
>   {
>   	struct dirent *de;
>   	int sv[2], rv[2];
> -	char buf[280];
> +	char buf[BUFSZ];
>   	int me = -1;
>   	long count;
>   	pid_t pid;
> @@ -180,7 +183,7 @@ static long count_clients(int clients)
>   {
>   	struct dirent *de;
>   	long count = 0;
> -	char buf[280];
> +	char buf[BUFSZ];
>   	DIR *dir;
>   
>   	dir = fdopendir(dup(clients));
> @@ -229,32 +232,113 @@ static void create(int i915, int clients)
>   	igt_assert_eq(count_clients(clients), 1);
>   }
>   
> +static const char *find_client(int clients, pid_t pid, char *buf)
> +{
> +	DIR *dir = fdopendir(dup(clients));
> +
> +	/* Reading a dir as it changes does not appear to be stable, SEP */

Oh yes, it is POSIX undefined as far as I could figure out. You are 
creating/destroying clients while reading the dir from different 
threads? I think best might be to introduce explicit sync points between 
those two entities to make this reliable. In another review for the same 
problem I suggested pipes for implementing this handshake. Along the 
lines of "let child open/close some processes, make it wait for parent; 
parent scans sysfs, signals child to open/close some more; repeat for a 
while".

Regards,

Tvrtko

> +	for (int pass = 0; pass < 2; pass++) {
> +		struct dirent *de;
> +
> +		rewinddir(dir);
> +		while ((de = readdir(dir))) {
> +			if (!isdigit(de->d_name[0]))
> +				continue;
> +
> +			snprintf(buf, BUFSZ, "%s/pid", de->d_name);
> +			igt_sysfs_read(clients, buf, buf, sizeof(buf));
> +			if (atoi(buf) != pid)
> +				continue;
> +
> +			strncpy(buf, de->d_name, BUFSZ);
> +			goto out;
> +		}
> +	}
> +	*buf = '\0';
> +out:
> +	closedir(dir);
> +	return buf;
> +}
> +
>   static int find_me(int clients, pid_t pid)
>   {
> -	struct dirent *de;
> -	char buf[280];
> -	int me = -1;
> -	DIR *dir;
> +	char buf[BUFSZ];
>   
> -	dir = fdopendir(dup(clients));
> -	igt_assert(dir);
> -	rewinddir(dir);
> +	return openat(clients,
> +		      find_client(clients, pid, buf),
> +		      O_DIRECTORY | O_RDONLY);
> +}
>   
> -	while ((de = readdir(dir))) {
> -		if (!isdigit(de->d_name[0]))
> -			continue;
> +static int reopen_directory(int fd)
> +{
> +	char buf[BUFSZ];
> +	int dir;
>   
> -		snprintf(buf, sizeof(buf), "%s/pid", de->d_name);
> -		igt_sysfs_read(clients, buf, buf, sizeof(buf));
> -		if (atoi(buf) != pid)
> -			continue;
> +	snprintf(buf, sizeof(buf), "/proc/self/fd/%d", fd);
> +	dir = open(buf, O_RDONLY);
> +	igt_assert_fd(dir);
>   
> -		me = openat(clients, de->d_name, O_DIRECTORY | O_RDONLY);
> -		break;
> +	return dir;
> +}
> +
> +static unsigned int my_id(int clients, pid_t pid)
> +{
> +	char buf[BUFSZ];
> +
> +	return atoi(find_client(clients, pid, buf));
> +}
> +
> +static unsigned int recycle_client(int i915, int clients)
> +{
> +	int device, client;
> +
> +	device = gem_reopen_driver(i915);
> +	client = my_id(clients, getpid());
> +	close(device);
> +
> +	return client;
> +}
> +
> +static void recycle(int i915, int clients)
> +{
> +	const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
> +
> +	/*
> +	 * As we open and close clients, we do not expect to reuse old ids,
> +	 * i.e. we use a cyclic ida. This reduces the likelihood of userspace
> +	 * watchers becoming confused and mistaking the new client as a
> +	 * continuation of the old.
> +	 */
> +	igt_require(my_id(clients, getpid()) < INT_MAX / 2);
> +	igt_assert(my_id(clients, getpid()));
> +
> +	igt_fork(child, 2 * ncpus) {
> +		unsigned int client, last;
> +
> +		/* Reopen the directory fd for each client */
> +		clients = reopen_directory(clients);
> +
> +		last = recycle_client(i915, clients);
> +		igt_info("Child[%d] first client:%d\n", getpid(), last);
> +		igt_until_timeout(5) {
> +			client = recycle_client(i915, clients);
> +
> +			/* In 5 seconds we are not going to exhaust the ids */
> +			igt_assert(client > last);
> +			last = client;
> +		}
> +		igt_info("Child[%d] last client:%d\n", getpid(), last);
>   	}
> +	igt_waitchildren();
>   
> -	closedir(dir);
> -	return me;
> +	/* Cleanup delayed behind rcu */
> +	igt_until_timeout(30) {
> +		sched_yield();
> +		if (count_clients(clients) == 1)
> +			break;
> +		usleep(10000);
> +	}
> +	igt_assert_eq(count_clients(clients), 1);
>   }
>   
>   static int64_t read_runtime(int client, int class)
> @@ -719,7 +803,7 @@ sema(int i915, int clients, const struct intel_execution_engine2 *e, int f)
>   static int read_all(int clients, pid_t pid, int class, uint64_t *runtime)
>   {
>   	struct dirent *de;
> -	char buf[280];
> +	char buf[BUFSZ];
>   	int count = 0;
>   	DIR *dir;
>   
> @@ -958,6 +1042,9 @@ igt_main
>   	igt_subtest("create")
>   		create(i915, clients);
>   
> +	igt_subtest("recycle")
> +		recycle(i915, clients);
> +
>   	igt_subtest_group
>   		test_busy(i915, clients);
>   
>
Chris Wilson Jan. 26, 2021, 10:04 p.m. UTC | #2
Quoting Tvrtko Ursulin (2021-01-26 16:47:14)
> 
> On 26/01/2021 13:05, Chris Wilson wrote:
> > The client id used is a cyclic allocator as that reduces the likelihood
> > of userspace seeing the same id used again (and so confusing the new
> > client as the old). Verify that each new client has an id greater than
> > the last.
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
> > ---
> >   tests/i915/sysfs_clients.c | 129 +++++++++++++++++++++++++++++++------
> >   1 file changed, 108 insertions(+), 21 deletions(-)
> > 
> > diff --git a/tests/i915/sysfs_clients.c b/tests/i915/sysfs_clients.c
> > index a3a1f81e1..d2c1ebc5f 100644
> > --- a/tests/i915/sysfs_clients.c
> > +++ b/tests/i915/sysfs_clients.c
> > @@ -8,6 +8,7 @@
> >   #include <errno.h>
> >   #include <fcntl.h>
> >   #include <inttypes.h>
> > +#include <limits.h>
> >   #include <math.h>
> >   #include <sched.h>
> >   #include <sys/socket.h>
> > @@ -47,6 +48,8 @@
> >   #define assert_within_epsilon(x, ref, tolerance) \
> >       __assert_within_epsilon(x, ref, tolerance / 100., tolerance / 100.)
> >   
> > +#define BUFSZ 280
> > +
> >   #define MI_BATCH_BUFFER_START (0x31 << 23)
> >   #define MI_BATCH_BUFFER_END (0xa << 23)
> >   #define MI_ARB_CHECK (0x5 << 23)
> > @@ -75,7 +78,7 @@ static void pidname(int i915, int clients)
> >   {
> >       struct dirent *de;
> >       int sv[2], rv[2];
> > -     char buf[280];
> > +     char buf[BUFSZ];
> >       int me = -1;
> >       long count;
> >       pid_t pid;
> > @@ -180,7 +183,7 @@ static long count_clients(int clients)
> >   {
> >       struct dirent *de;
> >       long count = 0;
> > -     char buf[280];
> > +     char buf[BUFSZ];
> >       DIR *dir;
> >   
> >       dir = fdopendir(dup(clients));
> > @@ -229,32 +232,113 @@ static void create(int i915, int clients)
> >       igt_assert_eq(count_clients(clients), 1);
> >   }
> >   
> > +static const char *find_client(int clients, pid_t pid, char *buf)
> > +{
> > +     DIR *dir = fdopendir(dup(clients));
> > +
> > +     /* Reading a dir as it changes does not appear to be stable, SEP */
> 
> Oh yes, it is POSIX undefined as far as I could figure out. You are 
> creating/destroying clients while reading the dir from different 
> threads?

Different processes on different directory fd. man readdir(3) does say
that glibc is threadsafe, but that is not a requirement of POSIX. The
problem we are seeing is that the directory contents are not stable
around the readdir loop as clients are being created/destroyed, causing
dirents to be missed.

stracing the problem shows that glibc used a 32KiB buffer for getdents
and only a single syscall was required to grab the entire directory. No
errors were seen before the missed dirent. It just seemed to be missing.

Afaict there is no delay in creating the sysfs entry, and I think there
should also be no delay in creating the kernfs node and inodes, so my
initial assumption is that it's something not quite happy in the
kernfs directory that shows up under stress. A second loop ran for a
long time without incident locally, but CI seems much more adept at
finding it.

> I think best might be to introduce explicit sync points between 
> those two entities to make this reliable. In another review for the same 
> problem I suggested pipes for implementing this handshake. Along the 
> lines of "let child open/close some processes, make it wait for parent; 
> parent scans sysfs, signals child to open/close some more; repeat for a 
> while".

Sadly, I don't expect userspace to do that :)

I do expect userspace to search for its own client/ upon creation, and I
expect there to be many clients being opened at once. So I think so long
as the use of the library readdir is correct (distinct processes with
distinct directory fd, so I think there's no accidental sharing) this
represents. Hmm. fsync(dirfd).
-Chris
Chris Wilson Jan. 26, 2021, 10:07 p.m. UTC | #3
Quoting Chris Wilson (2021-01-26 22:04:35)
> I do expect userspace to search for its own client/ upon creation, and I
> expect there to be many clients being opened at once. So I think so long
> as the use of the library readdir is correct (distinct processes with
> distinct directory fd, so I think there's no accidental sharing) this
> represents. Hmm. fsync(dirfd).

fsync(dirfd) makes no difference.
-Chris
Tvrtko Ursulin Jan. 27, 2021, 9:12 a.m. UTC | #4
On 26/01/2021 22:04, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2021-01-26 16:47:14)
>> On 26/01/2021 13:05, Chris Wilson wrote:
>>> The client id used is a cyclic allocator as that reduces the likelihood
>>> of userspace seeing the same id used again (and so confusing the new
>>> client as the old). Verify that each new client has an id greater than
>>> the last.
>>>
>>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>>> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
>>> ---
>>>    tests/i915/sysfs_clients.c | 129 +++++++++++++++++++++++++++++++------
>>>    1 file changed, 108 insertions(+), 21 deletions(-)
>>>
>>> diff --git a/tests/i915/sysfs_clients.c b/tests/i915/sysfs_clients.c
>>> index a3a1f81e1..d2c1ebc5f 100644
>>> --- a/tests/i915/sysfs_clients.c
>>> +++ b/tests/i915/sysfs_clients.c
>>> @@ -8,6 +8,7 @@
>>>    #include <errno.h>
>>>    #include <fcntl.h>
>>>    #include <inttypes.h>
>>> +#include <limits.h>
>>>    #include <math.h>
>>>    #include <sched.h>
>>>    #include <sys/socket.h>
>>> @@ -47,6 +48,8 @@
>>>    #define assert_within_epsilon(x, ref, tolerance) \
>>>        __assert_within_epsilon(x, ref, tolerance / 100., tolerance / 100.)
>>>    
>>> +#define BUFSZ 280
>>> +
>>>    #define MI_BATCH_BUFFER_START (0x31 << 23)
>>>    #define MI_BATCH_BUFFER_END (0xa << 23)
>>>    #define MI_ARB_CHECK (0x5 << 23)
>>> @@ -75,7 +78,7 @@ static void pidname(int i915, int clients)
>>>    {
>>>        struct dirent *de;
>>>        int sv[2], rv[2];
>>> -     char buf[280];
>>> +     char buf[BUFSZ];
>>>        int me = -1;
>>>        long count;
>>>        pid_t pid;
>>> @@ -180,7 +183,7 @@ static long count_clients(int clients)
>>>    {
>>>        struct dirent *de;
>>>        long count = 0;
>>> -     char buf[280];
>>> +     char buf[BUFSZ];
>>>        DIR *dir;
>>>    
>>>        dir = fdopendir(dup(clients));
>>> @@ -229,32 +232,113 @@ static void create(int i915, int clients)
>>>        igt_assert_eq(count_clients(clients), 1);
>>>    }
>>>    
>>> +static const char *find_client(int clients, pid_t pid, char *buf)
>>> +{
>>> +     DIR *dir = fdopendir(dup(clients));
>>> +
>>> +     /* Reading a dir as it changes does not appear to be stable, SEP */
>>
>> Oh yes, it is POSIX undefined as far as I could figure out. You are
>> creating/destroying clients while reading the dir from different
>> threads?
> 
> Different processes on different directory fd. man readdir(3) does say
> that glibc is threadsafe, but that is not a requirement of POSIX. The
> problem we are seeing is that the directory contents are not stable
> around the readdir loop as clients are being created/destroyed, causing
> dirents to be missed.
> 
> stracing the problem shows that glibc used a 32KiB buffer for getdents
> and only a single syscall was required to grab the entire directory. No
> errors were seen before the missed dirent. It just seemed to be missing.
> 
> Afaict there is no delay in creating the sysfs entry, and I think there
> should also be no delay in creating the kernfs node and inodes, so my
> initial assumption is that it's something not quite happy in the
> kernfs directory that shows up under stress. A second loop ran for a
> long time without incident locally, but CI seems much more adept at
> finding it.

I think we are talking about the same thing..

>> I think best might be to introduce explicit sync points between
>> those two entities to make this reliable. In another review for the same
>> problem I suggested pipes for implementing this handshake. Along the
>> lines of "let child open/close some processes, make it wait for parent;
>> parent scans sysfs, signals child to open/close some more; repeat for a
>> while".
> 
> Sadly, I don't expect userspace to do that :)
> 
> I do expect userspace to search for its own client/ upon creation, and I
> expect there to be many clients being opened at once. So I think so long
> as the use of the library readdir is correct (distinct processes with
> distinct directory fd, so I think there's no accidental sharing) this
> represents. Hmm. fsync(dirfd).

...and I think it is just not guaranteed that reading the dir in 
parallel to dentries being created/deleted is guaranteed to work (no 
guarantee for discovering newly added or deleted clients since the 
iteration started). Or you are saying that a client opens itself starts 
readdir loop and then fails to find itself?

Otherwise, I was thinking, if we wanted to allow clients to inspect 
themselves, we should really add a getrusage(2) like ioctl.

Regards,

Tvrtko
Chris Wilson Jan. 27, 2021, 9:51 a.m. UTC | #5
Quoting Tvrtko Ursulin (2021-01-27 09:12:00)
> 
> On 26/01/2021 22:04, Chris Wilson wrote:
> > Quoting Tvrtko Ursulin (2021-01-26 16:47:14)
> >> On 26/01/2021 13:05, Chris Wilson wrote:
> >>> The client id used is a cyclic allocator as that reduces the likelihood
> >>> of userspace seeing the same id used again (and so confusing the new
> >>> client as the old). Verify that each new client has an id greater than
> >>> the last.
> >>>
> >>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >>> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
> >>> ---
> >>>    tests/i915/sysfs_clients.c | 129 +++++++++++++++++++++++++++++++------
> >>>    1 file changed, 108 insertions(+), 21 deletions(-)
> >>>
> >>> diff --git a/tests/i915/sysfs_clients.c b/tests/i915/sysfs_clients.c
> >>> index a3a1f81e1..d2c1ebc5f 100644
> >>> --- a/tests/i915/sysfs_clients.c
> >>> +++ b/tests/i915/sysfs_clients.c
> >>> @@ -8,6 +8,7 @@
> >>>    #include <errno.h>
> >>>    #include <fcntl.h>
> >>>    #include <inttypes.h>
> >>> +#include <limits.h>
> >>>    #include <math.h>
> >>>    #include <sched.h>
> >>>    #include <sys/socket.h>
> >>> @@ -47,6 +48,8 @@
> >>>    #define assert_within_epsilon(x, ref, tolerance) \
> >>>        __assert_within_epsilon(x, ref, tolerance / 100., tolerance / 100.)
> >>>    
> >>> +#define BUFSZ 280
> >>> +
> >>>    #define MI_BATCH_BUFFER_START (0x31 << 23)
> >>>    #define MI_BATCH_BUFFER_END (0xa << 23)
> >>>    #define MI_ARB_CHECK (0x5 << 23)
> >>> @@ -75,7 +78,7 @@ static void pidname(int i915, int clients)
> >>>    {
> >>>        struct dirent *de;
> >>>        int sv[2], rv[2];
> >>> -     char buf[280];
> >>> +     char buf[BUFSZ];
> >>>        int me = -1;
> >>>        long count;
> >>>        pid_t pid;
> >>> @@ -180,7 +183,7 @@ static long count_clients(int clients)
> >>>    {
> >>>        struct dirent *de;
> >>>        long count = 0;
> >>> -     char buf[280];
> >>> +     char buf[BUFSZ];
> >>>        DIR *dir;
> >>>    
> >>>        dir = fdopendir(dup(clients));
> >>> @@ -229,32 +232,113 @@ static void create(int i915, int clients)
> >>>        igt_assert_eq(count_clients(clients), 1);
> >>>    }
> >>>    
> >>> +static const char *find_client(int clients, pid_t pid, char *buf)
> >>> +{
> >>> +     DIR *dir = fdopendir(dup(clients));
> >>> +
> >>> +     /* Reading a dir as it changes does not appear to be stable, SEP */
> >>
> >> Oh yes, it is POSIX undefined as far as I could figure out. You are
> >> creating/destroying clients while reading the dir from different
> >> threads?
> > 
> > Different processes on different directory fd. man readdir(3) does say
> > that glibc is threadsafe, but that is not a requirement of POSIX. The
> > problem we are seeing is that the directory contents are not stable
> > around the readdir loop as clients are being created/destroyed, causing
> > dirents to be missed.
> > 
> > stracing the problem shows that glibc used a 32KiB buffer for getdents
> > and only a single syscall was required to grab the entire directory. No
> > errors were seen before the missed dirent. It just seemed to be missing.
> > 
> > Afaict there is no delay in creating the sysfs entry, and I think there
> > should also be no delay in creating the kernfs node and inodes, so my
> > initial assumption is that it's something not quite happy in the
> > kernfs directory that shows up under stress. A second loop ran for a
> > long time without incident locally, but CI seems much more adept at
> > finding it.
> 
> I think we are talking about the same thing..

The manpages only talked about the thread-safety of the library readdir,
and since you mentioned threads not processes, I was hopeful.

> >> I think best might be to introduce explicit sync points between
> >> those two entities to make this reliable. In another review for the same
> >> problem I suggested pipes for implementing this handshake. Along the
> >> lines of "let child open/close some processes, make it wait for parent;
> >> parent scans sysfs, signals child to open/close some more; repeat for a
> >> while".
> > 
> > Sadly, I don't expect userspace to do that :)
> > 
> > I do expect userspace to search for its own client/ upon creation, and I
> > expect there to be many clients being opened at once. So I think so long
> > as the use of the library readdir is correct (distinct processes with
> > distinct directory fd, so I think there's no accidental sharing) this
> > represents. Hmm. fsync(dirfd).
> 
> ...and I think it is just not guaranteed that reading the dir in 
> parallel to dentries being created/deleted is guaranteed to work (no 
> guarantee for discovering newly added or deleted clients since the 
> iteration started). Or you are saying that a client opens itself starts 
> readdir loop and then fails to find itself?

That's exactly what happens -- but with multiple concurrent processes.
(Needs to run with one process to double check).

device = gem_reopen_driver(i915);
DIR fdopendir(open("/sysfs/clients", O_DIRECTORY))
rewinddir();
while (readdir) ...
=> this pid not found
	
> Otherwise, I was thinking, if we wanted to allow clients to inspect 
> themselves, we should really add a getrusage(2) like ioctl.

Maybe, but I've yet to see something that says a directory operation
prior to a opendir/rewinddir will not be visible in readdir. POSIX says
that directory operations in parallel to the readdir, whether or not
those are visible in the next readdir is undefined, but it seems to be
clear on operations prior to opening the directory.
-Chris
Tvrtko Ursulin Jan. 29, 2021, 9:18 a.m. UTC | #6
On 26/01/2021 13:05, Chris Wilson wrote:
> The client id used is a cyclic allocator as that reduces the likelihood
> of userspace seeing the same id used again (and so confusing the new
> client as the old). Verify that each new client has an id greater than
> the last.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
> ---
>   tests/i915/sysfs_clients.c | 129 +++++++++++++++++++++++++++++++------
>   1 file changed, 108 insertions(+), 21 deletions(-)
> 
> diff --git a/tests/i915/sysfs_clients.c b/tests/i915/sysfs_clients.c
> index a3a1f81e1..d2c1ebc5f 100644
> --- a/tests/i915/sysfs_clients.c
> +++ b/tests/i915/sysfs_clients.c
> @@ -8,6 +8,7 @@
>   #include <errno.h>
>   #include <fcntl.h>
>   #include <inttypes.h>
> +#include <limits.h>
>   #include <math.h>
>   #include <sched.h>
>   #include <sys/socket.h>
> @@ -47,6 +48,8 @@
>   #define assert_within_epsilon(x, ref, tolerance) \
>   	__assert_within_epsilon(x, ref, tolerance / 100., tolerance / 100.)
>   
> +#define BUFSZ 280
> +
>   #define MI_BATCH_BUFFER_START (0x31 << 23)
>   #define MI_BATCH_BUFFER_END (0xa << 23)
>   #define MI_ARB_CHECK (0x5 << 23)
> @@ -75,7 +78,7 @@ static void pidname(int i915, int clients)
>   {
>   	struct dirent *de;
>   	int sv[2], rv[2];
> -	char buf[280];
> +	char buf[BUFSZ];
>   	int me = -1;
>   	long count;
>   	pid_t pid;
> @@ -180,7 +183,7 @@ static long count_clients(int clients)
>   {
>   	struct dirent *de;
>   	long count = 0;
> -	char buf[280];
> +	char buf[BUFSZ];
>   	DIR *dir;
>   
>   	dir = fdopendir(dup(clients));
> @@ -229,32 +232,113 @@ static void create(int i915, int clients)
>   	igt_assert_eq(count_clients(clients), 1);
>   }
>   
> +static const char *find_client(int clients, pid_t pid, char *buf)
> +{
> +	DIR *dir = fdopendir(dup(clients));
> +
> +	/* Reading a dir as it changes does not appear to be stable, SEP */
> +	for (int pass = 0; pass < 2; pass++) {
> +		struct dirent *de;
> +
> +		rewinddir(dir);
> +		while ((de = readdir(dir))) {
> +			if (!isdigit(de->d_name[0]))
> +				continue;
> +
> +			snprintf(buf, BUFSZ, "%s/pid", de->d_name);
> +			igt_sysfs_read(clients, buf, buf, sizeof(buf));
> +			if (atoi(buf) != pid)
> +				continue;
> +
> +			strncpy(buf, de->d_name, BUFSZ);
> +			goto out;
> +		}
> +	}
> +	*buf = '\0';
> +out:
> +	closedir(dir);
> +	return buf;
> +}
> +
>   static int find_me(int clients, pid_t pid)
>   {
> -	struct dirent *de;
> -	char buf[280];
> -	int me = -1;
> -	DIR *dir;
> +	char buf[BUFSZ];
>   
> -	dir = fdopendir(dup(clients));
> -	igt_assert(dir);
> -	rewinddir(dir);
> +	return openat(clients,
> +		      find_client(clients, pid, buf),
> +		      O_DIRECTORY | O_RDONLY);
> +}
>   
> -	while ((de = readdir(dir))) {
> -		if (!isdigit(de->d_name[0]))
> -			continue;
> +static int reopen_directory(int fd)
> +{
> +	char buf[BUFSZ];
> +	int dir;
>   
> -		snprintf(buf, sizeof(buf), "%s/pid", de->d_name);
> -		igt_sysfs_read(clients, buf, buf, sizeof(buf));
> -		if (atoi(buf) != pid)
> -			continue;
> +	snprintf(buf, sizeof(buf), "/proc/self/fd/%d", fd);
> +	dir = open(buf, O_RDONLY);

Maybe O_DIRECTORY if it is open_directory.

> +	igt_assert_fd(dir);
>   
> -		me = openat(clients, de->d_name, O_DIRECTORY | O_RDONLY);
> -		break;
> +	return dir;
> +}
> +
> +static unsigned int my_id(int clients, pid_t pid)
> +{
> +	char buf[BUFSZ];
> +
> +	return atoi(find_client(clients, pid, buf));
> +}
> +
> +static unsigned int recycle_client(int i915, int clients)
> +{
> +	int device, client;
> +
> +	device = gem_reopen_driver(i915);
> +	client = my_id(clients, getpid());
> +	close(device);
> +
> +	return client;
> +}
> +
> +static void recycle(int i915, int clients)
> +{
> +	const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
> +
> +	/*
> +	 * As we open and close clients, we do not expect to reuse old ids,
> +	 * i.e. we use a cyclic ida. This reduces the likelihood of userspace
> +	 * watchers becoming confused and mistaking the new client as a
> +	 * continuation of the old.
> +	 */
> +	igt_require(my_id(clients, getpid()) < INT_MAX / 2);

Hm this is a bit dodgy - it will cause "permanent" skips if running the 
test in a loop. Just for the client > last assert below? I guess it is 
hard to handle wrap with forked clients.

> +	igt_assert(my_id(clients, getpid()));
> +
> +	igt_fork(child, 2 * ncpus) {
> +		unsigned int client, last;
> +
> +		/* Reopen the directory fd for each client */
> +		clients = reopen_directory(clients);
> +
> +		last = recycle_client(i915, clients);
> +		igt_info("Child[%d] first client:%d\n", getpid(), last);
> +		igt_until_timeout(5) {
> +			client = recycle_client(i915, clients);
> +
> +			/* In 5 seconds we are not going to exhaust the ids */
> +			igt_assert(client > last);
> +			last = client;
> +		}
> +		igt_info("Child[%d] last client:%d\n", getpid(), last);
>   	}
> +	igt_waitchildren();
>   
> -	closedir(dir);
> -	return me;
> +	/* Cleanup delayed behind rcu */
> +	igt_until_timeout(30) {
> +		sched_yield();
> +		if (count_clients(clients) == 1)
> +			break;
> +		usleep(10000);
> +	}
> +	igt_assert_eq(count_clients(clients), 1);
>   }
>   
>   static int64_t read_runtime(int client, int class)
> @@ -719,7 +803,7 @@ sema(int i915, int clients, const struct intel_execution_engine2 *e, int f)
>   static int read_all(int clients, pid_t pid, int class, uint64_t *runtime)
>   {
>   	struct dirent *de;
> -	char buf[280];
> +	char buf[BUFSZ];
>   	int count = 0;
>   	DIR *dir;
>   
> @@ -958,6 +1042,9 @@ igt_main
>   	igt_subtest("create")
>   		create(i915, clients);
>   
> +	igt_subtest("recycle")
> +		recycle(i915, clients);
> +
>   	igt_subtest_group
>   		test_busy(i915, clients);
>   
> 

Okay better than nothing.

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko
Chris Wilson Jan. 29, 2021, 9:52 a.m. UTC | #7
Quoting Tvrtko Ursulin (2021-01-29 09:18:50)
> 
> 
> On 26/01/2021 13:05, Chris Wilson wrote:
> > The client id used is a cyclic allocator as that reduces the likelihood
> > of userspace seeing the same id used again (and so confusing the new
> > client as the old). Verify that each new client has an id greater than
> > the last.
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
> > ---
> >   tests/i915/sysfs_clients.c | 129 +++++++++++++++++++++++++++++++------
> >   1 file changed, 108 insertions(+), 21 deletions(-)
> > 
> > diff --git a/tests/i915/sysfs_clients.c b/tests/i915/sysfs_clients.c
> > index a3a1f81e1..d2c1ebc5f 100644
> > --- a/tests/i915/sysfs_clients.c
> > +++ b/tests/i915/sysfs_clients.c
> > @@ -8,6 +8,7 @@
> >   #include <errno.h>
> >   #include <fcntl.h>
> >   #include <inttypes.h>
> > +#include <limits.h>
> >   #include <math.h>
> >   #include <sched.h>
> >   #include <sys/socket.h>
> > @@ -47,6 +48,8 @@
> >   #define assert_within_epsilon(x, ref, tolerance) \
> >       __assert_within_epsilon(x, ref, tolerance / 100., tolerance / 100.)
> >   
> > +#define BUFSZ 280
> > +
> >   #define MI_BATCH_BUFFER_START (0x31 << 23)
> >   #define MI_BATCH_BUFFER_END (0xa << 23)
> >   #define MI_ARB_CHECK (0x5 << 23)
> > @@ -75,7 +78,7 @@ static void pidname(int i915, int clients)
> >   {
> >       struct dirent *de;
> >       int sv[2], rv[2];
> > -     char buf[280];
> > +     char buf[BUFSZ];
> >       int me = -1;
> >       long count;
> >       pid_t pid;
> > @@ -180,7 +183,7 @@ static long count_clients(int clients)
> >   {
> >       struct dirent *de;
> >       long count = 0;
> > -     char buf[280];
> > +     char buf[BUFSZ];
> >       DIR *dir;
> >   
> >       dir = fdopendir(dup(clients));
> > @@ -229,32 +232,113 @@ static void create(int i915, int clients)
> >       igt_assert_eq(count_clients(clients), 1);
> >   }
> >   
> > +static const char *find_client(int clients, pid_t pid, char *buf)
> > +{
> > +     DIR *dir = fdopendir(dup(clients));
> > +
> > +     /* Reading a dir as it changes does not appear to be stable, SEP */
> > +     for (int pass = 0; pass < 2; pass++) {
> > +             struct dirent *de;
> > +
> > +             rewinddir(dir);
> > +             while ((de = readdir(dir))) {
> > +                     if (!isdigit(de->d_name[0]))
> > +                             continue;
> > +
> > +                     snprintf(buf, BUFSZ, "%s/pid", de->d_name);
> > +                     igt_sysfs_read(clients, buf, buf, sizeof(buf));
> > +                     if (atoi(buf) != pid)
> > +                             continue;
> > +
> > +                     strncpy(buf, de->d_name, BUFSZ);
> > +                     goto out;
> > +             }
> > +     }
> > +     *buf = '\0';
> > +out:
> > +     closedir(dir);
> > +     return buf;
> > +}
> > +
> >   static int find_me(int clients, pid_t pid)
> >   {
> > -     struct dirent *de;
> > -     char buf[280];
> > -     int me = -1;
> > -     DIR *dir;
> > +     char buf[BUFSZ];
> >   
> > -     dir = fdopendir(dup(clients));
> > -     igt_assert(dir);
> > -     rewinddir(dir);
> > +     return openat(clients,
> > +                   find_client(clients, pid, buf),
> > +                   O_DIRECTORY | O_RDONLY);
> > +}
> >   
> > -     while ((de = readdir(dir))) {
> > -             if (!isdigit(de->d_name[0]))
> > -                     continue;
> > +static int reopen_directory(int fd)
> > +{
> > +     char buf[BUFSZ];
> > +     int dir;
> >   
> > -             snprintf(buf, sizeof(buf), "%s/pid", de->d_name);
> > -             igt_sysfs_read(clients, buf, buf, sizeof(buf));
> > -             if (atoi(buf) != pid)
> > -                     continue;
> > +     snprintf(buf, sizeof(buf), "/proc/self/fd/%d", fd);
> > +     dir = open(buf, O_RDONLY);
> 
> Maybe O_DIRECTORY if it is open_directory.
> 
> > +     igt_assert_fd(dir);
> >   
> > -             me = openat(clients, de->d_name, O_DIRECTORY | O_RDONLY);
> > -             break;
> > +     return dir;
> > +}
> > +
> > +static unsigned int my_id(int clients, pid_t pid)
> > +{
> > +     char buf[BUFSZ];
> > +
> > +     return atoi(find_client(clients, pid, buf));
> > +}
> > +
> > +static unsigned int recycle_client(int i915, int clients)
> > +{
> > +     int device, client;
> > +
> > +     device = gem_reopen_driver(i915);
> > +     client = my_id(clients, getpid());
> > +     close(device);
> > +
> > +     return client;
> > +}
> > +
> > +static void recycle(int i915, int clients)
> > +{
> > +     const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
> > +
> > +     /*
> > +      * As we open and close clients, we do not expect to reuse old ids,
> > +      * i.e. we use a cyclic ida. This reduces the likelihood of userspace
> > +      * watchers becoming confused and mistaking the new client as a
> > +      * continuation of the old.
> > +      */
> > +     igt_require(my_id(clients, getpid()) < INT_MAX / 2);
> 
> Hm this is a bit dodgy - it will cause "permanent" skips if running the 
> test in a loop. Just for the client > last assert below? I guess it is 
> hard to handle wrap with forked clients.

It takes about a day to reach 2 billion ids on a fast machine. For CI, I
think we are safe.

We could do the (int)(A - B) > 0 to handle the wrap, that would be more
sensible.

[...]

> Okay better than nothing.

But first we need to resolve the failure to find itself. :(
-Chris
Tvrtko Ursulin Jan. 29, 2021, 6:30 p.m. UTC | #8
On 29/01/2021 09:52, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2021-01-29 09:18:50)
>> On 26/01/2021 13:05, Chris Wilson wrote:
>>> The client id used is a cyclic allocator as that reduces the likelihood
>>> of userspace seeing the same id used again (and so confusing the new
>>> client as the old). Verify that each new client has an id greater than
>>> the last.
>>>
>>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>>> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
>>> ---
>>>    tests/i915/sysfs_clients.c | 129 +++++++++++++++++++++++++++++++------
>>>    1 file changed, 108 insertions(+), 21 deletions(-)
>>>
>>> diff --git a/tests/i915/sysfs_clients.c b/tests/i915/sysfs_clients.c
>>> index a3a1f81e1..d2c1ebc5f 100644
>>> --- a/tests/i915/sysfs_clients.c
>>> +++ b/tests/i915/sysfs_clients.c
>>> @@ -8,6 +8,7 @@
>>>    #include <errno.h>
>>>    #include <fcntl.h>
>>>    #include <inttypes.h>
>>> +#include <limits.h>
>>>    #include <math.h>
>>>    #include <sched.h>
>>>    #include <sys/socket.h>
>>> @@ -47,6 +48,8 @@
>>>    #define assert_within_epsilon(x, ref, tolerance) \
>>>        __assert_within_epsilon(x, ref, tolerance / 100., tolerance / 100.)
>>>    
>>> +#define BUFSZ 280
>>> +
>>>    #define MI_BATCH_BUFFER_START (0x31 << 23)
>>>    #define MI_BATCH_BUFFER_END (0xa << 23)
>>>    #define MI_ARB_CHECK (0x5 << 23)
>>> @@ -75,7 +78,7 @@ static void pidname(int i915, int clients)
>>>    {
>>>        struct dirent *de;
>>>        int sv[2], rv[2];
>>> -     char buf[280];
>>> +     char buf[BUFSZ];
>>>        int me = -1;
>>>        long count;
>>>        pid_t pid;
>>> @@ -180,7 +183,7 @@ static long count_clients(int clients)
>>>    {
>>>        struct dirent *de;
>>>        long count = 0;
>>> -     char buf[280];
>>> +     char buf[BUFSZ];
>>>        DIR *dir;
>>>    
>>>        dir = fdopendir(dup(clients));
>>> @@ -229,32 +232,113 @@ static void create(int i915, int clients)
>>>        igt_assert_eq(count_clients(clients), 1);
>>>    }
>>>    
>>> +static const char *find_client(int clients, pid_t pid, char *buf)
>>> +{
>>> +     DIR *dir = fdopendir(dup(clients));
>>> +
>>> +     /* Reading a dir as it changes does not appear to be stable, SEP */
>>> +     for (int pass = 0; pass < 2; pass++) {
>>> +             struct dirent *de;
>>> +
>>> +             rewinddir(dir);
>>> +             while ((de = readdir(dir))) {
>>> +                     if (!isdigit(de->d_name[0]))
>>> +                             continue;
>>> +
>>> +                     snprintf(buf, BUFSZ, "%s/pid", de->d_name);
>>> +                     igt_sysfs_read(clients, buf, buf, sizeof(buf));
>>> +                     if (atoi(buf) != pid)
>>> +                             continue;
>>> +
>>> +                     strncpy(buf, de->d_name, BUFSZ);
>>> +                     goto out;
>>> +             }
>>> +     }
>>> +     *buf = '\0';
>>> +out:
>>> +     closedir(dir);
>>> +     return buf;
>>> +}
>>> +
>>>    static int find_me(int clients, pid_t pid)
>>>    {
>>> -     struct dirent *de;
>>> -     char buf[280];
>>> -     int me = -1;
>>> -     DIR *dir;
>>> +     char buf[BUFSZ];
>>>    
>>> -     dir = fdopendir(dup(clients));
>>> -     igt_assert(dir);
>>> -     rewinddir(dir);
>>> +     return openat(clients,
>>> +                   find_client(clients, pid, buf),
>>> +                   O_DIRECTORY | O_RDONLY);
>>> +}
>>>    
>>> -     while ((de = readdir(dir))) {
>>> -             if (!isdigit(de->d_name[0]))
>>> -                     continue;
>>> +static int reopen_directory(int fd)
>>> +{
>>> +     char buf[BUFSZ];
>>> +     int dir;
>>>    
>>> -             snprintf(buf, sizeof(buf), "%s/pid", de->d_name);
>>> -             igt_sysfs_read(clients, buf, buf, sizeof(buf));
>>> -             if (atoi(buf) != pid)
>>> -                     continue;
>>> +     snprintf(buf, sizeof(buf), "/proc/self/fd/%d", fd);
>>> +     dir = open(buf, O_RDONLY);
>>
>> Maybe O_DIRECTORY if it is open_directory.
>>
>>> +     igt_assert_fd(dir);
>>>    
>>> -             me = openat(clients, de->d_name, O_DIRECTORY | O_RDONLY);
>>> -             break;
>>> +     return dir;
>>> +}
>>> +
>>> +static unsigned int my_id(int clients, pid_t pid)
>>> +{
>>> +     char buf[BUFSZ];
>>> +
>>> +     return atoi(find_client(clients, pid, buf));
>>> +}
>>> +
>>> +static unsigned int recycle_client(int i915, int clients)
>>> +{
>>> +     int device, client;
>>> +
>>> +     device = gem_reopen_driver(i915);
>>> +     client = my_id(clients, getpid());
>>> +     close(device);
>>> +
>>> +     return client;
>>> +}
>>> +
>>> +static void recycle(int i915, int clients)
>>> +{
>>> +     const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
>>> +
>>> +     /*
>>> +      * As we open and close clients, we do not expect to reuse old ids,
>>> +      * i.e. we use a cyclic ida. This reduces the likelihood of userspace
>>> +      * watchers becoming confused and mistaking the new client as a
>>> +      * continuation of the old.
>>> +      */
>>> +     igt_require(my_id(clients, getpid()) < INT_MAX / 2);
>>
>> Hm this is a bit dodgy - it will cause "permanent" skips if running the
>> test in a loop. Just for the client > last assert below? I guess it is
>> hard to handle wrap with forked clients.
> 
> It takes about a day to reach 2 billion ids on a fast machine. For CI, I
> think we are safe.
> 
> We could do the (int)(A - B) > 0 to handle the wrap, that would be more
> sensible.
> 
> [...]
> 
>> Okay better than nothing.
> 
> But first we need to resolve the failure to find itself. :(

Forgot about that.. Start with one child for now?

Regards,

Tvrtko
diff mbox series

Patch

diff --git a/tests/i915/sysfs_clients.c b/tests/i915/sysfs_clients.c
index a3a1f81e1..d2c1ebc5f 100644
--- a/tests/i915/sysfs_clients.c
+++ b/tests/i915/sysfs_clients.c
@@ -8,6 +8,7 @@ 
 #include <errno.h>
 #include <fcntl.h>
 #include <inttypes.h>
+#include <limits.h>
 #include <math.h>
 #include <sched.h>
 #include <sys/socket.h>
@@ -47,6 +48,8 @@ 
 #define assert_within_epsilon(x, ref, tolerance) \
 	__assert_within_epsilon(x, ref, tolerance / 100., tolerance / 100.)
 
+#define BUFSZ 280
+
 #define MI_BATCH_BUFFER_START (0x31 << 23)
 #define MI_BATCH_BUFFER_END (0xa << 23)
 #define MI_ARB_CHECK (0x5 << 23)
@@ -75,7 +78,7 @@  static void pidname(int i915, int clients)
 {
 	struct dirent *de;
 	int sv[2], rv[2];
-	char buf[280];
+	char buf[BUFSZ];
 	int me = -1;
 	long count;
 	pid_t pid;
@@ -180,7 +183,7 @@  static long count_clients(int clients)
 {
 	struct dirent *de;
 	long count = 0;
-	char buf[280];
+	char buf[BUFSZ];
 	DIR *dir;
 
 	dir = fdopendir(dup(clients));
@@ -229,32 +232,113 @@  static void create(int i915, int clients)
 	igt_assert_eq(count_clients(clients), 1);
 }
 
+static const char *find_client(int clients, pid_t pid, char *buf)
+{
+	DIR *dir = fdopendir(dup(clients));
+
+	/* Reading a dir as it changes does not appear to be stable, SEP */
+	for (int pass = 0; pass < 2; pass++) {
+		struct dirent *de;
+
+		rewinddir(dir);
+		while ((de = readdir(dir))) {
+			if (!isdigit(de->d_name[0]))
+				continue;
+
+			snprintf(buf, BUFSZ, "%s/pid", de->d_name);
+			igt_sysfs_read(clients, buf, buf, sizeof(buf));
+			if (atoi(buf) != pid)
+				continue;
+
+			strncpy(buf, de->d_name, BUFSZ);
+			goto out;
+		}
+	}
+	*buf = '\0';
+out:
+	closedir(dir);
+	return buf;
+}
+
 static int find_me(int clients, pid_t pid)
 {
-	struct dirent *de;
-	char buf[280];
-	int me = -1;
-	DIR *dir;
+	char buf[BUFSZ];
 
-	dir = fdopendir(dup(clients));
-	igt_assert(dir);
-	rewinddir(dir);
+	return openat(clients,
+		      find_client(clients, pid, buf),
+		      O_DIRECTORY | O_RDONLY);
+}
 
-	while ((de = readdir(dir))) {
-		if (!isdigit(de->d_name[0]))
-			continue;
+static int reopen_directory(int fd)
+{
+	char buf[BUFSZ];
+	int dir;
 
-		snprintf(buf, sizeof(buf), "%s/pid", de->d_name);
-		igt_sysfs_read(clients, buf, buf, sizeof(buf));
-		if (atoi(buf) != pid)
-			continue;
+	snprintf(buf, sizeof(buf), "/proc/self/fd/%d", fd);
+	dir = open(buf, O_RDONLY);
+	igt_assert_fd(dir);
 
-		me = openat(clients, de->d_name, O_DIRECTORY | O_RDONLY);
-		break;
+	return dir;
+}
+
+static unsigned int my_id(int clients, pid_t pid)
+{
+	char buf[BUFSZ];
+
+	return atoi(find_client(clients, pid, buf));
+}
+
+static unsigned int recycle_client(int i915, int clients)
+{
+	int device, client;
+
+	device = gem_reopen_driver(i915);
+	client = my_id(clients, getpid());
+	close(device);
+
+	return client;
+}
+
+static void recycle(int i915, int clients)
+{
+	const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+
+	/*
+	 * As we open and close clients, we do not expect to reuse old ids,
+	 * i.e. we use a cyclic ida. This reduces the likelihood of userspace
+	 * watchers becoming confused and mistaking the new client as a
+	 * continuation of the old.
+	 */
+	igt_require(my_id(clients, getpid()) < INT_MAX / 2);
+	igt_assert(my_id(clients, getpid()));
+
+	igt_fork(child, 2 * ncpus) {
+		unsigned int client, last;
+
+		/* Reopen the directory fd for each client */
+		clients = reopen_directory(clients);
+
+		last = recycle_client(i915, clients);
+		igt_info("Child[%d] first client:%d\n", getpid(), last);
+		igt_until_timeout(5) {
+			client = recycle_client(i915, clients);
+
+			/* In 5 seconds we are not going to exhaust the ids */
+			igt_assert(client > last);
+			last = client;
+		}
+		igt_info("Child[%d] last client:%d\n", getpid(), last);
 	}
+	igt_waitchildren();
 
-	closedir(dir);
-	return me;
+	/* Cleanup delayed behind rcu */
+	igt_until_timeout(30) {
+		sched_yield();
+		if (count_clients(clients) == 1)
+			break;
+		usleep(10000);
+	}
+	igt_assert_eq(count_clients(clients), 1);
 }
 
 static int64_t read_runtime(int client, int class)
@@ -719,7 +803,7 @@  sema(int i915, int clients, const struct intel_execution_engine2 *e, int f)
 static int read_all(int clients, pid_t pid, int class, uint64_t *runtime)
 {
 	struct dirent *de;
-	char buf[280];
+	char buf[BUFSZ];
 	int count = 0;
 	DIR *dir;
 
@@ -958,6 +1042,9 @@  igt_main
 	igt_subtest("create")
 		create(i915, clients);
 
+	igt_subtest("recycle")
+		recycle(i915, clients);
+
 	igt_subtest_group
 		test_busy(i915, clients);