[12/11,TESTSUITE] audit_testsuite: Add stress test for tree watches
diff mbox series

Message ID 20180904160632.21210-13-jack@suse.cz
State New
Headers show
Series
  • audit: Fix various races when tagging and untagging mounts
Related show

Commit Message

Jan Kara Sept. 4, 2018, 4:06 p.m. UTC
Add stress test for stressing audit tree watches by adding and deleting
rules while events are generated and watched filesystems are mounted and
unmounted in parallel.

Signed-off-by: Jan Kara <jack@suse.cz>
---
 tests/stress_tree/Makefile |   8 +++
 tests/stress_tree/test     | 171 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 179 insertions(+)
 create mode 100644 tests/stress_tree/Makefile
 create mode 100755 tests/stress_tree/test

Comments

Richard Guy Briggs Sept. 14, 2018, 6:21 p.m. UTC | #1
On 2018-09-04 18:06, Jan Kara wrote:
> Add stress test for stressing audit tree watches by adding and deleting
> rules while events are generated and watched filesystems are mounted and
> unmounted in parallel.

A couple of minor comments below, but otherwise looks reasonable to me.
Reviewed-by: Richard Guy Briggs <rgb@redhat.com>

I assume you've tested this with more than $dirs = 4 and sleep(60)?

> Signed-off-by: Jan Kara <jack@suse.cz>
> ---
>  tests/stress_tree/Makefile |   8 +++
>  tests/stress_tree/test     | 171 +++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 179 insertions(+)
>  create mode 100644 tests/stress_tree/Makefile
>  create mode 100755 tests/stress_tree/test
> 
> diff --git a/tests/stress_tree/Makefile b/tests/stress_tree/Makefile
> new file mode 100644
> index 000000000000..7ade09aad86f
> --- /dev/null
> +++ b/tests/stress_tree/Makefile
> @@ -0,0 +1,8 @@
> +TARGETS=$(patsubst %.c,%,$(wildcard *.c))
> +
> +LDLIBS += -lpthread
> +
> +all: $(TARGETS)
> +clean:
> +	rm -f $(TARGETS)
> +
> diff --git a/tests/stress_tree/test b/tests/stress_tree/test
> new file mode 100755
> index 000000000000..6215bec810d1
> --- /dev/null
> +++ b/tests/stress_tree/test
> @@ -0,0 +1,171 @@
> +#!/usr/bin/perl
> +
> +use strict;
> +
> +use Test;
> +BEGIN { plan tests => 1 }
> +
> +use File::Temp qw/ tempdir tempfile /;
> +
> +###
> +# functions
> +
> +sub key_gen {
> +	my @chars = ( "A" .. "Z", "a" .. "z" );
> +	my $key = "testsuite-" . time . "-";
> +	$key .= $chars[ rand @chars ] for 1 .. 8;
> +	return $key;
> +}
> +
> +# Run stat on random files in subtrees to generate audit events
> +sub run_stat {
> +	my($dir,$dirs) = @_;
> +	my $path;
> +
> +	while (1) {
> +		$path = "$dir/mnt/mnt".int(rand($dirs))."/subdir".int(rand($dirs));
> +		stat($path);
> +	}
> +}
> +
> +# Generate audit rules for subtrees. Do one rule per subtree. Because watch
> +# recursively iterates child mounts and we mount $dir/leaf$i under various
> +# subtrees, the inode corresponding to $dir/leaf$i gets tagged by different
> +# trees.
> +sub run_mark_audit {
> +	my($dir,$dirs,$key) = @_;
> +
> +	while (1) {
> +		for (my $i=0; $i < $dirs; $i++) {
> +			system("auditctl -w $dir/mnt/mnt$i -p r -k $key");
> +		}
> +		system("auditctl -D -k $key >& /dev/null");
> +	}
> +}
> +
> +sub umount_all {
> +	my($dir,$dirs,$ignore_fail) = @_;
> +
> +	for (my $i=0; $i < $dirs; $i++) {
> +		while (system("umount $dir/leaf$i >& /dev/null") > 0 &&
> +		       $ignore_fail == 0) {
> +			# Nothing - loop until umount succeeds
> +		}
> +	}

Shouldn't this set of tmpfs be unmounted after the bind mounts that
follow, in reverse order to the way they were mounted?

> +	for (my $i=0; $i < $dirs; $i++) {
> +		for (my $j=0; $j < $dirs; $j++) {
> +			while (system("umount $dir/mnt/mnt$i/subdir$j >& /dev/null") > 0 &&
> +			       $ignore_fail == 0) {
> +				# Nothing - loop until umount succeeds
> +			}
> +		}
> +		while (system("umount $dir/mnt/mnt$i >& /dev/null") > 0 &&
> +		       $ignore_fail == 0) {
> +			# Nothing - loop until umount succeeds
> +		}
> +	}
> +}
> +
> +# Mount and unmount filesystems. We pick random leaf mount so that sometimes
> +# a leaf mount point root inode will gather more tags from different trees
> +# and sometimes we will be quicker in unmounting all instances of leaf and
> +# thus excercise inode evistion path
> +sub run_mount {
> +	my($dir,$dirs) = @_;
> +
> +	while (1) {
> +		# We use tmpfs here and not just bind mounts of some dir so
> +		# that the root inode gets evicted once all instances are
> +		# unmounted.
> +		for (my $i=0; $i < $dirs; $i++) {
> +			system("mount -t tmpfs none $dir/leaf$i");
> +		}
> +		for (my $i=0; $i < $dirs; $i++) {
> +			system("mount --bind $dir/dir$i $dir/mnt/mnt$i");
> +			for (my $j=0; $j < $dirs; $j++) {
> +				my $leaf="$dir/leaf".int(rand($dirs));
> +				system("mount --bind $leaf $dir/mnt/mnt$i/subdir$j");
> +			}
> +		}
> +		umount_all($dir, $dirs, 0);
> +	}
> +}
> +
> +
> +###
> +# setup
> +
> +# reset audit
> +system("auditctl -D >& /dev/null");
> +
> +# create temp directory
> +my $dir = tempdir( TEMPLATE => '/tmp/audit-testsuite-XXXX', CLEANUP => 1 );
> +
> +# create stdout/stderr sinks
> +( my $fh_out, my $stdout ) = tempfile(
> +	TEMPLATE => '/tmp/audit-testsuite-out-XXXX',
> +	UNLINK   => 1
> +);
> +( my $fh_err, my $stderr ) = tempfile(
> +	TEMPLATE => '/tmp/audit-testsuite-err-XXXX',
> +	UNLINK   => 1
> +);
> +
> +###
> +# tests
> +
> +my $dirs = 4;
> +
> +# setup directory hierarchy
> +for (my $i=0; $i < $dirs; $i++) {
> +	mkdir $dir."/dir".$i;
> +	for (my $j=0; $j < $dirs; $j++) {
> +		mkdir $dir."/dir".$i."/subdir".$j;
> +	}
> +}
> +mkdir "$dir/mnt";
> +for (my $i=0; $i < $dirs; $i++) {
> +	mkdir "$dir/mnt/mnt$i";
> +	mkdir "$dir/leaf$i";
> +}
> +
> +my $stat_pid = fork();
> +
> +if ($stat_pid == 0) {
> +	run_stat($dir, $dirs);
> +	# Never reached
> +	exit;
> +}
> +
> +my $mount_pid = fork();
> +
> +if ($mount_pid == 0) {
> +	run_mount($dir, $dirs);
> +	# Never reached
> +	exit;
> +}
> +
> +my $key = key_gen();
> +
> +my $audit_pid = fork();
> +
> +if ($audit_pid == 0) {
> +	run_mark_audit($dir, $dirs, $key);
> +	# Never reached
> +	exit;
> +}
> +
> +# Sleep for a minute to let stress test run...
> +sleep(60);
> +ok(1);
> +
> +###
> +# cleanup
> +
> +kill('KILL', $stat_pid, $mount_pid, $audit_pid);
> +# Wait for children to terminate
> +waitpid($stat_pid, 0);
> +waitpid($mount_pid, 0);
> +waitpid($audit_pid, 0);
> +system("auditctl -D >& /dev/null");
> +umount_all($dir, $dirs, 1);

Should all the subdirectories in the temp directory be deleted, or are
they all cleaned up recursively by the tempdir command?

- RGB

--
Richard Guy Briggs <rgb@redhat.com>
Sr. S/W Engineer, Kernel Security, Base Operating Systems
Remote, Ottawa, Red Hat Canada
IRC: rgb, SunRaycer
Voice: +1.647.777.2635, Internal: (81) 32635
Jan Kara Sept. 17, 2018, 4:56 p.m. UTC | #2
On Fri 14-09-18 14:21:04, Richard Guy Briggs wrote:
> On 2018-09-04 18:06, Jan Kara wrote:
> > Add stress test for stressing audit tree watches by adding and deleting
> > rules while events are generated and watched filesystems are mounted and
> > unmounted in parallel.
> 
> A couple of minor comments below, but otherwise looks reasonable to me.
> Reviewed-by: Richard Guy Briggs <rgb@redhat.com>
> 
> I assume you've tested this with more than $dirs = 4 and sleep(60)?

I've tested it to run for a longer time (a couple of hours) but I don't think
I've tested more directories. I can try that out.

> > +sub umount_all {
> > +	my($dir,$dirs,$ignore_fail) = @_;
> > +
> > +	for (my $i=0; $i < $dirs; $i++) {
> > +		while (system("umount $dir/leaf$i >& /dev/null") > 0 &&
> > +		       $ignore_fail == 0) {
> > +			# Nothing - loop until umount succeeds
> > +		}
> > +	}
> 
> Shouldn't this set of tmpfs be unmounted after the bind mounts that
> follow, in reverse order to the way they were mounted?

The order does not really matter. Once a bind mount is created, it is an
independent entity from the original mount (well, except for possible mount
inheritance), so you can unmount them in arbitrary order.

> > +# create temp directory
> > +my $dir = tempdir( TEMPLATE => '/tmp/audit-testsuite-XXXX', CLEANUP => 1 );
> > +
> > +# create stdout/stderr sinks
> > +( my $fh_out, my $stdout ) = tempfile(
> > +	TEMPLATE => '/tmp/audit-testsuite-out-XXXX',
> > +	UNLINK   => 1
> > +);
> > +( my $fh_err, my $stderr ) = tempfile(
> > +	TEMPLATE => '/tmp/audit-testsuite-err-XXXX',
> > +	UNLINK   => 1
> > +);
> > +
> > +###
> > +# tests
> > +
> > +my $dirs = 4;
> > +
> > +# setup directory hierarchy
> > +for (my $i=0; $i < $dirs; $i++) {
> > +	mkdir $dir."/dir".$i;
> > +	for (my $j=0; $j < $dirs; $j++) {
> > +		mkdir $dir."/dir".$i."/subdir".$j;
> > +	}
> > +}
> > +mkdir "$dir/mnt";
> > +for (my $i=0; $i < $dirs; $i++) {
> > +	mkdir "$dir/mnt/mnt$i";
> > +	mkdir "$dir/leaf$i";
> > +}
> > +
> > +my $stat_pid = fork();
> > +
> > +if ($stat_pid == 0) {
> > +	run_stat($dir, $dirs);
> > +	# Never reached
> > +	exit;
> > +}
> > +
> > +my $mount_pid = fork();
> > +
> > +if ($mount_pid == 0) {
> > +	run_mount($dir, $dirs);
> > +	# Never reached
> > +	exit;
> > +}
> > +
> > +my $key = key_gen();
> > +
> > +my $audit_pid = fork();
> > +
> > +if ($audit_pid == 0) {
> > +	run_mark_audit($dir, $dirs, $key);
> > +	# Never reached
> > +	exit;
> > +}
> > +
> > +# Sleep for a minute to let stress test run...
> > +sleep(60);
> > +ok(1);
> > +
> > +###
> > +# cleanup
> > +
> > +kill('KILL', $stat_pid, $mount_pid, $audit_pid);
> > +# Wait for children to terminate
> > +waitpid($stat_pid, 0);
> > +waitpid($mount_pid, 0);
> > +waitpid($audit_pid, 0);
> > +system("auditctl -D >& /dev/null");
> > +umount_all($dir, $dirs, 1);
> 
> Should all the subdirectories in the temp directory be deleted, or are
> they all cleaned up recursively by the tempdir command?

The CLEANUP / UNLINK parameters to tempdir() and tempfile() functions
should make sure everything is removed on exit (including possible
subdirs).

								Honza
Paul Moore Oct. 5, 2018, 9:06 p.m. UTC | #3
On Tue, Sep 4, 2018 at 12:06 PM Jan Kara <jack@suse.cz> wrote:
> Add stress test for stressing audit tree watches by adding and deleting
> rules while events are generated and watched filesystems are mounted and
> unmounted in parallel.
>
> Signed-off-by: Jan Kara <jack@suse.cz>
> ---
>  tests/stress_tree/Makefile |   8 +++
>  tests/stress_tree/test     | 171 +++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 179 insertions(+)
>  create mode 100644 tests/stress_tree/Makefile
>  create mode 100755 tests/stress_tree/test

No commentary on the test itself, other than perhaps it should live
under test_manual/, but in running the tests in a loop today I am
reliably able to panic my test kernel after ~30m or so.

I'm using the kernel linked below which is Fedora Rawhide +
selinux/next + audit/next + audit/working-fsnotify_fixes; a link to
the patches added to the Rawhide kernel can be found in the list
archive linked below.

* https://groups.google.com/forum/#!topic/kernel-secnext/SFv0d-ij3z8
* https://copr.fedorainfracloud.org/coprs/pcmoore/kernel-secnext/build/805949

The initial panic dump is below:

[  139.619065] list_del corruption. prev->next should be
ffff985fa98d4100, but was ffff985fae91e370
[  139.622504] ------------[ cut here ]------------
[  139.623402] kernel BUG at lib/list_debug.c:53!
[  139.624294] invalid opcode: 0000 [#1] SMP PTI
[  139.625439] CPU: 1 PID: 3248 Comm: auditctl Not tainted
4.19.0-0.rc6.git2.1.1.secnext.fc30.x86_64 #1
[  139.630761] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[  139.631812] RIP: 0010:__list_del_entry_valid.cold.1+0x34/0x4c
[  139.632853] Code: ce 34 ac e8 b8 f7 c0 ff 0f 0b 48 c7 c7 a8 cf 34
ac e8 aa f7 c0 ff 0f 0b 48 89 f2 48 89 fe 48 c7 c7 68 cf 34 ac e8 96
f7 c0 ff <0f> 0b 48 89 fe 48 c7 c7 30 cf 34 ac e8 85 f7 c0 ff 0f 0b 90
90 90
[  139.636347] RSP: 0018:ffffb4890293fbb8 EFLAGS: 00010246
[  139.637295] RAX: 0000000000000054 RBX: ffff985fae91e620 RCX: 0000000000000000
[  139.638573] RDX: 0000000000000000 RSI: ffff985fb77d6be8 RDI: ffff985fb77d6be8
[  139.639855] RBP: ffff985fa98d40c0 R08: 00046c1ac1fe8d00 R09: 0000000000000000
[  139.641136] R10: 0000000000000000 R11: 0000000000000000 R12: ffff985fa98d4100
[  139.642416] R13: 0000000000000000 R14: ffff985faf00fe20 R15: 00000000000003f4
[  139.643699] FS:  00007f9898252b80(0000) GS:ffff985fb7600000(0000)
knlGS:0000000000000000
[  139.645199] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[  139.646244] CR2: 0000561d333b8218 CR3: 000000023110a000 CR4: 00000000001406e0
[  139.647533] Call Trace:
[  139.647997]  audit_remove_tree_rule+0xad/0x160
[  139.648819]  audit_del_rule+0x90/0x190
[  139.649507]  audit_rule_change+0x98e/0xbf0
[  139.650259]  audit_receive_msg+0x142/0x1160
[  139.651030]  ? netlink_deliver_tap+0x99/0x410
[  139.651832]  audit_receive+0x54/0xb0
[  139.652495]  netlink_unicast+0x181/0x210
[  139.653211]  netlink_sendmsg+0x218/0x3e0
[  139.653936]  sock_sendmsg+0x36/0x40
[  139.654583]  __sys_sendto+0xf1/0x160
[  139.655244]  ? syscall_trace_enter+0x1d3/0x330
[  139.656055]  ? trace_hardirqs_off_thunk+0x1a/0x1c
[  139.656912]  __x64_sys_sendto+0x24/0x30
[  139.657615]  do_syscall_64+0x60/0x1f0
[  139.658285]  entry_SYSCALL_64_after_hwframe+0x49/0xbe
[  139.659192] RIP: 0033:0x7f989835a7fb
[  139.659847] Code: 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 f3
0f 1e fa 48 8d 05 25 6f 0c 00 41 89 ca 8b 00 85 c0 75 14 b8 2c 00 00
00 0f 05 <48> 3d 00 f0 ff ff 77 75 c3 0f 1f 40 00 41 57 4d 89 c7 41 56
41 89
[  139.663170] RSP: 002b:00007ffc95ae6b48 EFLAGS: 00000246 ORIG_RAX:
000000000000002c
[  139.664531] RAX: ffffffffffffffda RBX: 0000000000000460 RCX: 00007f989835a7fb
[  139.665809] RDX: 0000000000000460 RSI: 00007ffc95ae6b80 RDI: 0000000000000003
[  139.667087] RBP: 0000000000000003 R08: 00007ffc95ae6b6c R09: 000000000000000c
[  139.668370] R10: 0000000000000000 R11: 0000000000000246 R12: 00007ffc95ae6b80
[  139.669648] R13: 00007ffc95ae6b6c R14: 0000000000000002 R15: 000000000000044f
[  139.670935] Modules linked in: ib_isert iscsi_target_mod ib_srpt
target_core_mod ib_srp scsi_transport_srp rpcrdma ib_umad rdma_ucm
ib_iser ib_ipoib rdma_cm iw_cm libiscsi scsi_transport_iscsi ib_cm
mlx5_ib ib_uverbs ib_core crct10dif_pclmul crc32_pclmul
ghash_clmulni_intel joydev virtio_balloon i2c_piix4 sunrpc
drm_kms_helper ttm crc32c_intel mlx5_core drm virtio_console mlxfw
serio_raw devlink virtio_blk virtio_net net_failover failover
ata_generic pata_acpi qemu_fw_cfg
[  139.678676] ---[ end trace be98f2acb1e536e4 ]---

> diff --git a/tests/stress_tree/Makefile b/tests/stress_tree/Makefile
> new file mode 100644
> index 000000000000..7ade09aad86f
> --- /dev/null
> +++ b/tests/stress_tree/Makefile
> @@ -0,0 +1,8 @@
> +TARGETS=$(patsubst %.c,%,$(wildcard *.c))
> +
> +LDLIBS += -lpthread
> +
> +all: $(TARGETS)
> +clean:
> +       rm -f $(TARGETS)
> +
> diff --git a/tests/stress_tree/test b/tests/stress_tree/test
> new file mode 100755
> index 000000000000..6215bec810d1
> --- /dev/null
> +++ b/tests/stress_tree/test
> @@ -0,0 +1,171 @@
> +#!/usr/bin/perl
> +
> +use strict;
> +
> +use Test;
> +BEGIN { plan tests => 1 }
> +
> +use File::Temp qw/ tempdir tempfile /;
> +
> +###
> +# functions
> +
> +sub key_gen {
> +       my @chars = ( "A" .. "Z", "a" .. "z" );
> +       my $key = "testsuite-" . time . "-";
> +       $key .= $chars[ rand @chars ] for 1 .. 8;
> +       return $key;
> +}
> +
> +# Run stat on random files in subtrees to generate audit events
> +sub run_stat {
> +       my($dir,$dirs) = @_;
> +       my $path;
> +
> +       while (1) {
> +               $path = "$dir/mnt/mnt".int(rand($dirs))."/subdir".int(rand($dirs));
> +               stat($path);
> +       }
> +}
> +
> +# Generate audit rules for subtrees. Do one rule per subtree. Because watch
> +# recursively iterates child mounts and we mount $dir/leaf$i under various
> +# subtrees, the inode corresponding to $dir/leaf$i gets tagged by different
> +# trees.
> +sub run_mark_audit {
> +       my($dir,$dirs,$key) = @_;
> +
> +       while (1) {
> +               for (my $i=0; $i < $dirs; $i++) {
> +                       system("auditctl -w $dir/mnt/mnt$i -p r -k $key");
> +               }
> +               system("auditctl -D -k $key >& /dev/null");
> +       }
> +}
> +
> +sub umount_all {
> +       my($dir,$dirs,$ignore_fail) = @_;
> +
> +       for (my $i=0; $i < $dirs; $i++) {
> +               while (system("umount $dir/leaf$i >& /dev/null") > 0 &&
> +                      $ignore_fail == 0) {
> +                       # Nothing - loop until umount succeeds
> +               }
> +       }
> +       for (my $i=0; $i < $dirs; $i++) {
> +               for (my $j=0; $j < $dirs; $j++) {
> +                       while (system("umount $dir/mnt/mnt$i/subdir$j >& /dev/null") > 0 &&
> +                              $ignore_fail == 0) {
> +                               # Nothing - loop until umount succeeds
> +                       }
> +               }
> +               while (system("umount $dir/mnt/mnt$i >& /dev/null") > 0 &&
> +                      $ignore_fail == 0) {
> +                       # Nothing - loop until umount succeeds
> +               }
> +       }
> +}
> +
> +# Mount and unmount filesystems. We pick random leaf mount so that sometimes
> +# a leaf mount point root inode will gather more tags from different trees
> +# and sometimes we will be quicker in unmounting all instances of leaf and
> +# thus excercise inode evistion path
> +sub run_mount {
> +       my($dir,$dirs) = @_;
> +
> +       while (1) {
> +               # We use tmpfs here and not just bind mounts of some dir so
> +               # that the root inode gets evicted once all instances are
> +               # unmounted.
> +               for (my $i=0; $i < $dirs; $i++) {
> +                       system("mount -t tmpfs none $dir/leaf$i");
> +               }
> +               for (my $i=0; $i < $dirs; $i++) {
> +                       system("mount --bind $dir/dir$i $dir/mnt/mnt$i");
> +                       for (my $j=0; $j < $dirs; $j++) {
> +                               my $leaf="$dir/leaf".int(rand($dirs));
> +                               system("mount --bind $leaf $dir/mnt/mnt$i/subdir$j");
> +                       }
> +               }
> +               umount_all($dir, $dirs, 0);
> +       }
> +}
> +
> +
> +###
> +# setup
> +
> +# reset audit
> +system("auditctl -D >& /dev/null");
> +
> +# create temp directory
> +my $dir = tempdir( TEMPLATE => '/tmp/audit-testsuite-XXXX', CLEANUP => 1 );
> +
> +# create stdout/stderr sinks
> +( my $fh_out, my $stdout ) = tempfile(
> +       TEMPLATE => '/tmp/audit-testsuite-out-XXXX',
> +       UNLINK   => 1
> +);
> +( my $fh_err, my $stderr ) = tempfile(
> +       TEMPLATE => '/tmp/audit-testsuite-err-XXXX',
> +       UNLINK   => 1
> +);
> +
> +###
> +# tests
> +
> +my $dirs = 4;
> +
> +# setup directory hierarchy
> +for (my $i=0; $i < $dirs; $i++) {
> +       mkdir $dir."/dir".$i;
> +       for (my $j=0; $j < $dirs; $j++) {
> +               mkdir $dir."/dir".$i."/subdir".$j;
> +       }
> +}
> +mkdir "$dir/mnt";
> +for (my $i=0; $i < $dirs; $i++) {
> +       mkdir "$dir/mnt/mnt$i";
> +       mkdir "$dir/leaf$i";
> +}
> +
> +my $stat_pid = fork();
> +
> +if ($stat_pid == 0) {
> +       run_stat($dir, $dirs);
> +       # Never reached
> +       exit;
> +}
> +
> +my $mount_pid = fork();
> +
> +if ($mount_pid == 0) {
> +       run_mount($dir, $dirs);
> +       # Never reached
> +       exit;
> +}
> +
> +my $key = key_gen();
> +
> +my $audit_pid = fork();
> +
> +if ($audit_pid == 0) {
> +       run_mark_audit($dir, $dirs, $key);
> +       # Never reached
> +       exit;
> +}
> +
> +# Sleep for a minute to let stress test run...
> +sleep(60);
> +ok(1);
> +
> +###
> +# cleanup
> +
> +kill('KILL', $stat_pid, $mount_pid, $audit_pid);
> +# Wait for children to terminate
> +waitpid($stat_pid, 0);
> +waitpid($mount_pid, 0);
> +waitpid($audit_pid, 0);
> +system("auditctl -D >& /dev/null");
> +umount_all($dir, $dirs, 1);
> --
> 2.16.4
>


--
paul moore
www.paul-moore.com
Jan Kara Oct. 9, 2018, 7:40 a.m. UTC | #4
On Fri 05-10-18 17:06:22, Paul Moore wrote:
> On Tue, Sep 4, 2018 at 12:06 PM Jan Kara <jack@suse.cz> wrote:
> > Add stress test for stressing audit tree watches by adding and deleting
> > rules while events are generated and watched filesystems are mounted and
> > unmounted in parallel.
> >
> > Signed-off-by: Jan Kara <jack@suse.cz>
> > ---
> >  tests/stress_tree/Makefile |   8 +++
> >  tests/stress_tree/test     | 171 +++++++++++++++++++++++++++++++++++++++++++++
> >  2 files changed, 179 insertions(+)
> >  create mode 100644 tests/stress_tree/Makefile
> >  create mode 100755 tests/stress_tree/test
> 
> No commentary on the test itself, other than perhaps it should live
> under test_manual/, but in running the tests in a loop today I am
> reliably able to panic my test kernel after ~30m or so.

Interesting. How do you run the test?

> I'm using the kernel linked below which is Fedora Rawhide +
> selinux/next + audit/next + audit/working-fsnotify_fixes; a link to
> the patches added to the Rawhide kernel can be found in the list
> archive linked below.
> 
> * https://groups.google.com/forum/#!topic/kernel-secnext/SFv0d-ij3z8
> * https://copr.fedorainfracloud.org/coprs/pcmoore/kernel-secnext/build/805949
> 
> The initial panic dump is below:

I can try to reproduce this but could you perhaps find out which of the
list manipulations in audit_remove_tree_rule() hit the corruption?

								Honza

> 
> [  139.619065] list_del corruption. prev->next should be
> ffff985fa98d4100, but was ffff985fae91e370
> [  139.622504] ------------[ cut here ]------------
> [  139.623402] kernel BUG at lib/list_debug.c:53!
> [  139.624294] invalid opcode: 0000 [#1] SMP PTI
> [  139.625439] CPU: 1 PID: 3248 Comm: auditctl Not tainted
> 4.19.0-0.rc6.git2.1.1.secnext.fc30.x86_64 #1
> [  139.630761] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
> [  139.631812] RIP: 0010:__list_del_entry_valid.cold.1+0x34/0x4c
> [  139.632853] Code: ce 34 ac e8 b8 f7 c0 ff 0f 0b 48 c7 c7 a8 cf 34
> ac e8 aa f7 c0 ff 0f 0b 48 89 f2 48 89 fe 48 c7 c7 68 cf 34 ac e8 96
> f7 c0 ff <0f> 0b 48 89 fe 48 c7 c7 30 cf 34 ac e8 85 f7 c0 ff 0f 0b 90
> 90 90
> [  139.636347] RSP: 0018:ffffb4890293fbb8 EFLAGS: 00010246
> [  139.637295] RAX: 0000000000000054 RBX: ffff985fae91e620 RCX: 0000000000000000
> [  139.638573] RDX: 0000000000000000 RSI: ffff985fb77d6be8 RDI: ffff985fb77d6be8
> [  139.639855] RBP: ffff985fa98d40c0 R08: 00046c1ac1fe8d00 R09: 0000000000000000
> [  139.641136] R10: 0000000000000000 R11: 0000000000000000 R12: ffff985fa98d4100
> [  139.642416] R13: 0000000000000000 R14: ffff985faf00fe20 R15: 00000000000003f4
> [  139.643699] FS:  00007f9898252b80(0000) GS:ffff985fb7600000(0000)
> knlGS:0000000000000000
> [  139.645199] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [  139.646244] CR2: 0000561d333b8218 CR3: 000000023110a000 CR4: 00000000001406e0
> [  139.647533] Call Trace:
> [  139.647997]  audit_remove_tree_rule+0xad/0x160
> [  139.648819]  audit_del_rule+0x90/0x190
> [  139.649507]  audit_rule_change+0x98e/0xbf0
> [  139.650259]  audit_receive_msg+0x142/0x1160
> [  139.651030]  ? netlink_deliver_tap+0x99/0x410
> [  139.651832]  audit_receive+0x54/0xb0
> [  139.652495]  netlink_unicast+0x181/0x210
> [  139.653211]  netlink_sendmsg+0x218/0x3e0
> [  139.653936]  sock_sendmsg+0x36/0x40
> [  139.654583]  __sys_sendto+0xf1/0x160
> [  139.655244]  ? syscall_trace_enter+0x1d3/0x330
> [  139.656055]  ? trace_hardirqs_off_thunk+0x1a/0x1c
> [  139.656912]  __x64_sys_sendto+0x24/0x30
> [  139.657615]  do_syscall_64+0x60/0x1f0
> [  139.658285]  entry_SYSCALL_64_after_hwframe+0x49/0xbe
> [  139.659192] RIP: 0033:0x7f989835a7fb
> [  139.659847] Code: 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 f3
> 0f 1e fa 48 8d 05 25 6f 0c 00 41 89 ca 8b 00 85 c0 75 14 b8 2c 00 00
> 00 0f 05 <48> 3d 00 f0 ff ff 77 75 c3 0f 1f 40 00 41 57 4d 89 c7 41 56
> 41 89
> [  139.663170] RSP: 002b:00007ffc95ae6b48 EFLAGS: 00000246 ORIG_RAX:
> 000000000000002c
> [  139.664531] RAX: ffffffffffffffda RBX: 0000000000000460 RCX: 00007f989835a7fb
> [  139.665809] RDX: 0000000000000460 RSI: 00007ffc95ae6b80 RDI: 0000000000000003
> [  139.667087] RBP: 0000000000000003 R08: 00007ffc95ae6b6c R09: 000000000000000c
> [  139.668370] R10: 0000000000000000 R11: 0000000000000246 R12: 00007ffc95ae6b80
> [  139.669648] R13: 00007ffc95ae6b6c R14: 0000000000000002 R15: 000000000000044f
> [  139.670935] Modules linked in: ib_isert iscsi_target_mod ib_srpt
> target_core_mod ib_srp scsi_transport_srp rpcrdma ib_umad rdma_ucm
> ib_iser ib_ipoib rdma_cm iw_cm libiscsi scsi_transport_iscsi ib_cm
> mlx5_ib ib_uverbs ib_core crct10dif_pclmul crc32_pclmul
> ghash_clmulni_intel joydev virtio_balloon i2c_piix4 sunrpc
> drm_kms_helper ttm crc32c_intel mlx5_core drm virtio_console mlxfw
> serio_raw devlink virtio_blk virtio_net net_failover failover
> ata_generic pata_acpi qemu_fw_cfg
> [  139.678676] ---[ end trace be98f2acb1e536e4 ]---
Paul Moore Oct. 10, 2018, 6:43 a.m. UTC | #5
On Tue, Oct 9, 2018 at 3:40 AM Jan Kara <jack@suse.cz> wrote:
> On Fri 05-10-18 17:06:22, Paul Moore wrote:
> > On Tue, Sep 4, 2018 at 12:06 PM Jan Kara <jack@suse.cz> wrote:
> > > Add stress test for stressing audit tree watches by adding and deleting
> > > rules while events are generated and watched filesystems are mounted and
> > > unmounted in parallel.
> > >
> > > Signed-off-by: Jan Kara <jack@suse.cz>
> > > ---
> > >  tests/stress_tree/Makefile |   8 +++
> > >  tests/stress_tree/test     | 171 +++++++++++++++++++++++++++++++++++++++++++++
> > >  2 files changed, 179 insertions(+)
> > >  create mode 100644 tests/stress_tree/Makefile
> > >  create mode 100755 tests/stress_tree/test
> >
> > No commentary on the test itself, other than perhaps it should live
> > under test_manual/, but in running the tests in a loop today I am
> > reliably able to panic my test kernel after ~30m or so.
>
> Interesting. How do you run the test?

Nothing fancy, just a simple bash loop:

  # cd tests/stress_tree
  # while ./test; do /bin/true; done

> > I'm using the kernel linked below which is Fedora Rawhide +
> > selinux/next + audit/next + audit/working-fsnotify_fixes; a link to
> > the patches added to the Rawhide kernel can be found in the list
> > archive linked below.
> >
> > * https://groups.google.com/forum/#!topic/kernel-secnext/SFv0d-ij3z8
> > * https://copr.fedorainfracloud.org/coprs/pcmoore/kernel-secnext/build/805949
> >
> > The initial panic dump is below:
>
> I can try to reproduce this but could you perhaps find out which of the
> list manipulations in audit_remove_tree_rule() hit the corrution?

A quick dump of the audit_remove_tree_rule() function makes it look
like it is the inner most list_del_init() call.

Dump of assembler code for function audit_remove_tree_rule:
646     {
  0xffffffff811ad900 <+0>:     callq  0xffffffff81c01850 <__fentry__>

647             struct audit_tree *tree;

648             tree = rule->tree;
  0xffffffff811ad905 <+5>:     push   %r14
  0xffffffff811ad907 <+7>:     xor    %eax,%eax
  0xffffffff811ad909 <+9>:     push   %r13
  0xffffffff811ad90b <+11>:    push   %r12
  0xffffffff811ad90d <+13>:    push   %rbp
  0xffffffff811ad90e <+14>:    push   %rbx
  0xffffffff811ad90f <+15>:    mov    0x140(%rdi),%rbp

649             if (tree) {
  0xffffffff811ad916 <+22>:    test   %rbp,%rbp
  0xffffffff811ad919 <+25>:    je     0xffffffff811ad990
<audit_remove_tree_rule+144>
  0xffffffff811ad91b <+27>:    mov    %rdi,%rbx

650                     spin_lock(&hash_lock);
651                     list_del_init(&rule->rlist);
  0xffffffff811ad92a <+42>:    lea    0x150(%rbx),%r12

652                     if (list_empty(&tree->rules) && !tree->goner) {
653                             tree->root = NULL;
  0xffffffff811ad999 <+153>:   movq   $0x0,0x8(%rbp)

654                             list_del_init(&tree->same_root);
  0xffffffff811ad9a1 <+161>:   lea    0x40(%rbp),%r12

655                             tree->goner = 1;
  0xffffffff811ad9c8 <+200>:   lea    0x30(%rbp),%r12
  0xffffffff811ad9cc <+204>:   movl   $0x1,0x4(%rbp)

656                             list_move(&tree->list, &prune_list);
657                             rule->tree = NULL;
  0xffffffff811ada21 <+289>:   movq   $0x0,0x140(%rbx)

658                             spin_unlock(&hash_lock);
659                             audit_schedule_prune();

660                             return 1;
  0xffffffff811ada44 <+324>:   mov    $0x1,%eax
  0xffffffff811ada49 <+329>:   pop    %rbx
  0xffffffff811ada4a <+330>:   pop    %rbp
  0xffffffff811ada4b <+331>:   pop    %r12
  0xffffffff811ada4d <+333>:   pop    %r13
  0xffffffff811ada4f <+335>:   pop    %r14
  0xffffffff811ada51 <+337>:   retq
  0xffffffff811ada52:  data16 nopw %cs:0x0(%rax,%rax,1)
  0xffffffff811ada5d:  nopl   (%rax)

661                     }
662                     rule->tree = NULL;
  0xffffffff811ad974 <+116>:   movq   $0x0,0x140(%rbx)

663                     spin_unlock(&hash_lock);
664                     return 1;
  0xffffffff811ad98b <+139>:   mov    $0x1,%eax
  0xffffffff811ad990 <+144>:   pop    %rbx
  0xffffffff811ad991 <+145>:   pop    %rbp
  0xffffffff811ad992 <+146>:   pop    %r12
  0xffffffff811ad994 <+148>:   pop    %r13
  0xffffffff811ad996 <+150>:   pop    %r14
  0xffffffff811ad998 <+152>:   retq

665             }
666             return 0;
667     }

> > [  139.619065] list_del corruption. prev->next should be
> > ffff985fa98d4100, but was ffff985fae91e370
> > [  139.622504] ------------[ cut here ]------------
> > [  139.623402] kernel BUG at lib/list_debug.c:53!
> > [  139.624294] invalid opcode: 0000 [#1] SMP PTI
> > [  139.625439] CPU: 1 PID: 3248 Comm: auditctl Not tainted
> > 4.19.0-0.rc6.git2.1.1.secnext.fc30.x86_64 #1
> > [  139.630761] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
> > [  139.631812] RIP: 0010:__list_del_entry_valid.cold.1+0x34/0x4c
> > [  139.632853] Code: ce 34 ac e8 b8 f7 c0 ff 0f 0b 48 c7 c7 a8 cf 34
> > ac e8 aa f7 c0 ff 0f 0b 48 89 f2 48 89 fe 48 c7 c7 68 cf 34 ac e8 96
> > f7 c0 ff <0f> 0b 48 89 fe 48 c7 c7 30 cf 34 ac e8 85 f7 c0 ff 0f 0b 90
> > 90 90
> > [  139.636347] RSP: 0018:ffffb4890293fbb8 EFLAGS: 00010246
> > [  139.637295] RAX: 0000000000000054 RBX: ffff985fae91e620 RCX: 0000000000000000
> > [  139.638573] RDX: 0000000000000000 RSI: ffff985fb77d6be8 RDI: ffff985fb77d6be8
> > [  139.639855] RBP: ffff985fa98d40c0 R08: 00046c1ac1fe8d00 R09: 0000000000000000
> > [  139.641136] R10: 0000000000000000 R11: 0000000000000000 R12: ffff985fa98d4100
> > [  139.642416] R13: 0000000000000000 R14: ffff985faf00fe20 R15: 00000000000003f4
> > [  139.643699] FS:  00007f9898252b80(0000) GS:ffff985fb7600000(0000)
> > knlGS:0000000000000000
> > [  139.645199] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> > [  139.646244] CR2: 0000561d333b8218 CR3: 000000023110a000 CR4: 00000000001406e0
> > [  139.647533] Call Trace:
> > [  139.647997]  audit_remove_tree_rule+0xad/0x160
> > [  139.648819]  audit_del_rule+0x90/0x190
> > [  139.649507]  audit_rule_change+0x98e/0xbf0
> > [  139.650259]  audit_receive_msg+0x142/0x1160
> > [  139.651030]  ? netlink_deliver_tap+0x99/0x410
> > [  139.651832]  audit_receive+0x54/0xb0
> > [  139.652495]  netlink_unicast+0x181/0x210
> > [  139.653211]  netlink_sendmsg+0x218/0x3e0
> > [  139.653936]  sock_sendmsg+0x36/0x40
> > [  139.654583]  __sys_sendto+0xf1/0x160
> > [  139.655244]  ? syscall_trace_enter+0x1d3/0x330
> > [  139.656055]  ? trace_hardirqs_off_thunk+0x1a/0x1c
> > [  139.656912]  __x64_sys_sendto+0x24/0x30
> > [  139.657615]  do_syscall_64+0x60/0x1f0
> > [  139.658285]  entry_SYSCALL_64_after_hwframe+0x49/0xbe
> > [  139.659192] RIP: 0033:0x7f989835a7fb
> > [  139.659847] Code: 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 f3
> > 0f 1e fa 48 8d 05 25 6f 0c 00 41 89 ca 8b 00 85 c0 75 14 b8 2c 00 00
> > 00 0f 05 <48> 3d 00 f0 ff ff 77 75 c3 0f 1f 40 00 41 57 4d 89 c7 41 56
> > 41 89
> > [  139.663170] RSP: 002b:00007ffc95ae6b48 EFLAGS: 00000246 ORIG_RAX:
> > 000000000000002c
> > [  139.664531] RAX: ffffffffffffffda RBX: 0000000000000460 RCX: 00007f989835a7fb
> > [  139.665809] RDX: 0000000000000460 RSI: 00007ffc95ae6b80 RDI: 0000000000000003
> > [  139.667087] RBP: 0000000000000003 R08: 00007ffc95ae6b6c R09: 000000000000000c
> > [  139.668370] R10: 0000000000000000 R11: 0000000000000246 R12: 00007ffc95ae6b80
> > [  139.669648] R13: 00007ffc95ae6b6c R14: 0000000000000002 R15: 000000000000044f
> > [  139.670935] Modules linked in: ib_isert iscsi_target_mod ib_srpt
> > target_core_mod ib_srp scsi_transport_srp rpcrdma ib_umad rdma_ucm
> > ib_iser ib_ipoib rdma_cm iw_cm libiscsi scsi_transport_iscsi ib_cm
> > mlx5_ib ib_uverbs ib_core crct10dif_pclmul crc32_pclmul
> > ghash_clmulni_intel joydev virtio_balloon i2c_piix4 sunrpc
> > drm_kms_helper ttm crc32c_intel mlx5_core drm virtio_console mlxfw
> > serio_raw devlink virtio_blk virtio_net net_failover failover
> > ata_generic pata_acpi qemu_fw_cfg
> > [  139.678676] ---[ end trace be98f2acb1e536e4 ]---
> --
> Jan Kara <jack@suse.com>
> SUSE Labs, CR

--
paul moore
www.paul-moore.com
Jan Kara Oct. 11, 2018, 11:39 a.m. UTC | #6
On Wed 10-10-18 02:43:46, Paul Moore wrote:
> On Tue, Oct 9, 2018 at 3:40 AM Jan Kara <jack@suse.cz> wrote:
> > On Fri 05-10-18 17:06:22, Paul Moore wrote:
> > > On Tue, Sep 4, 2018 at 12:06 PM Jan Kara <jack@suse.cz> wrote:
> > > > Add stress test for stressing audit tree watches by adding and deleting
> > > > rules while events are generated and watched filesystems are mounted and
> > > > unmounted in parallel.
> > > >
> > > > Signed-off-by: Jan Kara <jack@suse.cz>
> > > > ---
> > > >  tests/stress_tree/Makefile |   8 +++
> > > >  tests/stress_tree/test     | 171 +++++++++++++++++++++++++++++++++++++++++++++
> > > >  2 files changed, 179 insertions(+)
> > > >  create mode 100644 tests/stress_tree/Makefile
> > > >  create mode 100755 tests/stress_tree/test
> > >
> > > No commentary on the test itself, other than perhaps it should live
> > > under test_manual/, but in running the tests in a loop today I am
> > > reliably able to panic my test kernel after ~30m or so.
> >
> > Interesting. How do you run the test?
> 
> Nothing fancy, just a simple bash loop:
> 
>   # cd tests/stress_tree
>   # while ./test; do /bin/true; done

OK, I did succeed in reproducing some problems with my patches - once I was
able to trigger a livelock and following softlockup warning - this is
actually a problem introduced by my patches, and once a use after free
issue (not sure what that was since after I've added some debugging I
wasn't able to trigger it anymore). Anyway, I'll try more after fixing the
livelock. Do you want me to add fixes on top of my series or just fixup the
original series?

								Honza
Paul Moore Oct. 11, 2018, 11:03 p.m. UTC | #7
On October 11, 2018 7:39:39 AM Jan Kara <jack@suse.cz> wrote:
> On Wed 10-10-18 02:43:46, Paul Moore wrote:
>> On Tue, Oct 9, 2018 at 3:40 AM Jan Kara <jack@suse.cz> wrote:
>>> On Fri 05-10-18 17:06:22, Paul Moore wrote:
>>>> On Tue, Sep 4, 2018 at 12:06 PM Jan Kara <jack@suse.cz> wrote:
>>>>> Add stress test for stressing audit tree watches by adding and deleting
>>>>> rules while events are generated and watched filesystems are mounted and
>>>>> unmounted in parallel.
>>>>>
>>>>> Signed-off-by: Jan Kara <jack@suse.cz>
>>>>> ---
>>>>> tests/stress_tree/Makefile |   8 +++
>>>>> tests/stress_tree/test     | 171 +++++++++++++++++++++++++++++++++++++++++++++
>>>>> 2 files changed, 179 insertions(+)
>>>>> create mode 100644 tests/stress_tree/Makefile
>>>>> create mode 100755 tests/stress_tree/test
>>>>
>>>> No commentary on the test itself, other than perhaps it should live
>>>> under test_manual/, but in running the tests in a loop today I am
>>>> reliably able to panic my test kernel after ~30m or so.
>>>
>>> Interesting. How do you run the test?
>>
>> Nothing fancy, just a simple bash loop:
>>
>> # cd tests/stress_tree
>> # while ./test; do /bin/true; done
>
> OK, I did succeed in reproducing some problems with my patches - once I was
> able to trigger a livelock and following softlockup warning - this is
> actually a problem introduced by my patches, and once a use after free
> issue (not sure what that was since after I've added some debugging I
> wasn't able to trigger it anymore). Anyway, I'll try more after fixing the
> livelock. Do you want me to add fixes on top of my series or just fixup the
> original series?

Since these are pretty serious bugs, and I try to avoid merging known-broken patches which will go up to Linus, why don't you go ahead and respin the patchset with the new fixes included.  You can also use the opportunity to squash in the rename patch and fix that mid-patchset compilation problem that I fixed up during the merge.

Thanks.

--
paul moore
www.paul-moore.com
Jan Kara Oct. 15, 2018, 10:04 a.m. UTC | #8
On Thu 11-10-18 19:03:53, Paul Moore wrote:
> On October 11, 2018 7:39:39 AM Jan Kara <jack@suse.cz> wrote:
> > On Wed 10-10-18 02:43:46, Paul Moore wrote:
> >> On Tue, Oct 9, 2018 at 3:40 AM Jan Kara <jack@suse.cz> wrote:
> >>> On Fri 05-10-18 17:06:22, Paul Moore wrote:
> >>>> On Tue, Sep 4, 2018 at 12:06 PM Jan Kara <jack@suse.cz> wrote:
> >>>>> Add stress test for stressing audit tree watches by adding and deleting
> >>>>> rules while events are generated and watched filesystems are mounted and
> >>>>> unmounted in parallel.
> >>>>>
> >>>>> Signed-off-by: Jan Kara <jack@suse.cz>
> >>>>> ---
> >>>>> tests/stress_tree/Makefile |   8 +++
> >>>>> tests/stress_tree/test     | 171 +++++++++++++++++++++++++++++++++++++++++++++
> >>>>> 2 files changed, 179 insertions(+)
> >>>>> create mode 100644 tests/stress_tree/Makefile
> >>>>> create mode 100755 tests/stress_tree/test
> >>>>
> >>>> No commentary on the test itself, other than perhaps it should live
> >>>> under test_manual/, but in running the tests in a loop today I am
> >>>> reliably able to panic my test kernel after ~30m or so.
> >>>
> >>> Interesting. How do you run the test?
> >>
> >> Nothing fancy, just a simple bash loop:
> >>
> >> # cd tests/stress_tree
> >> # while ./test; do /bin/true; done
> >
> > OK, I did succeed in reproducing some problems with my patches - once I was
> > able to trigger a livelock and following softlockup warning - this is
> > actually a problem introduced by my patches, and once a use after free
> > issue (not sure what that was since after I've added some debugging I
> > wasn't able to trigger it anymore). Anyway, I'll try more after fixing the
> > livelock. Do you want me to add fixes on top of my series or just fixup the
> > original series?
> 
> Since these are pretty serious bugs, and I try to avoid merging
> known-broken patches which will go up to Linus, why don't you go ahead
> and respin the patchset with the new fixes included.  You can also use
> the opportunity to squash in the rename patch and fix that mid-patchset
> compilation problem that I fixed up during the merge.

OK, I'm now testing a version with the softlockup fixed and some locking
around untag_chunk() simplified when I had to meddle with that anyway. I'll
see if I can hit further failures...

								Honza
Paul Moore Oct. 15, 2018, 3:39 p.m. UTC | #9
On Mon, Oct 15, 2018 at 6:04 AM Jan Kara <jack@suse.cz> wrote:
> On Thu 11-10-18 19:03:53, Paul Moore wrote:
> > On October 11, 2018 7:39:39 AM Jan Kara <jack@suse.cz> wrote:
> > > On Wed 10-10-18 02:43:46, Paul Moore wrote:
> > >> On Tue, Oct 9, 2018 at 3:40 AM Jan Kara <jack@suse.cz> wrote:
> > >>> On Fri 05-10-18 17:06:22, Paul Moore wrote:
> > >>>> On Tue, Sep 4, 2018 at 12:06 PM Jan Kara <jack@suse.cz> wrote:
> > >>>>> Add stress test for stressing audit tree watches by adding and deleting
> > >>>>> rules while events are generated and watched filesystems are mounted and
> > >>>>> unmounted in parallel.
> > >>>>>
> > >>>>> Signed-off-by: Jan Kara <jack@suse.cz>
> > >>>>> ---
> > >>>>> tests/stress_tree/Makefile |   8 +++
> > >>>>> tests/stress_tree/test     | 171 +++++++++++++++++++++++++++++++++++++++++++++
> > >>>>> 2 files changed, 179 insertions(+)
> > >>>>> create mode 100644 tests/stress_tree/Makefile
> > >>>>> create mode 100755 tests/stress_tree/test
> > >>>>
> > >>>> No commentary on the test itself, other than perhaps it should live
> > >>>> under test_manual/, but in running the tests in a loop today I am
> > >>>> reliably able to panic my test kernel after ~30m or so.
> > >>>
> > >>> Interesting. How do you run the test?
> > >>
> > >> Nothing fancy, just a simple bash loop:
> > >>
> > >> # cd tests/stress_tree
> > >> # while ./test; do /bin/true; done
> > >
> > > OK, I did succeed in reproducing some problems with my patches - once I was
> > > able to trigger a livelock and following softlockup warning - this is
> > > actually a problem introduced by my patches, and once a use after free
> > > issue (not sure what that was since after I've added some debugging I
> > > wasn't able to trigger it anymore). Anyway, I'll try more after fixing the
> > > livelock. Do you want me to add fixes on top of my series or just fixup the
> > > original series?
> >
> > Since these are pretty serious bugs, and I try to avoid merging
> > known-broken patches which will go up to Linus, why don't you go ahead
> > and respin the patchset with the new fixes included.  You can also use
> > the opportunity to squash in the rename patch and fix that mid-patchset
> > compilation problem that I fixed up during the merge.
>
> OK, I'm now testing a version with the softlockup fixed and some locking
> around untag_chunk() simplified when I had to meddle with that anyway. I'll
> see if I can hit further failures...

Thanks for the update, let me know how the testing goes ...
Jan Kara Oct. 17, 2018, 10:09 a.m. UTC | #10
On Mon 15-10-18 11:39:51, Paul Moore wrote:
> On Mon, Oct 15, 2018 at 6:04 AM Jan Kara <jack@suse.cz> wrote:
> > On Thu 11-10-18 19:03:53, Paul Moore wrote:
> > > On October 11, 2018 7:39:39 AM Jan Kara <jack@suse.cz> wrote:
> > > > On Wed 10-10-18 02:43:46, Paul Moore wrote:
> > > >> On Tue, Oct 9, 2018 at 3:40 AM Jan Kara <jack@suse.cz> wrote:
> > > >>> On Fri 05-10-18 17:06:22, Paul Moore wrote:
> > > >>>> On Tue, Sep 4, 2018 at 12:06 PM Jan Kara <jack@suse.cz> wrote:
> > > >>>>> Add stress test for stressing audit tree watches by adding and deleting
> > > >>>>> rules while events are generated and watched filesystems are mounted and
> > > >>>>> unmounted in parallel.
> > > >>>>>
> > > >>>>> Signed-off-by: Jan Kara <jack@suse.cz>
> > > >>>>> ---
> > > >>>>> tests/stress_tree/Makefile |   8 +++
> > > >>>>> tests/stress_tree/test     | 171 +++++++++++++++++++++++++++++++++++++++++++++
> > > >>>>> 2 files changed, 179 insertions(+)
> > > >>>>> create mode 100644 tests/stress_tree/Makefile
> > > >>>>> create mode 100755 tests/stress_tree/test
> > > >>>>
> > > >>>> No commentary on the test itself, other than perhaps it should live
> > > >>>> under test_manual/, but in running the tests in a loop today I am
> > > >>>> reliably able to panic my test kernel after ~30m or so.
> > > >>>
> > > >>> Interesting. How do you run the test?
> > > >>
> > > >> Nothing fancy, just a simple bash loop:
> > > >>
> > > >> # cd tests/stress_tree
> > > >> # while ./test; do /bin/true; done
> > > >
> > > > OK, I did succeed in reproducing some problems with my patches - once I was
> > > > able to trigger a livelock and following softlockup warning - this is
> > > > actually a problem introduced by my patches, and once a use after free
> > > > issue (not sure what that was since after I've added some debugging I
> > > > wasn't able to trigger it anymore). Anyway, I'll try more after fixing the
> > > > livelock. Do you want me to add fixes on top of my series or just fixup the
> > > > original series?
> > >
> > > Since these are pretty serious bugs, and I try to avoid merging
> > > known-broken patches which will go up to Linus, why don't you go ahead
> > > and respin the patchset with the new fixes included.  You can also use
> > > the opportunity to squash in the rename patch and fix that mid-patchset
> > > compilation problem that I fixed up during the merge.
> >
> > OK, I'm now testing a version with the softlockup fixed and some locking
> > around untag_chunk() simplified when I had to meddle with that anyway. I'll
> > see if I can hit further failures...
> 
> Thanks for the update, let me know how the testing goes ...

OK, yesterday I've finally nailed down the list corruption. Testing has ran
fine for 10 hours, after that it crashed due to independent problem in
fsnotify infrastructure. I've restarted the testing but I think patches are
good for another posting - will send in a minute.

								Honza
Paul Moore Nov. 14, 2018, 12:34 a.m. UTC | #11
On Tue, Sep 4, 2018 at 12:06 PM Jan Kara <jack@suse.cz> wrote:
> Add stress test for stressing audit tree watches by adding and deleting
> rules while events are generated and watched filesystems are mounted and
> unmounted in parallel.
>
> Signed-off-by: Jan Kara <jack@suse.cz>
> ---
>  tests/stress_tree/Makefile |   8 +++
>  tests/stress_tree/test     | 171 +++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 179 insertions(+)
>  create mode 100644 tests/stress_tree/Makefile
>  create mode 100755 tests/stress_tree/test

I'd like to get this into the audit-testsuite repo, but I think it
should live under test_manual/ instead of tests, is that okay with
you?  If so, no need to resubmit, I can move the file during the
merge.

> diff --git a/tests/stress_tree/Makefile b/tests/stress_tree/Makefile
> new file mode 100644
> index 000000000000..7ade09aad86f
> --- /dev/null
> +++ b/tests/stress_tree/Makefile
> @@ -0,0 +1,8 @@
> +TARGETS=$(patsubst %.c,%,$(wildcard *.c))
> +
> +LDLIBS += -lpthread
> +
> +all: $(TARGETS)
> +clean:
> +       rm -f $(TARGETS)
> +
> diff --git a/tests/stress_tree/test b/tests/stress_tree/test
> new file mode 100755
> index 000000000000..6215bec810d1
> --- /dev/null
> +++ b/tests/stress_tree/test
> @@ -0,0 +1,171 @@
> +#!/usr/bin/perl
> +
> +use strict;
> +
> +use Test;
> +BEGIN { plan tests => 1 }
> +
> +use File::Temp qw/ tempdir tempfile /;
> +
> +###
> +# functions
> +
> +sub key_gen {
> +       my @chars = ( "A" .. "Z", "a" .. "z" );
> +       my $key = "testsuite-" . time . "-";
> +       $key .= $chars[ rand @chars ] for 1 .. 8;
> +       return $key;
> +}
> +
> +# Run stat on random files in subtrees to generate audit events
> +sub run_stat {
> +       my($dir,$dirs) = @_;
> +       my $path;
> +
> +       while (1) {
> +               $path = "$dir/mnt/mnt".int(rand($dirs))."/subdir".int(rand($dirs));
> +               stat($path);
> +       }
> +}
> +
> +# Generate audit rules for subtrees. Do one rule per subtree. Because watch
> +# recursively iterates child mounts and we mount $dir/leaf$i under various
> +# subtrees, the inode corresponding to $dir/leaf$i gets tagged by different
> +# trees.
> +sub run_mark_audit {
> +       my($dir,$dirs,$key) = @_;
> +
> +       while (1) {
> +               for (my $i=0; $i < $dirs; $i++) {
> +                       system("auditctl -w $dir/mnt/mnt$i -p r -k $key");
> +               }
> +               system("auditctl -D -k $key >& /dev/null");
> +       }
> +}
> +
> +sub umount_all {
> +       my($dir,$dirs,$ignore_fail) = @_;
> +
> +       for (my $i=0; $i < $dirs; $i++) {
> +               while (system("umount $dir/leaf$i >& /dev/null") > 0 &&
> +                      $ignore_fail == 0) {
> +                       # Nothing - loop until umount succeeds
> +               }
> +       }
> +       for (my $i=0; $i < $dirs; $i++) {
> +               for (my $j=0; $j < $dirs; $j++) {
> +                       while (system("umount $dir/mnt/mnt$i/subdir$j >& /dev/null") > 0 &&
> +                              $ignore_fail == 0) {
> +                               # Nothing - loop until umount succeeds
> +                       }
> +               }
> +               while (system("umount $dir/mnt/mnt$i >& /dev/null") > 0 &&
> +                      $ignore_fail == 0) {
> +                       # Nothing - loop until umount succeeds
> +               }
> +       }
> +}
> +
> +# Mount and unmount filesystems. We pick random leaf mount so that sometimes
> +# a leaf mount point root inode will gather more tags from different trees
> +# and sometimes we will be quicker in unmounting all instances of leaf and
> > +# thus exercise inode eviction path
> +sub run_mount {
> +       my($dir,$dirs) = @_;
> +
> +       while (1) {
> +               # We use tmpfs here and not just bind mounts of some dir so
> +               # that the root inode gets evicted once all instances are
> +               # unmounted.
> +               for (my $i=0; $i < $dirs; $i++) {
> +                       system("mount -t tmpfs none $dir/leaf$i");
> +               }
> +               for (my $i=0; $i < $dirs; $i++) {
> +                       system("mount --bind $dir/dir$i $dir/mnt/mnt$i");
> +                       for (my $j=0; $j < $dirs; $j++) {
> +                               my $leaf="$dir/leaf".int(rand($dirs));
> +                               system("mount --bind $leaf $dir/mnt/mnt$i/subdir$j");
> +                       }
> +               }
> +               umount_all($dir, $dirs, 0);
> +       }
> +}
> +
> +
> +###
> +# setup
> +
> +# reset audit
> +system("auditctl -D >& /dev/null");
> +
> +# create temp directory
> +my $dir = tempdir( TEMPLATE => '/tmp/audit-testsuite-XXXX', CLEANUP => 1 );
> +
> +# create stdout/stderr sinks
> +( my $fh_out, my $stdout ) = tempfile(
> +       TEMPLATE => '/tmp/audit-testsuite-out-XXXX',
> +       UNLINK   => 1
> +);
> +( my $fh_err, my $stderr ) = tempfile(
> +       TEMPLATE => '/tmp/audit-testsuite-err-XXXX',
> +       UNLINK   => 1
> +);
> +
> +###
> +# tests
> +
> +my $dirs = 4;
> +
> +# setup directory hierarchy
> +for (my $i=0; $i < $dirs; $i++) {
> +       mkdir $dir."/dir".$i;
> +       for (my $j=0; $j < $dirs; $j++) {
> +               mkdir $dir."/dir".$i."/subdir".$j;
> +       }
> +}
> +mkdir "$dir/mnt";
> +for (my $i=0; $i < $dirs; $i++) {
> +       mkdir "$dir/mnt/mnt$i";
> +       mkdir "$dir/leaf$i";
> +}
> +
> +my $stat_pid = fork();
> +
> +if ($stat_pid == 0) {
> +       run_stat($dir, $dirs);
> +       # Never reached
> +       exit;
> +}
> +
> +my $mount_pid = fork();
> +
> +if ($mount_pid == 0) {
> +       run_mount($dir, $dirs);
> +       # Never reached
> +       exit;
> +}
> +
> +my $key = key_gen();
> +
> +my $audit_pid = fork();
> +
> +if ($audit_pid == 0) {
> +       run_mark_audit($dir, $dirs, $key);
> +       # Never reached
> +       exit;
> +}
> +
> +# Sleep for a minute to let stress test run...
> +sleep(60);
> +ok(1);
> +
> +###
> +# cleanup
> +
> +kill('KILL', $stat_pid, $mount_pid, $audit_pid);
> +# Wait for children to terminate
> +waitpid($stat_pid, 0);
> +waitpid($mount_pid, 0);
> +waitpid($audit_pid, 0);
> +system("auditctl -D >& /dev/null");
> +umount_all($dir, $dirs, 1);
> --
> 2.16.4
>
Jan Kara Nov. 14, 2018, 12:16 p.m. UTC | #12
On Tue 13-11-18 19:34:18, Paul Moore wrote:
> On Tue, Sep 4, 2018 at 12:06 PM Jan Kara <jack@suse.cz> wrote:
> > Add stress test for stressing audit tree watches by adding and deleting
> > rules while events are generated and watched filesystems are mounted and
> > unmounted in parallel.
> >
> > Signed-off-by: Jan Kara <jack@suse.cz>
> > ---
> >  tests/stress_tree/Makefile |   8 +++
> >  tests/stress_tree/test     | 171 +++++++++++++++++++++++++++++++++++++++++++++
> >  2 files changed, 179 insertions(+)
> >  create mode 100644 tests/stress_tree/Makefile
> >  create mode 100755 tests/stress_tree/test
> 
> I'd like to get this into the audit-testsuite repo, but I think it
> should live under test_manual/ instead of tests, is that okay with
> you?  If so, no need to resubmit, I can move the file during the
> merge.

Sure, move it wherever you find it best.

								Honza

> 
> > diff --git a/tests/stress_tree/Makefile b/tests/stress_tree/Makefile
> > new file mode 100644
> > index 000000000000..7ade09aad86f
> > --- /dev/null
> > +++ b/tests/stress_tree/Makefile
> > @@ -0,0 +1,8 @@
> > +TARGETS=$(patsubst %.c,%,$(wildcard *.c))
> > +
> > +LDLIBS += -lpthread
> > +
> > +all: $(TARGETS)
> > +clean:
> > +       rm -f $(TARGETS)
> > +
> > diff --git a/tests/stress_tree/test b/tests/stress_tree/test
> > new file mode 100755
> > index 000000000000..6215bec810d1
> > --- /dev/null
> > +++ b/tests/stress_tree/test
> > @@ -0,0 +1,171 @@
> > +#!/usr/bin/perl
> > +
> > +use strict;
> > +
> > +use Test;
> > +BEGIN { plan tests => 1 }
> > +
> > +use File::Temp qw/ tempdir tempfile /;
> > +
> > +###
> > +# functions
> > +
> > +sub key_gen {
> > +       my @chars = ( "A" .. "Z", "a" .. "z" );
> > +       my $key = "testsuite-" . time . "-";
> > +       $key .= $chars[ rand @chars ] for 1 .. 8;
> > +       return $key;
> > +}
> > +
> > +# Run stat on random files in subtrees to generate audit events
> > +sub run_stat {
> > +       my($dir,$dirs) = @_;
> > +       my $path;
> > +
> > +       while (1) {
> > +               $path = "$dir/mnt/mnt".int(rand($dirs))."/subdir".int(rand($dirs));
> > +               stat($path);
> > +       }
> > +}
> > +
> > +# Generate audit rules for subtrees. Do one rule per subtree. Because watch
> > +# recursively iterates child mounts and we mount $dir/leaf$i under various
> > +# subtrees, the inode corresponding to $dir/leaf$i gets tagged by different
> > +# trees.
> > +sub run_mark_audit {
> > +       my($dir,$dirs,$key) = @_;
> > +
> > +       while (1) {
> > +               for (my $i=0; $i < $dirs; $i++) {
> > +                       system("auditctl -w $dir/mnt/mnt$i -p r -k $key");
> > +               }
> > +               system("auditctl -D -k $key >& /dev/null");
> > +       }
> > +}
> > +
> > +sub umount_all {
> > +       my($dir,$dirs,$ignore_fail) = @_;
> > +
> > +       for (my $i=0; $i < $dirs; $i++) {
> > +               while (system("umount $dir/leaf$i >& /dev/null") > 0 &&
> > +                      $ignore_fail == 0) {
> > +                       # Nothing - loop until umount succeeds
> > +               }
> > +       }
> > +       for (my $i=0; $i < $dirs; $i++) {
> > +               for (my $j=0; $j < $dirs; $j++) {
> > +                       while (system("umount $dir/mnt/mnt$i/subdir$j >& /dev/null") > 0 &&
> > +                              $ignore_fail == 0) {
> > +                               # Nothing - loop until umount succeeds
> > +                       }
> > +               }
> > +               while (system("umount $dir/mnt/mnt$i >& /dev/null") > 0 &&
> > +                      $ignore_fail == 0) {
> > +                       # Nothing - loop until umount succeeds
> > +               }
> > +       }
> > +}
> > +
> > +# Mount and unmount filesystems. We pick random leaf mount so that sometimes
> > +# a leaf mount point root inode will gather more tags from different trees
> > +# and sometimes we will be quicker in unmounting all instances of leaf and
> > > +# thus exercise inode eviction path
> > +sub run_mount {
> > +       my($dir,$dirs) = @_;
> > +
> > +       while (1) {
> > +               # We use tmpfs here and not just bind mounts of some dir so
> > +               # that the root inode gets evicted once all instances are
> > +               # unmounted.
> > +               for (my $i=0; $i < $dirs; $i++) {
> > +                       system("mount -t tmpfs none $dir/leaf$i");
> > +               }
> > +               for (my $i=0; $i < $dirs; $i++) {
> > +                       system("mount --bind $dir/dir$i $dir/mnt/mnt$i");
> > +                       for (my $j=0; $j < $dirs; $j++) {
> > +                               my $leaf="$dir/leaf".int(rand($dirs));
> > +                               system("mount --bind $leaf $dir/mnt/mnt$i/subdir$j");
> > +                       }
> > +               }
> > +               umount_all($dir, $dirs, 0);
> > +       }
> > +}
> > +
> > +
> > +###
> > +# setup
> > +
> > +# reset audit
> > +system("auditctl -D >& /dev/null");
> > +
> > +# create temp directory
> > +my $dir = tempdir( TEMPLATE => '/tmp/audit-testsuite-XXXX', CLEANUP => 1 );
> > +
> > +# create stdout/stderr sinks
> > +( my $fh_out, my $stdout ) = tempfile(
> > +       TEMPLATE => '/tmp/audit-testsuite-out-XXXX',
> > +       UNLINK   => 1
> > +);
> > +( my $fh_err, my $stderr ) = tempfile(
> > +       TEMPLATE => '/tmp/audit-testsuite-err-XXXX',
> > +       UNLINK   => 1
> > +);
> > +
> > +###
> > +# tests
> > +
> > +my $dirs = 4;
> > +
> > +# setup directory hierarchy
> > +for (my $i=0; $i < $dirs; $i++) {
> > +       mkdir $dir."/dir".$i;
> > +       for (my $j=0; $j < $dirs; $j++) {
> > +               mkdir $dir."/dir".$i."/subdir".$j;
> > +       }
> > +}
> > +mkdir "$dir/mnt";
> > +for (my $i=0; $i < $dirs; $i++) {
> > +       mkdir "$dir/mnt/mnt$i";
> > +       mkdir "$dir/leaf$i";
> > +}
> > +
> > +my $stat_pid = fork();
> > +
> > +if ($stat_pid == 0) {
> > +       run_stat($dir, $dirs);
> > +       # Never reached
> > +       exit;
> > +}
> > +
> > +my $mount_pid = fork();
> > +
> > +if ($mount_pid == 0) {
> > +       run_mount($dir, $dirs);
> > +       # Never reached
> > +       exit;
> > +}
> > +
> > +my $key = key_gen();
> > +
> > +my $audit_pid = fork();
> > +
> > +if ($audit_pid == 0) {
> > +       run_mark_audit($dir, $dirs, $key);
> > +       # Never reached
> > +       exit;
> > +}
> > +
> > +# Sleep for a minute to let stress test run...
> > +sleep(60);
> > +ok(1);
> > +
> > +###
> > +# cleanup
> > +
> > +kill('KILL', $stat_pid, $mount_pid, $audit_pid);
> > +# Wait for children to terminate
> > +waitpid($stat_pid, 0);
> > +waitpid($mount_pid, 0);
> > +waitpid($audit_pid, 0);
> > +system("auditctl -D >& /dev/null");
> > +umount_all($dir, $dirs, 1);
> > --
> > 2.16.4
> >
> 
> 
> -- 
> paul moore
> www.paul-moore.com
Paul Moore Nov. 19, 2018, 3:19 p.m. UTC | #13
On Wed, Nov 14, 2018 at 7:16 AM Jan Kara <jack@suse.cz> wrote:
> On Tue 13-11-18 19:34:18, Paul Moore wrote:
> > On Tue, Sep 4, 2018 at 12:06 PM Jan Kara <jack@suse.cz> wrote:
> > > Add stress test for stressing audit tree watches by adding and deleting
> > > rules while events are generated and watched filesystems are mounted and
> > > unmounted in parallel.
> > >
> > > Signed-off-by: Jan Kara <jack@suse.cz>
> > > ---
> > >  tests/stress_tree/Makefile |   8 +++
> > >  tests/stress_tree/test     | 171 +++++++++++++++++++++++++++++++++++++++++++++
> > >  2 files changed, 179 insertions(+)
> > >  create mode 100644 tests/stress_tree/Makefile
> > >  create mode 100755 tests/stress_tree/test
> >
> > I'd like to get this into the audit-testsuite repo, but I think it
> > should live under test_manual/ instead of tests, is that okay with
> > you?  If so, no need to resubmit, I can move the file during the
> > merge.
>
> Sure, move it wherever you find it best.

Great, merged into the tests_manual directory, and fixed up some style
nits found via ./tools/check-syntax.  Thanks again!

> > > diff --git a/tests/stress_tree/Makefile b/tests/stress_tree/Makefile
> > > new file mode 100644
> > > index 000000000000..7ade09aad86f
> > > --- /dev/null
> > > +++ b/tests/stress_tree/Makefile
> > > @@ -0,0 +1,8 @@
> > > +TARGETS=$(patsubst %.c,%,$(wildcard *.c))
> > > +
> > > +LDLIBS += -lpthread
> > > +
> > > +all: $(TARGETS)
> > > +clean:
> > > +       rm -f $(TARGETS)
> > > +
> > > diff --git a/tests/stress_tree/test b/tests/stress_tree/test
> > > new file mode 100755
> > > index 000000000000..6215bec810d1
> > > --- /dev/null
> > > +++ b/tests/stress_tree/test
> > > @@ -0,0 +1,171 @@
> > > +#!/usr/bin/perl
> > > +
> > > +use strict;
> > > +
> > > +use Test;
> > > +BEGIN { plan tests => 1 }
> > > +
> > > +use File::Temp qw/ tempdir tempfile /;
> > > +
> > > +###
> > > +# functions
> > > +
> > > +sub key_gen {
> > > +       my @chars = ( "A" .. "Z", "a" .. "z" );
> > > +       my $key = "testsuite-" . time . "-";
> > > +       $key .= $chars[ rand @chars ] for 1 .. 8;
> > > +       return $key;
> > > +}
> > > +
> > > +# Run stat on random files in subtrees to generate audit events
> > > +sub run_stat {
> > > +       my($dir,$dirs) = @_;
> > > +       my $path;
> > > +
> > > +       while (1) {
> > > +               $path = "$dir/mnt/mnt".int(rand($dirs))."/subdir".int(rand($dirs));
> > > +               stat($path);
> > > +       }
> > > +}
> > > +
> > > +# Generate audit rules for subtrees. Do one rule per subtree. Because watch
> > > +# recursively iterates child mounts and we mount $dir/leaf$i under various
> > > +# subtrees, the inode corresponding to $dir/leaf$i gets tagged by different
> > > +# trees.
> > > +sub run_mark_audit {
> > > +       my($dir,$dirs,$key) = @_;
> > > +
> > > +       while (1) {
> > > +               for (my $i=0; $i < $dirs; $i++) {
> > > +                       system("auditctl -w $dir/mnt/mnt$i -p r -k $key");
> > > +               }
> > > +               system("auditctl -D -k $key >& /dev/null");
> > > +       }
> > > +}
> > > +
> > > +sub umount_all {
> > > +       my($dir,$dirs,$ignore_fail) = @_;
> > > +
> > > +       for (my $i=0; $i < $dirs; $i++) {
> > > +               while (system("umount $dir/leaf$i >& /dev/null") > 0 &&
> > > +                      $ignore_fail == 0) {
> > > +                       # Nothing - loop until umount succeeds
> > > +               }
> > > +       }
> > > +       for (my $i=0; $i < $dirs; $i++) {
> > > +               for (my $j=0; $j < $dirs; $j++) {
> > > +                       while (system("umount $dir/mnt/mnt$i/subdir$j >& /dev/null") > 0 &&
> > > +                              $ignore_fail == 0) {
> > > +                               # Nothing - loop until umount succeeds
> > > +                       }
> > > +               }
> > > +               while (system("umount $dir/mnt/mnt$i >& /dev/null") > 0 &&
> > > +                      $ignore_fail == 0) {
> > > +                       # Nothing - loop until umount succeeds
> > > +               }
> > > +       }
> > > +}
> > > +
> > > +# Mount and unmount filesystems. We pick random leaf mount so that sometimes
> > > +# a leaf mount point root inode will gather more tags from different trees
> > > +# and sometimes we will be quicker in unmounting all instances of leaf and
> > > > +# thus exercise inode eviction path
> > > +sub run_mount {
> > > +       my($dir,$dirs) = @_;
> > > +
> > > +       while (1) {
> > > +               # We use tmpfs here and not just bind mounts of some dir so
> > > +               # that the root inode gets evicted once all instances are
> > > +               # unmounted.
> > > +               for (my $i=0; $i < $dirs; $i++) {
> > > +                       system("mount -t tmpfs none $dir/leaf$i");
> > > +               }
> > > +               for (my $i=0; $i < $dirs; $i++) {
> > > +                       system("mount --bind $dir/dir$i $dir/mnt/mnt$i");
> > > +                       for (my $j=0; $j < $dirs; $j++) {
> > > +                               my $leaf="$dir/leaf".int(rand($dirs));
> > > +                               system("mount --bind $leaf $dir/mnt/mnt$i/subdir$j");
> > > +                       }
> > > +               }
> > > +               umount_all($dir, $dirs, 0);
> > > +       }
> > > +}
> > > +
> > > +
> > > +###
> > > +# setup
> > > +
> > > +# reset audit
> > > +system("auditctl -D >& /dev/null");
> > > +
> > > +# create temp directory
> > > +my $dir = tempdir( TEMPLATE => '/tmp/audit-testsuite-XXXX', CLEANUP => 1 );
> > > +
> > > +# create stdout/stderr sinks
> > > +( my $fh_out, my $stdout ) = tempfile(
> > > +       TEMPLATE => '/tmp/audit-testsuite-out-XXXX',
> > > +       UNLINK   => 1
> > > +);
> > > +( my $fh_err, my $stderr ) = tempfile(
> > > +       TEMPLATE => '/tmp/audit-testsuite-err-XXXX',
> > > +       UNLINK   => 1
> > > +);
> > > +
> > > +###
> > > +# tests
> > > +
> > > +my $dirs = 4;
> > > +
> > > +# setup directory hierarchy
> > > +for (my $i=0; $i < $dirs; $i++) {
> > > +       mkdir $dir."/dir".$i;
> > > +       for (my $j=0; $j < $dirs; $j++) {
> > > +               mkdir $dir."/dir".$i."/subdir".$j;
> > > +       }
> > > +}
> > > +mkdir "$dir/mnt";
> > > +for (my $i=0; $i < $dirs; $i++) {
> > > +       mkdir "$dir/mnt/mnt$i";
> > > +       mkdir "$dir/leaf$i";
> > > +}
> > > +
> > > +my $stat_pid = fork();
> > > +
> > > +if ($stat_pid == 0) {
> > > +       run_stat($dir, $dirs);
> > > +       # Never reached
> > > +       exit;
> > > +}
> > > +
> > > +my $mount_pid = fork();
> > > +
> > > +if ($mount_pid == 0) {
> > > +       run_mount($dir, $dirs);
> > > +       # Never reached
> > > +       exit;
> > > +}
> > > +
> > > +my $key = key_gen();
> > > +
> > > +my $audit_pid = fork();
> > > +
> > > +if ($audit_pid == 0) {
> > > +       run_mark_audit($dir, $dirs, $key);
> > > +       # Never reached
> > > +       exit;
> > > +}
> > > +
> > > +# Sleep for a minute to let stress test run...
> > > +sleep(60);
> > > +ok(1);
> > > +
> > > +###
> > > +# cleanup
> > > +
> > > +kill('KILL', $stat_pid, $mount_pid, $audit_pid);
> > > +# Wait for children to terminate
> > > +waitpid($stat_pid, 0);
> > > +waitpid($mount_pid, 0);
> > > +waitpid($audit_pid, 0);
> > > +system("auditctl -D >& /dev/null");
> > > +umount_all($dir, $dirs, 1);
> > > --
> > > 2.16.4
> > >
> >
> >
> > --
> > paul moore
> > www.paul-moore.com
> --
> Jan Kara <jack@suse.com>
> SUSE Labs, CR

Patch
diff mbox series

diff --git a/tests/stress_tree/Makefile b/tests/stress_tree/Makefile
new file mode 100644
index 000000000000..7ade09aad86f
--- /dev/null
+++ b/tests/stress_tree/Makefile
@@ -0,0 +1,8 @@ 
+TARGETS=$(patsubst %.c,%,$(wildcard *.c))
+
+LDLIBS += -lpthread
+
+all: $(TARGETS)
+clean:
+	rm -f $(TARGETS)
+
diff --git a/tests/stress_tree/test b/tests/stress_tree/test
new file mode 100755
index 000000000000..6215bec810d1
--- /dev/null
+++ b/tests/stress_tree/test
@@ -0,0 +1,171 @@ 
+#!/usr/bin/perl
+
+use strict;
+
+use Test;
+BEGIN { plan tests => 1 }
+
+use File::Temp qw/ tempdir tempfile /;
+
+###
+# functions
+
+sub key_gen {
+	my @chars = ( "A" .. "Z", "a" .. "z" );
+	my $key = "testsuite-" . time . "-";
+	$key .= $chars[ rand @chars ] for 1 .. 8;
+	return $key;
+}
+
+# Run stat on random files in subtrees to generate audit events
+sub run_stat {
+	my($dir,$dirs) = @_;
+	my $path;
+
+	while (1) {
+		$path = "$dir/mnt/mnt".int(rand($dirs))."/subdir".int(rand($dirs));
+		stat($path);
+	}
+}
+
+# Generate audit rules for subtrees. Do one rule per subtree. Because watch
+# recursively iterates child mounts and we mount $dir/leaf$i under various
+# subtrees, the inode corresponding to $dir/leaf$i gets tagged by different
+# trees.
+sub run_mark_audit {
+	my($dir,$dirs,$key) = @_;
+
+	while (1) {
+		for (my $i=0; $i < $dirs; $i++) {
+			system("auditctl -w $dir/mnt/mnt$i -p r -k $key");
+		}
+		system("auditctl -D -k $key >& /dev/null");
+	}
+}
+
+sub umount_all {
+	my($dir,$dirs,$ignore_fail) = @_;
+
+	for (my $i=0; $i < $dirs; $i++) {
+		while (system("umount $dir/leaf$i >& /dev/null") > 0 &&
+		       $ignore_fail == 0) {
+			# Nothing - loop until umount succeeds
+		}
+	}
+	for (my $i=0; $i < $dirs; $i++) {
+		for (my $j=0; $j < $dirs; $j++) {
+			while (system("umount $dir/mnt/mnt$i/subdir$j >& /dev/null") > 0 &&
+			       $ignore_fail == 0) {
+				# Nothing - loop until umount succeeds
+			}
+		}
+		while (system("umount $dir/mnt/mnt$i >& /dev/null") > 0 &&
+		       $ignore_fail == 0) {
+			# Nothing - loop until umount succeeds
+		}
+	}
+}
+
+# Mount and unmount filesystems. We pick random leaf mount so that sometimes
+# a leaf mount point root inode will gather more tags from different trees
+# and sometimes we will be quicker in unmounting all instances of leaf and
+# thus exercise the inode eviction path
+sub run_mount {
+	my($dir,$dirs) = @_;
+
+	while (1) {
+		# We use tmpfs here and not just bind mounts of some dir so
+		# that the root inode gets evicted once all instances are
+		# unmounted.
+		for (my $i=0; $i < $dirs; $i++) {
+			system("mount -t tmpfs none $dir/leaf$i");
+		}
+		for (my $i=0; $i < $dirs; $i++) {
+			system("mount --bind $dir/dir$i $dir/mnt/mnt$i");
+			for (my $j=0; $j < $dirs; $j++) {
+				my $leaf="$dir/leaf".int(rand($dirs));
+				system("mount --bind $leaf $dir/mnt/mnt$i/subdir$j");
+			}
+		}
+		umount_all($dir, $dirs, 0);
+	}
+}
+
+
+###
+# setup
+
+# reset audit
+system("auditctl -D >& /dev/null");
+
+# create temp directory
+my $dir = tempdir( TEMPLATE => '/tmp/audit-testsuite-XXXX', CLEANUP => 1 );
+
+# create stdout/stderr sinks
+( my $fh_out, my $stdout ) = tempfile(
+	TEMPLATE => '/tmp/audit-testsuite-out-XXXX',
+	UNLINK   => 1
+);
+( my $fh_err, my $stderr ) = tempfile(
+	TEMPLATE => '/tmp/audit-testsuite-err-XXXX',
+	UNLINK   => 1
+);
+
+###
+# tests
+
+my $dirs = 4;
+
+# setup directory hierarchy
+for (my $i=0; $i < $dirs; $i++) {
+	mkdir $dir."/dir".$i;
+	for (my $j=0; $j < $dirs; $j++) {
+		mkdir $dir."/dir".$i."/subdir".$j;
+	}
+}
+mkdir "$dir/mnt";
+for (my $i=0; $i < $dirs; $i++) {
+	mkdir "$dir/mnt/mnt$i";
+	mkdir "$dir/leaf$i";
+}
+
+my $stat_pid = fork();
+
+if ($stat_pid == 0) {
+	run_stat($dir, $dirs);
+	# Never reached
+	exit;
+}
+
+my $mount_pid = fork();
+
+if ($mount_pid == 0) {
+	run_mount($dir, $dirs);
+	# Never reached
+	exit;
+}
+
+my $key = key_gen();
+
+my $audit_pid = fork();
+
+if ($audit_pid == 0) {
+	run_mark_audit($dir, $dirs, $key);
+	# Never reached
+	exit;
+}
+
+# Sleep for a minute to let stress test run...
+sleep(60);
+ok(1);
+
+###
+# cleanup
+
+kill('KILL', $stat_pid, $mount_pid, $audit_pid);
+# Wait for children to terminate
+waitpid($stat_pid, 0);
+waitpid($mount_pid, 0);
+waitpid($audit_pid, 0);
+system("auditctl -D >& /dev/null");
+umount_all($dir, $dirs, 1);