diff mbox series

[net-next,v3,09/12] selftests: ncdevmem: Remove hard-coded queue numbers

Message ID 20241009171252.2328284-10-sdf@fomichev.me (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Headers show
Series selftests: ncdevmem: Add ncdevmem to ksft | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 5 this patch: 5
netdev/build_tools success Errors and warnings before: 0 (+2) this patch: 0 (+2)
netdev/cc_maintainers warning 2 maintainers not CCed: linux-kselftest@vger.kernel.org shuah@kernel.org
netdev/build_clang success Errors and warnings before: 5 this patch: 5
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 3 this patch: 3
netdev/checkpatch warning WARNING: line length of 96 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Stanislav Fomichev Oct. 9, 2024, 5:12 p.m. UTC
Use single last queue of the device and probe it dynamically.

Cc: Mina Almasry <almasrymina@google.com>
Signed-off-by: Stanislav Fomichev <sdf@fomichev.me>
---
 tools/testing/selftests/net/ncdevmem.c | 40 ++++++++++++++++++++++++--
 1 file changed, 38 insertions(+), 2 deletions(-)

Comments

Mina Almasry Oct. 13, 2024, 3:50 a.m. UTC | #1
On Wed, Oct 9, 2024 at 10:13 AM Stanislav Fomichev <sdf@fomichev.me> wrote:
>
> Use single last queue of the device and probe it dynamically.
>

Sorry, I thought we agreed that multi-queue binding test coverage is important.

Can you please leave the default of num_queues to be 8 queues, or
rxq_num / 2? You can override num_queues to 1 in your test invocations
if you want. I would like by default an unaware tester that doesn't
set num_queues explicitly to get multi-queue test coverage.
> Cc: Mina Almasry <almasrymina@google.com>
> Signed-off-by: Stanislav Fomichev <sdf@fomichev.me>
> ---
>  tools/testing/selftests/net/ncdevmem.c | 40 ++++++++++++++++++++++++--
>  1 file changed, 38 insertions(+), 2 deletions(-)
>
> diff --git a/tools/testing/selftests/net/ncdevmem.c b/tools/testing/selftests/net/ncdevmem.c
> index 02ba3f368888..90aacfb3433f 100644
> --- a/tools/testing/selftests/net/ncdevmem.c
> +++ b/tools/testing/selftests/net/ncdevmem.c
> @@ -63,8 +63,8 @@ static char *server_ip;
>  static char *client_ip;
>  static char *port;
>  static size_t do_validation;
> -static int start_queue = 8;
> -static int num_queues = 8;
> +static int start_queue = -1;
> +static int num_queues = 1;
>  static char *ifname;
>  static unsigned int ifindex;
>  static unsigned int dmabuf_id;
> @@ -196,6 +196,33 @@ void validate_buffer(void *line, size_t size)
>         fprintf(stdout, "Validated buffer\n");
>  }
>
> +static int rxq_num(int ifindex)
> +{
> +       struct ethtool_channels_get_req *req;
> +       struct ethtool_channels_get_rsp *rsp;
> +       struct ynl_error yerr;
> +       struct ynl_sock *ys;
> +       int num = -1;
> +
> +       ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
> +       if (!ys) {
> +               fprintf(stderr, "YNL: %s\n", yerr.msg);
> +               return -1;
> +       }
> +
> +       req = ethtool_channels_get_req_alloc();
> +       ethtool_channels_get_req_set_header_dev_index(req, ifindex);
> +       rsp = ethtool_channels_get(ys, req);
> +       if (rsp)
> +               num = rsp->rx_count + rsp->combined_count;
> +       ethtool_channels_get_req_free(req);
> +       ethtool_channels_get_rsp_free(rsp);
> +
> +       ynl_sock_destroy(ys);
> +
> +       return num;
> +}
> +
>  #define run_command(cmd, ...)                                           \
>         ({                                                              \
>                 char command[256];                                      \
> @@ -690,6 +717,15 @@ int main(int argc, char *argv[])
>
>         ifindex = if_nametoindex(ifname);
>
> +       if (start_queue < 0) {
> +               start_queue = rxq_num(ifindex) - 1;
> +
> +               if (start_queue < 0)
> +                       error(1, 0, "couldn't detect number of queues\n");
> +
> +               fprintf(stderr, "using queues %d..%d\n", start_queue, start_queue + num_queues);
> +       }
> +
>         for (; optind < argc; optind++)
>                 fprintf(stderr, "extra arguments: %s\n", argv[optind]);
>
> --
> 2.47.0
>


--
Thanks,
Mina
Stanislav Fomichev Oct. 14, 2024, 2:47 p.m. UTC | #2
On 10/12, Mina Almasry wrote:
> On Wed, Oct 9, 2024 at 10:13 AM Stanislav Fomichev <sdf@fomichev.me> wrote:
> >
> > Use single last queue of the device and probe it dynamically.
> >
> 
> Sorry I thought agreed that multi-queue binding test coverage is important.
> 
> Can you please leave the default of num_queues to be 8 queues, or
> rxq_num / 2? You can override num_queues to 1 in your test invocations
> if you want. I would like by default an unaware tester that doesn't
> set num_queues explicitly to get multi-queue test coverage.

I might have misunderstood the agreement :-) I thought you were ok with
the following arrangement:

1. use num_queues / 2 in the selftest mode to make sure binding to multiple
   queues works (and this gets exercised from the python kselftest)
2. use single queue for the actual data path test (since we are
   installing single flow steering rule, having multiple queues here is
   confusing)

The num_queues / 2 part is here:
https://lore.kernel.org/netdev/20241009171252.2328284-11-sdf@fomichev.me/

Anything I'm missing?
Mina Almasry Oct. 14, 2024, 10:49 p.m. UTC | #3
On Mon, Oct 14, 2024 at 5:47 PM Stanislav Fomichev <stfomichev@gmail.com> wrote:
>
> On 10/12, Mina Almasry wrote:
> > On Wed, Oct 9, 2024 at 10:13 AM Stanislav Fomichev <sdf@fomichev.me> wrote:
> > >
> > > Use single last queue of the device and probe it dynamically.
> > >
> >
> > Sorry I thought agreed that multi-queue binding test coverage is important.
> >
> > Can you please leave the default of num_queues to be 8 queues, or
> > rxq_num / 2? You can override num_queues to 1 in your test invocations
> > if you want. I would like by default an unaware tester that doesn't
> > set num_queues explicitly to get multi-queue test coverage.
>
> I might have misunderstood the agreement :-) I thought you were ok with
> the following arrangement:
>
> 1. use num_queues / 2 in the selftest mode to make sure binding to multiple
>    queues works (and this gets exercised from the python kselftest)
> 2. use single queue for the actual data path test (since we are
>    installing single flow steering rule, having multiple queues here is
>    confusing)
>
> The num_queues / 2 part is here:
> https://lore.kernel.org/netdev/20241009171252.2328284-11-sdf@fomichev.me/
>
> Anything I'm missing?

Sorry, I indeed missed that this is reworked in patch 10/12. Squashing
the patches could be OK, because the changes to num_queues here are
essentially reworked in the next patch, but this is also fine.

Reviewed-by: Mina Almasry <almasrymina@google.com>
diff mbox series

Patch

diff --git a/tools/testing/selftests/net/ncdevmem.c b/tools/testing/selftests/net/ncdevmem.c
index 02ba3f368888..90aacfb3433f 100644
--- a/tools/testing/selftests/net/ncdevmem.c
+++ b/tools/testing/selftests/net/ncdevmem.c
@@ -63,8 +63,8 @@  static char *server_ip;
 static char *client_ip;
 static char *port;
 static size_t do_validation;
-static int start_queue = 8;
-static int num_queues = 8;
+static int start_queue = -1;
+static int num_queues = 1;
 static char *ifname;
 static unsigned int ifindex;
 static unsigned int dmabuf_id;
@@ -196,6 +196,33 @@  void validate_buffer(void *line, size_t size)
 	fprintf(stdout, "Validated buffer\n");
 }
 
+static int rxq_num(int ifindex)
+{
+	struct ethtool_channels_get_req *req;
+	struct ethtool_channels_get_rsp *rsp;
+	struct ynl_error yerr;
+	struct ynl_sock *ys;
+	int num = -1;
+
+	ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
+	if (!ys) {
+		fprintf(stderr, "YNL: %s\n", yerr.msg);
+		return -1;
+	}
+
+	req = ethtool_channels_get_req_alloc();
+	ethtool_channels_get_req_set_header_dev_index(req, ifindex);
+	rsp = ethtool_channels_get(ys, req);
+	if (rsp)
+		num = rsp->rx_count + rsp->combined_count;
+	ethtool_channels_get_req_free(req);
+	ethtool_channels_get_rsp_free(rsp);
+
+	ynl_sock_destroy(ys);
+
+	return num;
+}
+
 #define run_command(cmd, ...)                                           \
 	({                                                              \
 		char command[256];                                      \
@@ -690,6 +717,15 @@  int main(int argc, char *argv[])
 
 	ifindex = if_nametoindex(ifname);
 
+	if (start_queue < 0) {
+		start_queue = rxq_num(ifindex) - 1;
+
+		if (start_queue < 0)
+			error(1, 0, "couldn't detect number of queues\n");
+
+		fprintf(stderr, "using queues %d..%d\n", start_queue, start_queue + num_queues);
+	}
+
 	for (; optind < argc; optind++)
 		fprintf(stderr, "extra arguments: %s\n", argv[optind]);