From patchwork Tue Dec 19 08:14:50 2023
X-Patchwork-Submitter: "Abdul Rahim, Faizal"
X-Patchwork-Id: 13497998
X-Patchwork-Delegate: kuba@kernel.org
From: Faizal Rahim
To: Vladimir Oltean, Vinicius Costa Gomes, Jamal Hadi Salim, Cong Wang,
    Jiri Pirko, "David S. Miller", Eric Dumazet, Jakub Kicinski,
    Paolo Abeni
Cc: netdev@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH v3 net 1/4] net/sched: taprio: fix too early schedules switching
Date: Tue, 19 Dec 2023 03:14:50 -0500
Message-Id: <20231219081453.718489-2-faizal.abdul.rahim@linux.intel.com>
In-Reply-To: <20231219081453.718489-1-faizal.abdul.rahim@linux.intel.com>
References: <20231219081453.718489-1-faizal.abdul.rahim@linux.intel.com>

Switching schedules prematurely leaves the last entry of the oper
schedule still running while q->oper_sched already points at the new
admin schedule. During this window, calls to
taprio_skb_exceeds_queue_max_sdu() in the enqueue path, such as from
taprio_enqueue_segmented(), inspect q->oper_sched and therefore see the
new admin schedule instead of the ongoing oper schedule.
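The fix below replaces the immediate switch with a sentinel-based
"switch pending" marker. The idea can be sketched as a minimal
standalone C model (simplified from the diff that follows; the struct
here is a stand-in for the kernel's sched_gate_list, not the real
definition):

#include <stdbool.h>
#include <stdint.h>

/* Sentinel: no oper->admin switch has been scheduled yet. Any other
 * value is the signed correction (in ns) applied to the last oper
 * cycle.
 */
#define CYCLE_TIME_CORRECTION_UNSPEC INT64_MIN

struct sched_gate_list {
	int64_t cycle_time;            /* ns */
	int64_t cycle_time_correction; /* ns, or the sentinel above */
};

/* True once the oper schedule has entered its final cycle and the
 * switch to the admin schedule should happen on the next advance.
 */
static bool sched_switch_pending(const struct sched_gate_list *oper)
{
	return oper->cycle_time_correction != CYCLE_TIME_CORRECTION_UNSPEC;
}

With this marker, advance_sched() keeps serving the last oper entry and
performs switch_schedules() only on the following timer pass, so
enqueue-path readers of q->oper_sched never observe the admin schedule
early.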
Fixes: a878fd46fe43 ("net/sched: keep the max_frm_len information inside struct sched_gate_list")
Signed-off-by: Faizal Rahim
---
 net/sched/sch_taprio.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 31a8252bd09c..bbcaf05d40ba 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -41,6 +41,7 @@ static struct static_key_false taprio_have_working_mqprio;
 #define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
 #define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
 #define TAPRIO_FLAGS_INVALID U32_MAX
+#define CYCLE_TIME_CORRECTION_UNSPEC S64_MIN
 
 struct sched_entry {
 	/* Durations between this GCL entry and the GCL entry where the
@@ -75,6 +76,7 @@ struct sched_gate_list {
 	ktime_t cycle_end_time;
 	s64 cycle_time;
 	s64 cycle_time_extension;
+	s64 cycle_time_correction;
 	s64 base_time;
 };
 
@@ -213,6 +215,11 @@ static void switch_schedules(struct taprio_sched *q,
 	*admin = NULL;
 }
 
+static bool sched_switch_pending(const struct sched_gate_list *oper)
+{
+	return oper->cycle_time_correction != CYCLE_TIME_CORRECTION_UNSPEC;
+}
+
 /* Get how much time has been already elapsed in the current cycle. */
 static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
 {
@@ -940,7 +947,7 @@ static enum hrtimer_restart advance_sched(struct hrtimer *timer)
 	admin = rcu_dereference_protected(q->admin_sched,
 					  lockdep_is_held(&q->current_entry_lock));
 
-	if (!oper)
+	if (!oper || sched_switch_pending(oper))
 		switch_schedules(q, &admin, &oper);
 
 	/* This can happen in two cases: 1. this is the very first run
@@ -981,7 +988,7 @@ static enum hrtimer_restart advance_sched(struct hrtimer *timer)
 		 * schedule runs.
 		 */
 		end_time = sched_base_time(admin);
-		switch_schedules(q, &admin, &oper);
+		oper->cycle_time_correction = 0;
 	}
 
 	next->end_time = end_time;
@@ -1174,6 +1181,7 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
 	}
 
 	taprio_calculate_gate_durations(q, new);
+	new->cycle_time_correction = CYCLE_TIME_CORRECTION_UNSPEC;
 
 	return 0;
 }

From patchwork Tue Dec 19 08:14:51 2023
X-Patchwork-Submitter: "Abdul Rahim, Faizal"
X-Patchwork-Id: 13497999
X-Patchwork-Delegate: kuba@kernel.org
From: Faizal Rahim
To: Vladimir Oltean, Vinicius Costa Gomes, Jamal Hadi Salim, Cong Wang,
    Jiri Pirko, "David S. Miller", Eric Dumazet, Jakub Kicinski,
    Paolo Abeni
Cc: netdev@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH v3 net 2/4] net/sched: taprio: fix cycle time adjustment for next entry
Date: Tue, 19 Dec 2023 03:14:51 -0500
Message-Id: <20231219081453.718489-3-faizal.abdul.rahim@linux.intel.com>
In-Reply-To: <20231219081453.718489-1-faizal.abdul.rahim@linux.intel.com>
References: <20231219081453.718489-1-faizal.abdul.rahim@linux.intel.com>

According to IEEE Std. 802.1Q-2018 section Q.5 CycleTimeExtension:
"the Cycle Time Extension variable allows this extension of the last
old cycle to be done in a defined way. If the last complete old cycle
would normally end less than OperCycleTimeExtension nanoseconds before
the new base time, then the last complete cycle before AdminBaseTime is
reached is extended so that it ends at AdminBaseTime."

Fix the cycle time modification logic for the next entry, covering the
following cases:
a) Positive correction - cycle time extension.
b) Negative correction - cycle time truncation.
c) Zero correction - the new admin base time aligns exactly with the
   old cycle.

Fixes: a3d43c0d56f1 ("taprio: Add support adding an admin schedule")
Signed-off-by: Faizal Rahim
---
 net/sched/sch_taprio.c | 100 +++++++++++++++++++++++++----------------
 1 file changed, 62 insertions(+), 38 deletions(-)

diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index bbcaf05d40ba..e70dc69c311f 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -893,38 +893,54 @@ static bool should_restart_cycle(const struct sched_gate_list *oper,
 	return false;
 }
 
-static bool should_change_schedules(const struct sched_gate_list *admin,
-				    const struct sched_gate_list *oper,
-				    ktime_t end_time)
-{
-	ktime_t next_base_time, extension_time;
-
-	if (!admin)
-		return false;
-
-	next_base_time = sched_base_time(admin);
-
-	/* This is the simple case, the end_time would fall after
-	 * the next schedule base_time.
-	 */
-	if (ktime_compare(next_base_time, end_time) <= 0)
-		return true;
-
-	/* This is the cycle_time_extension case, if the end_time
-	 * plus the amount that can be extended would fall after the
-	 * next schedule base_time, we can extend the current schedule
-	 * for that amount.
-	 */
-	extension_time = ktime_add_ns(end_time, oper->cycle_time_extension);
+static bool should_extend_cycle(const struct sched_gate_list *oper,
+				ktime_t new_base_time,
+				ktime_t next_entry_end_time,
+				const struct sched_entry *next_entry)
+{
+	ktime_t next_cycle_end_time = ktime_add_ns(oper->cycle_end_time,
+						   oper->cycle_time);
+	bool extension_supported = oper->cycle_time_extension > 0;
+	s64 extension_limit = oper->cycle_time_extension;
+	s64 extension_duration = ktime_sub(new_base_time, next_entry_end_time);
+
+	return extension_supported &&
+	       list_is_last(&next_entry->list, &oper->entries) &&
+	       ktime_before(new_base_time, next_cycle_end_time) &&
+	       extension_duration < extension_limit;
+}
+
+static s64 get_cycle_time_correction(const struct sched_gate_list *oper,
+				     ktime_t new_base_time,
+				     ktime_t next_entry_end_time,
+				     const struct sched_entry *next_entry)
+{
+	s64 correction = CYCLE_TIME_CORRECTION_UNSPEC;
+
+	if (ktime_compare(new_base_time, next_entry_end_time) <= 0) {
+		/* Negative correction - the new admin base time starts
+		 * earlier than the next entry's end time.
+		 * Zero correction - the new admin base time aligns exactly
+		 * with the old cycle.
+		 */
+		correction = ktime_sub(new_base_time, next_entry_end_time);
 
-	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
-	 * how precisely the extension should be made. So after
-	 * conformance testing, this logic may change.
-	 */
-	if (ktime_compare(next_base_time, extension_time) <= 0)
-		return true;
+		/* Handle the case where the negative correction exceeds the
+		 * entry's interval. This typically shouldn't happen. Setting
+		 * it to 0 enables schedule changes without altering the
+		 * cycle time.
+		 */
+		if (abs(correction) > next_entry->interval)
+			correction = 0;
+	} else if (ktime_after(new_base_time, next_entry_end_time) &&
+		   should_extend_cycle(oper, new_base_time,
+				       next_entry_end_time, next_entry)) {
+		/* Positive correction - the new admin base time starts after
+		 * the last entry's end time and within the next cycle time
+		 * of the old oper.
+		 */
+		correction = ktime_sub(new_base_time, next_entry_end_time);
+	}
 
-	return false;
+	return correction;
 }
 
 static enum hrtimer_restart advance_sched(struct hrtimer *timer)
@@ -975,6 +991,22 @@ static enum hrtimer_restart advance_sched(struct hrtimer *timer)
 	end_time = ktime_add_ns(entry->end_time, next->interval);
 	end_time = min_t(ktime_t, end_time, oper->cycle_end_time);
 
+	if (admin) {
+		ktime_t new_base_time = sched_base_time(admin);
+
+		oper->cycle_time_correction =
+			get_cycle_time_correction(oper, new_base_time,
+						  end_time, next);
+
+		if (sched_switch_pending(oper)) {
+			/* The next entry is the last entry we will run from
+			 * oper, subsequent ones will take from the new admin
+			 */
+			oper->cycle_end_time = new_base_time;
+			end_time = new_base_time;
+		}
+	}
+
 	for (tc = 0; tc < num_tc; tc++) {
 		if (next->gate_duration[tc] == oper->cycle_time)
 			next->gate_close_time[tc] = KTIME_MAX;
@@ -983,14 +1015,6 @@ static enum hrtimer_restart advance_sched(struct hrtimer *timer)
 			next->gate_duration[tc]);
 	}
 
-	if (should_change_schedules(admin, oper, end_time)) {
-		/* Set things so the next time this runs, the new
-		 * schedule runs.
-		 */
-		end_time = sched_base_time(admin);
-		oper->cycle_time_correction = 0;
-	}
-
 	next->end_time = end_time;
 	taprio_set_budgets(q, oper, next);

From patchwork Tue Dec 19 08:14:52 2023
X-Patchwork-Submitter: "Abdul Rahim, Faizal"
X-Patchwork-Id: 13498000
X-Patchwork-Delegate: kuba@kernel.org
From: Faizal Rahim
To: Vladimir Oltean, Vinicius Costa Gomes, Jamal Hadi Salim, Cong Wang,
    Jiri Pirko, "David S. Miller", Eric Dumazet, Jakub Kicinski,
    Paolo Abeni
Cc: netdev@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH v3 net 3/4] net/sched: taprio: fix impacted fields value during cycle time adjustment
Date: Tue, 19 Dec 2023 03:14:52 -0500
Message-Id: <20231219081453.718489-4-faizal.abdul.rahim@linux.intel.com>
In-Reply-To: <20231219081453.718489-1-faizal.abdul.rahim@linux.intel.com>
References: <20231219081453.718489-1-faizal.abdul.rahim@linux.intel.com>

During the cycle time adjustment period, a single entry from the oper
schedule remains to be executed. The logic of the affected fields must
therefore be updated, since it previously did not account for a
dynamically changing schedule.
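The per-TC bookkeeping involved can be sketched in isolation (a minimal
standalone C model of the gate_mask to per-TC duration mapping; the
names mirror the diff below, but the types and the MAX_TC bound are
simplified assumptions):

#include <stdint.h>

#define MAX_TC 8

/* When only one oper entry remains, every traffic class whose gate bit
 * is set in that entry stays open for exactly the (corrected) remaining
 * duration; every other traffic class is closed for the rest of the
 * oper schedule.
 */
static void last_entry_gate_durations(uint32_t gate_mask,
				      uint64_t open_gate_duration,
				      uint64_t gate_duration[MAX_TC],
				      int num_tc)
{
	for (int tc = 0; tc < num_tc; tc++)
		gate_duration[tc] = (gate_mask & (1u << tc)) ?
				    open_gate_duration : 0;
}

This is the same decision update_open_gate_duration() makes in the diff
below, which additionally mirrors the result into
oper->max_open_gate_duration[] so that queueMaxSDU and budget
calculations see the shortened durations.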
Fixes: a306a90c8ffe ("net/sched: taprio: calculate tc gate durations")
Signed-off-by: Faizal Rahim
---
 net/sched/sch_taprio.c | 44 +++++++++++++++++++++++++++++++++++++++---
 1 file changed, 41 insertions(+), 3 deletions(-)

diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index e70dc69c311f..a3c71be21af2 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -285,7 +285,8 @@ static void taprio_update_queue_max_sdu(struct taprio_sched *q,
 		/* TC gate never closes => keep the queueMaxSDU
 		 * selected by the user
 		 */
-		if (sched->max_open_gate_duration[tc] == sched->cycle_time) {
+		if (sched->max_open_gate_duration[tc] == sched->cycle_time &&
+		    !sched_switch_pending(sched)) {
 			max_sdu_dynamic = U32_MAX;
 		} else {
 			u32 max_frm_len;
@@ -681,7 +682,8 @@ static void taprio_set_budgets(struct taprio_sched *q,
 
 	for (tc = 0; tc < num_tc; tc++) {
 		/* Traffic classes which never close have infinite budget */
-		if (entry->gate_duration[tc] == sched->cycle_time)
+		if (entry->gate_duration[tc] == sched->cycle_time &&
+		    !sched_switch_pending(sched))
 			budget = INT_MAX;
 		else
 			budget = div64_u64((u64)entry->gate_duration[tc] * PSEC_PER_NSEC,
@@ -893,6 +895,29 @@ static bool should_restart_cycle(const struct sched_gate_list *oper,
 	return false;
 }
 
+/* Open gate durations were calculated at the beginning with multiple
+ * entries taken into consideration. If sched_switch_pending() is active,
+ * there's only a single remaining entry left from oper to run. Update
+ * the open gate durations based on this last entry.
+ */
+static void update_open_gate_duration(struct sched_entry *entry,
+				      struct sched_gate_list *oper,
+				      int num_tc,
+				      u64 open_gate_duration)
+{
+	int tc;
+
+	for (tc = 0; tc < num_tc; tc++) {
+		if (entry->gate_mask & BIT(tc)) {
+			entry->gate_duration[tc] = open_gate_duration;
+			oper->max_open_gate_duration[tc] = open_gate_duration;
+		} else {
+			entry->gate_duration[tc] = 0;
+			oper->max_open_gate_duration[tc] = 0;
+		}
+	}
+}
+
 static bool should_extend_cycle(const struct sched_gate_list *oper,
 				ktime_t new_base_time,
 				ktime_t next_entry_end_time,
@@ -1002,13 +1027,26 @@ static enum hrtimer_restart advance_sched(struct hrtimer *timer)
 			/* The next entry is the last entry we will run from
 			 * oper, subsequent ones will take from the new admin
 			 */
+			u64 new_gate_duration =
+				next->interval + oper->cycle_time_correction;
+			struct qdisc_size_table *stab;
+
 			oper->cycle_end_time = new_base_time;
 			end_time = new_base_time;
+
+			update_open_gate_duration(next, oper, num_tc,
+						  new_gate_duration);
+			rcu_read_lock();
+			stab = rcu_dereference(q->root->stab);
+			taprio_update_queue_max_sdu(q, oper, stab);
+			rcu_read_unlock();
 		}
 	}
 
 	for (tc = 0; tc < num_tc; tc++) {
-		if (next->gate_duration[tc] == oper->cycle_time)
+		if (sched_switch_pending(oper) && (next->gate_mask & BIT(tc)))
+			next->gate_close_time[tc] = end_time;
+		else if (next->gate_duration[tc] == oper->cycle_time)
 			next->gate_close_time[tc] = KTIME_MAX;
 		else
 			next->gate_close_time[tc] = ktime_add_ns(entry->end_time,

From patchwork Tue Dec 19 08:14:53 2023
X-Patchwork-Submitter: "Abdul Rahim, Faizal"
X-Patchwork-Id: 13498016
X-Patchwork-Delegate: kuba@kernel.org
From: Faizal Rahim
To: Vladimir Oltean, Vinicius Costa Gomes, Jamal Hadi Salim, Cong Wang,
    Jiri Pirko, "David S. Miller", Eric Dumazet, Jakub Kicinski,
    Paolo Abeni
Cc: netdev@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH v3 net 4/4] net/sched: taprio: get corrected value of cycle_time and interval
Date: Tue, 19 Dec 2023 03:14:53 -0500
Message-Id: <20231219081453.718489-5-faizal.abdul.rahim@linux.intel.com>
In-Reply-To: <20231219081453.718489-1-faizal.abdul.rahim@linux.intel.com>
References: <20231219081453.718489-1-faizal.abdul.rahim@linux.intel.com>

Add a new field, correction_active, to track an entry's correction
state. This field is required by flows such as find_entry_to_transmit()
-> get_interval_end_time(), which retrieve the interval of each entry.
During positive cycle time correction, it is known that the last
entry's interval requires correction. For negative correction, however,
the affected entry is not known in advance, which is why this new field
is necessary.

Note that in some cases where the original values are required, such as
in dump_schedule() and setup_first_end_time(), direct reads of
cycle_time and interval are retained without going through the new
helpers.
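The two helpers share one shape: return the stored value, plus the
pending correction when it applies. A standalone sketch (simplified C
mirroring the diff below, with kernel types reduced to plain integers
and the pending check passed in as a flag):

#include <stdbool.h>
#include <stdint.h>

struct sched_gate_list {
	int64_t cycle_time;
	int64_t cycle_time_correction; /* signed ns once a switch is pending */
};

struct sched_entry {
	uint32_t interval;
	bool correction_active;
};

/* Effective cycle time: the stored value, plus the pending correction
 * once a schedule switch has been flagged.
 */
static int64_t effective_cycle_time(const struct sched_gate_list *oper,
				    bool switch_pending)
{
	if (switch_pending)
		return oper->cycle_time + oper->cycle_time_correction;
	return oper->cycle_time;
}

/* Effective interval: only the entry flagged during correction (the
 * last oper entry) absorbs the correction.
 */
static int64_t effective_interval(const struct sched_entry *entry,
				  const struct sched_gate_list *oper)
{
	if (entry->correction_active)
		return (int64_t)entry->interval + oper->cycle_time_correction;
	return entry->interval;
}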
Signed-off-by: Faizal Rahim
---
 net/sched/sch_taprio.c | 34 ++++++++++++++++++++++++++++------
 1 file changed, 28 insertions(+), 6 deletions(-)

diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index a3c71be21af2..d11ddb1f554c 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -61,6 +61,7 @@ struct sched_entry {
 	u32 gate_mask;
 	u32 interval;
 	u8 command;
+	bool correction_active;
 };
 
 struct sched_gate_list {
@@ -220,6 +221,14 @@ static bool sched_switch_pending(const struct sched_gate_list *oper)
 	return oper->cycle_time_correction != CYCLE_TIME_CORRECTION_UNSPEC;
 }
 
+static s64 get_cycle_time(const struct sched_gate_list *oper)
+{
+	if (sched_switch_pending(oper))
+		return oper->cycle_time + oper->cycle_time_correction;
+	else
+		return oper->cycle_time;
+}
+
 /* Get how much time has been already elapsed in the current cycle. */
 static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
 {
@@ -227,11 +236,20 @@ static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
 	s32 time_elapsed;
 
 	time_since_sched_start = ktime_sub(time, sched->base_time);
-	div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);
+	div_s64_rem(time_since_sched_start, get_cycle_time(sched), &time_elapsed);
 
 	return time_elapsed;
 }
 
+static u32 get_interval(const struct sched_entry *entry,
+			const struct sched_gate_list *oper)
+{
+	if (entry->correction_active)
+		return entry->interval + oper->cycle_time_correction;
+	else
+		return entry->interval;
+}
+
 static ktime_t get_interval_end_time(struct sched_gate_list *sched,
 				     struct sched_gate_list *admin,
 				     struct sched_entry *entry,
@@ -240,8 +258,9 @@ static ktime_t get_interval_end_time(struct sched_gate_list *sched,
 	s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
 	ktime_t intv_end, cycle_ext_end, cycle_end;
 
-	cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
-	intv_end = ktime_add_ns(intv_start, entry->interval);
+	cycle_end = ktime_add_ns(intv_start,
+				 get_cycle_time(sched) - cycle_elapsed);
+	intv_end = ktime_add_ns(intv_start, get_interval(entry, sched));
 	cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);
 
 	if (ktime_before(intv_end, cycle_end))
@@ -348,7 +367,7 @@ static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
 	if (!sched)
 		return NULL;
 
-	cycle = sched->cycle_time;
+	cycle = get_cycle_time(sched);
 	cycle_elapsed = get_cycle_time_elapsed(sched, time);
 	curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
 	cycle_end = ktime_add_ns(curr_intv_end, cycle);
@@ -362,7 +381,7 @@ static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
 			break;
 
 		if (!(entry->gate_mask & BIT(tc)) ||
-		    packet_transmit_time > entry->interval)
+		    packet_transmit_time > get_interval(entry, sched))
 			continue;
 
 		txtime = entry->next_txtime;
@@ -540,7 +559,8 @@ static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
 		 * interval starts.
 		 */
 		if (ktime_after(transmit_end_time, interval_end))
-			entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
+			entry->next_txtime =
+				ktime_add(interval_start, get_cycle_time(sched));
 	} while (sched_changed || ktime_after(transmit_end_time, interval_end));
 
 	entry->next_txtime = transmit_end_time;
@@ -1033,6 +1053,7 @@ static enum hrtimer_restart advance_sched(struct hrtimer *timer)
 
 			oper->cycle_end_time = new_base_time;
 			end_time = new_base_time;
+			next->correction_active = true;
 
 			update_open_gate_duration(next, oper, num_tc,
 						  new_gate_duration);
@@ -1133,6 +1154,7 @@ static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
 	}
 
 	entry->interval = interval;
+	entry->correction_active = false;
 
 	return 0;
 }
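Taken together, the sign convention from patch 2 can be exercised with
made-up numbers (a throwaway C program; all values are hypothetical
nanosecond figures, not taken from the series):

#include <stdint.h>
#include <stdio.h>

/* correction = new admin base_time - end of the last oper entry.
 * Negative: admin starts before the entry would end (truncation).
 * Zero:     admin aligns exactly with the old cycle boundary.
 * Positive: admin starts after the entry ends, within the window
 *           allowed by cycle_time_extension (extension).
 */
int main(void)
{
	int64_t next_entry_end_time = 1000000; /* assume last entry ends at 1 ms */
	int64_t base_times[] = { 900000, 1000000, 1040000 };

	for (int i = 0; i < 3; i++) {
		int64_t correction = base_times[i] - next_entry_end_time;

		printf("base_time=%lld -> correction=%lld (%s)\n",
		       (long long)base_times[i], (long long)correction,
		       correction < 0 ? "truncate" :
		       correction > 0 ? "extend" : "aligned");
	}
	return 0;
}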