
[net-next,14/20] net: ethernet: qualcomm: Add PPE AC (admission control) function

Message ID 20240110114033.32575-15-quic_luoj@quicinc.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Series net: ethernet: Add qcom PPE driver

Checks

Context Check Description
netdev/series_format fail Series longer than 15 patches (and no cover letter)
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit fail Errors and warnings before: 23 this patch: 23
netdev/cc_maintainers success CCed 0 of 0 maintainers
netdev/build_clang fail Errors and warnings before: 38 this patch: 40
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 23 this patch: 23
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns WARNING: line length of 82 exceeds 80 columns WARNING: line length of 83 exceeds 80 columns WARNING: line length of 84 exceeds 80 columns WARNING: line length of 87 exceeds 80 columns WARNING: line length of 88 exceeds 80 columns WARNING: line length of 89 exceeds 80 columns WARNING: line length of 92 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Luo Jie Jan. 10, 2024, 11:40 a.m. UTC
The PPE AC function configures the thresholds at which packets are
dropped from a PPE queue.
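
A consumer of these ops could, for example, set a static WRED
threshold on unicast queue 0 roughly as follows (a minimal sketch,
not part of this patch; the ppe_dev handle is assumed to come from
the PPE probe path, and the threshold values are placeholders, not
recommendations):

	const struct ppe_queue_ops *ops = ppe_queue_config_ops_get();
	struct ppe_queue_ac_threshold thresh = {
		.wred_enable = true,
		.dynamic = false,	/* static, no free-buffer scaling */
		.ceiling = 400,
		.green_min_off = 36,
		.green_resume_off = 36,
	};
	int ret;

	ret = ops->queue_ac_threshold_set(ppe_dev, 0, thresh);
	if (ret)
		return ret;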

In addition, the back pressure mapping from an EDMA ring to PPE
queues can be configured; the EDMA driver uses this to enable the
back pressure feature.
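
For example, the EDMA driver could apply back pressure from its RX
ring 0 to PPE queues 0-31 with the new ring_queue_map_set() op (again
only a sketch with hypothetical ring and queue assignments; queue n
maps to bit (n % 32) of bitmap word (n / 32), with
PPE_RING_MAPPED_BP_QUEUE_WORD_COUNT words covering the queue bitmap):

	u32 queue_map[PPE_RING_MAPPED_BP_QUEUE_WORD_COUNT] = {};

	queue_map[0] = GENMASK(31, 0);	/* back pressure queues 0-31 */
	ret = ops->ring_queue_map_set(ppe_dev, 0, queue_map);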

Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
---
 drivers/net/ethernet/qualcomm/ppe/ppe_ops.c  | 182 +++++++++++++++++++
 drivers/net/ethernet/qualcomm/ppe/ppe_ops.h  |  47 +++++
 drivers/net/ethernet/qualcomm/ppe/ppe_regs.h |  24 +++
 3 files changed, 253 insertions(+)

Patch

diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_ops.c b/drivers/net/ethernet/qualcomm/ppe/ppe_ops.c
index 0398a36d680a..b4f46ad2be59 100644
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_ops.c
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_ops.c
@@ -429,6 +429,183 @@  static int ppe_rss_hash_config_set(struct ppe_device *ppe_dev,
 	return 0;
 }
 
+static int ppe_queue_ac_threshold_set(struct ppe_device *ppe_dev,
+				      int queue,
+				      struct ppe_queue_ac_threshold ac_threshold)
+{
+	union ppe_ac_uni_queue_cfg_u uni_queue_cfg;
+
+	if (queue >= PPE_AC_UNI_QUEUE_CFG_TBL_NUM)
+		return -EINVAL;
+
+	memset(&uni_queue_cfg, 0, sizeof(uni_queue_cfg));
+	ppe_read_tbl(ppe_dev, PPE_AC_UNI_QUEUE_CFG_TBL +
+		     PPE_AC_UNI_QUEUE_CFG_TBL_INC * queue,
+		     uni_queue_cfg.val, sizeof(uni_queue_cfg.val));
+
+	uni_queue_cfg.bf.wred_en = ac_threshold.wred_enable;
+	uni_queue_cfg.bf.color_aware = ac_threshold.color_enable;
+	uni_queue_cfg.bf.shared_dynamic = ac_threshold.dynamic;
+	uni_queue_cfg.bf.shared_weight = ac_threshold.shared_weight;
+	uni_queue_cfg.bf.shared_ceiling = ac_threshold.ceiling;
+	uni_queue_cfg.bf.gap_grn_grn_min = ac_threshold.green_min_off;
+	uni_queue_cfg.bf.gap_grn_yel_max = ac_threshold.yel_max_off;
+	uni_queue_cfg.bf.gap_grn_yel_min_0 = ac_threshold.yel_min_off & 0x3ff;
+	uni_queue_cfg.bf.gap_grn_yel_min_1 = (ac_threshold.yel_min_off >> 10) & BIT(0);
+	uni_queue_cfg.bf.gap_grn_red_max = ac_threshold.red_max_off;
+	uni_queue_cfg.bf.gap_grn_red_min = ac_threshold.red_min_off;
+	uni_queue_cfg.bf.red_resume_0 = ac_threshold.red_resume_off & 0x1ff;
+	uni_queue_cfg.bf.red_resume_1 = (ac_threshold.red_resume_off >> 9) & GENMASK(1, 0);
+	uni_queue_cfg.bf.yel_resume = ac_threshold.yel_resume_off;
+	uni_queue_cfg.bf.grn_resume = ac_threshold.green_resume_off;
+
+	return ppe_write_tbl(ppe_dev, PPE_AC_UNI_QUEUE_CFG_TBL +
+			     PPE_AC_UNI_QUEUE_CFG_TBL_INC * queue,
+			     uni_queue_cfg.val, sizeof(uni_queue_cfg.val));
+}
+
+static int ppe_queue_ac_threshold_get(struct ppe_device *ppe_dev,
+				      int queue,
+				      struct ppe_queue_ac_threshold *ac_threshold)
+{
+	union ppe_ac_uni_queue_cfg_u uni_queue_cfg;
+
+	if (queue >= PPE_AC_UNI_QUEUE_CFG_TBL_NUM)
+		return -EINVAL;
+
+	memset(&uni_queue_cfg, 0, sizeof(uni_queue_cfg));
+	ppe_read_tbl(ppe_dev, PPE_AC_UNI_QUEUE_CFG_TBL +
+		     PPE_AC_UNI_QUEUE_CFG_TBL_INC * queue,
+		     uni_queue_cfg.val, sizeof(uni_queue_cfg.val));
+
+	ac_threshold->wred_enable = uni_queue_cfg.bf.wred_en;
+	ac_threshold->color_enable = uni_queue_cfg.bf.color_aware;
+	ac_threshold->dynamic = uni_queue_cfg.bf.shared_dynamic;
+	ac_threshold->shared_weight = uni_queue_cfg.bf.shared_weight;
+	ac_threshold->ceiling = uni_queue_cfg.bf.shared_ceiling;
+	ac_threshold->green_min_off = uni_queue_cfg.bf.gap_grn_grn_min;
+	ac_threshold->yel_max_off = uni_queue_cfg.bf.gap_grn_yel_max;
+	ac_threshold->yel_min_off = (uni_queue_cfg.bf.gap_grn_yel_min_0 & 0x3ff) |
+				    ((uni_queue_cfg.bf.gap_grn_yel_min_1 & BIT(0)) << 10);
+	ac_threshold->red_max_off = uni_queue_cfg.bf.gap_grn_red_max;
+	ac_threshold->red_min_off = uni_queue_cfg.bf.gap_grn_red_min;
+	ac_threshold->red_resume_off = (uni_queue_cfg.bf.red_resume_0 & 0x1ff) |
+				       ((uni_queue_cfg.bf.red_resume_1 & GENMASK(1, 0)) << 9);
+	ac_threshold->yel_resume_off = uni_queue_cfg.bf.yel_resume;
+	ac_threshold->green_resume_off = uni_queue_cfg.bf.grn_resume;
+
+	return 0;
+}
+
+static int ppe_queue_ac_ctrl_set(struct ppe_device *ppe_dev,
+				 u32 index,
+				 struct ppe_queue_ac_ctrl ac_ctrl)
+{
+	union ppe_ac_uni_queue_cfg_u uni_queue_cfg;
+	union ppe_ac_mul_queue_cfg_u mul_queue_cfg;
+	union ppe_ac_grp_cfg_u grp_cfg;
+	int id;
+
+	memset(&grp_cfg, 0, sizeof(grp_cfg));
+	memset(&uni_queue_cfg, 0, sizeof(uni_queue_cfg));
+	memset(&mul_queue_cfg, 0, sizeof(mul_queue_cfg));
+
+	id = FIELD_GET(PPE_QUEUE_AC_VALUE_MASK, index);
+	if (FIELD_GET(PPE_QUEUE_AC_TYPE_MASK, index) == PPE_QUEUE_AC_TYPE_GROUP) {
+		ppe_read_tbl(ppe_dev, PPE_AC_GRP_CFG_TBL +
+			     PPE_AC_GRP_CFG_TBL_INC * id,
+			     grp_cfg.val, sizeof(grp_cfg.val));
+
+		grp_cfg.bf.ac_en = ac_ctrl.ac_en;
+		grp_cfg.bf.force_ac_en = ac_ctrl.ac_fc_en;
+
+		ppe_write_tbl(ppe_dev, PPE_AC_GRP_CFG_TBL +
+			      PPE_AC_GRP_CFG_TBL_INC * id,
+			      grp_cfg.val, sizeof(grp_cfg.val));
+	} else {
+		if (id > PPE_QUEUE_AC_UCAST_MAX) {
+			ppe_read_tbl(ppe_dev, PPE_AC_MUL_QUEUE_CFG_TBL +
+				     PPE_AC_MUL_QUEUE_CFG_TBL_INC * id,
+				     mul_queue_cfg.val, sizeof(mul_queue_cfg.val));
+
+			mul_queue_cfg.bf.ac_en = ac_ctrl.ac_en;
+			mul_queue_cfg.bf.force_ac_en = ac_ctrl.ac_fc_en;
+
+			ppe_write_tbl(ppe_dev, PPE_AC_MUL_QUEUE_CFG_TBL +
+				      PPE_AC_MUL_QUEUE_CFG_TBL_INC * id,
+				      mul_queue_cfg.val, sizeof(mul_queue_cfg.val));
+		} else {
+			ppe_read_tbl(ppe_dev, PPE_AC_UNI_QUEUE_CFG_TBL +
+				     PPE_AC_UNI_QUEUE_CFG_TBL_INC * id,
+				     uni_queue_cfg.val, sizeof(uni_queue_cfg.val));
+
+			uni_queue_cfg.bf.ac_en = ac_ctrl.ac_en;
+			uni_queue_cfg.bf.force_ac_en = ac_ctrl.ac_fc_en;
+
+			ppe_write_tbl(ppe_dev, PPE_AC_UNI_QUEUE_CFG_TBL +
+				      PPE_AC_UNI_QUEUE_CFG_TBL_INC * id,
+				      uni_queue_cfg.val, sizeof(uni_queue_cfg.val));
+		}
+	}
+
+	return 0;
+}
+
+static int ppe_queue_ac_ctrl_get(struct ppe_device *ppe_dev,
+				 u32 index,
+				 struct ppe_queue_ac_ctrl *ac_ctrl)
+{
+	union ppe_ac_uni_queue_cfg_u uni_queue_cfg;
+	union ppe_ac_mul_queue_cfg_u mul_queue_cfg;
+	union ppe_ac_grp_cfg_u grp_cfg;
+	int id;
+
+	memset(&grp_cfg, 0, sizeof(grp_cfg));
+	memset(&uni_queue_cfg, 0, sizeof(uni_queue_cfg));
+	memset(&mul_queue_cfg, 0, sizeof(mul_queue_cfg));
+
+	id = FIELD_GET(PPE_QUEUE_AC_VALUE_MASK, index);
+	if (FIELD_GET(PPE_QUEUE_AC_TYPE_MASK, index) == PPE_QUEUE_AC_TYPE_GROUP) {
+		ppe_read_tbl(ppe_dev, PPE_AC_GRP_CFG_TBL +
+			     PPE_AC_GRP_CFG_TBL_INC * id,
+			     grp_cfg.val, sizeof(grp_cfg.val));
+
+		ac_ctrl->ac_en = grp_cfg.bf.ac_en;
+		ac_ctrl->ac_fc_en = grp_cfg.bf.force_ac_en;
+	} else {
+		if (id > PPE_QUEUE_AC_UCAST_MAX) {
+			ppe_read_tbl(ppe_dev, PPE_AC_MUL_QUEUE_CFG_TBL +
+				     PPE_AC_MUL_QUEUE_CFG_TBL_INC * id,
+				     mul_queue_cfg.val, sizeof(mul_queue_cfg.val));
+
+			ac_ctrl->ac_en = mul_queue_cfg.bf.ac_en;
+			ac_ctrl->ac_fc_en = mul_queue_cfg.bf.force_ac_en;
+		} else {
+			ppe_read_tbl(ppe_dev, PPE_AC_UNI_QUEUE_CFG_TBL +
+				     PPE_AC_UNI_QUEUE_CFG_TBL_INC * id,
+				     uni_queue_cfg.val, sizeof(uni_queue_cfg.val));
+
+			ac_ctrl->ac_en = uni_queue_cfg.bf.ac_en;
+			ac_ctrl->ac_fc_en = uni_queue_cfg.bf.force_ac_en;
+		}
+	}
+
+	return 0;
+}
+
+static int ppe_ring_queue_map_set(struct ppe_device *ppe_dev,
+				  int ring_id,
+				  u32 *queue_map)
+{
+	union ppe_ring_q_map_cfg_u ring_q_map;
+
+	memset(&ring_q_map, 0, sizeof(ring_q_map));
+
+	memcpy(ring_q_map.val, queue_map, sizeof(ring_q_map.val));
+	return ppe_write_tbl(ppe_dev, PPE_RING_Q_MAP_TBL + PPE_RING_Q_MAP_TBL_INC * ring_id,
+			     ring_q_map.val, sizeof(ring_q_map.val));
+}
+
 static const struct ppe_queue_ops qcom_ppe_queue_config_ops = {
 	.queue_scheduler_set = ppe_queue_scheduler_set,
 	.queue_scheduler_get = ppe_queue_scheduler_get,
@@ -437,6 +614,11 @@  static const struct ppe_queue_ops qcom_ppe_queue_config_ops = {
 	.queue_ucast_pri_class_set = ppe_queue_ucast_pri_class_set,
 	.queue_ucast_hash_class_set = ppe_queue_ucast_hash_class_set,
 	.rss_hash_config_set = ppe_rss_hash_config_set,
+	.queue_ac_threshold_set = ppe_queue_ac_threshold_set,
+	.queue_ac_threshold_get = ppe_queue_ac_threshold_get,
+	.queue_ac_ctrl_set = ppe_queue_ac_ctrl_set,
+	.queue_ac_ctrl_get = ppe_queue_ac_ctrl_get,
+	.ring_queue_map_set = ppe_ring_queue_map_set,
 };
 
 const struct ppe_queue_ops *ppe_queue_config_ops_get(void)
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_ops.h b/drivers/net/ethernet/qualcomm/ppe/ppe_ops.h
index da0f37323042..9d069d73e257 100644
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_ops.h
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_ops.h
@@ -14,6 +14,12 @@ 
 #define PPE_QUEUE_HASH_MAX		256
 #define PPE_RSS_HASH_MODE_IPV4		BIT(0)
 #define PPE_RSS_HASH_MODE_IPV6		BIT(1)
+#define PPE_QUEUE_AC_TYPE_QUEUE			0
+#define PPE_QUEUE_AC_TYPE_GROUP			1
+#define PPE_QUEUE_AC_UCAST_MAX			255
+#define PPE_QUEUE_AC_VALUE_MASK			GENMASK(23, 0)
+#define PPE_QUEUE_AC_TYPE_MASK			GENMASK(31, 24)
+#define PPE_RING_MAPPED_BP_QUEUE_WORD_COUNT	10
 
 /* PPE hardware QoS configurations used to dispatch the packet passed
  * through PPE, the scheduler supports DRR(deficit round robin with the
@@ -167,6 +173,32 @@  struct ppe_rss_hash_cfg {
 	u8 hash_fin_outer[5];
 };
 
+/* PPE queue threshold configuration for admission control. The threshold
+ * decides the queue length, and can either be configured statically or be
+ * adjusted dynamically based on the number of free buffers.
+ */
+struct ppe_queue_ac_threshold {
+	bool color_enable;
+	bool wred_enable;
+	bool dynamic;
+	int shared_weight;
+	int green_min_off;
+	int yel_max_off;
+	int yel_min_off;
+	int red_max_off;
+	int red_min_off;
+	int green_resume_off;
+	int yel_resume_off;
+	int red_resume_off;
+	int ceiling;
+};
+
+/* Admission control status of a PPE queue. */
+struct ppe_queue_ac_ctrl {
+	bool ac_en;
+	bool ac_fc_en;
+};
+
 /* The operations are used to configure the PPE queue related resource */
 struct ppe_queue_ops {
 	int (*queue_scheduler_set)(struct ppe_device *ppe_dev,
@@ -198,6 +230,21 @@  struct ppe_queue_ops {
 	int (*rss_hash_config_set)(struct ppe_device *ppe_dev,
 				   int mode,
 				   struct ppe_rss_hash_cfg hash_cfg);
+	int (*queue_ac_threshold_set)(struct ppe_device *ppe_dev,
+				      int queue,
+				      struct ppe_queue_ac_threshold ac_threshold);
+	int (*queue_ac_threshold_get)(struct ppe_device *ppe_dev,
+				      int queue,
+				      struct ppe_queue_ac_threshold *ac_threshold);
+	int (*queue_ac_ctrl_set)(struct ppe_device *ppe_dev,
+				 u32 index,
+				 struct ppe_queue_ac_ctrl ac_ctrl);
+	int (*queue_ac_ctrl_get)(struct ppe_device *ppe_dev,
+				 u32 index,
+				 struct ppe_queue_ac_ctrl *ac_ctrl);
+	int (*ring_queue_map_set)(struct ppe_device *ppe_dev,
+				  int ring_id,
+				  u32 *queue_map);
 };
 
 const struct ppe_queue_ops *ppe_queue_config_ops_get(void);
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
index b42089599cc9..ef12037ffed5 100644
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
@@ -238,6 +238,30 @@  union ppe_mru_mtu_ctrl_cfg_u {
 #define PPE_L0_COMP_CFG_TBL_SHAPER_METER_LEN			GENMASK(1, 0)
 #define PPE_L0_COMP_CFG_TBL_DRR_METER_LEN			GENMASK(3, 2)
 
+#define PPE_RING_Q_MAP_TBL					0x42a000
+#define PPE_RING_Q_MAP_TBL_NUM					24
+#define PPE_RING_Q_MAP_TBL_INC					0x40
+
+/* The queue bitmap for the back pressure from EDMA RX ring to PPE queue */
+struct ppe_ring_q_map_cfg {
+	u32 queue_bitmap_0;
+	u32 queue_bitmap_1;
+	u32 queue_bitmap_2;
+	u32 queue_bitmap_3;
+	u32 queue_bitmap_4;
+	u32 queue_bitmap_5;
+	u32 queue_bitmap_6;
+	u32 queue_bitmap_7;
+	u32 queue_bitmap_8;
+	u32 queue_bitmap_9:12,
+	    res0:20;
+};
+
+union ppe_ring_q_map_cfg_u {
+	u32 val[10];
+	struct ppe_ring_q_map_cfg bf;
+};
+
 #define PPE_DEQ_OPR_TBL						0x430000
 #define PPE_DEQ_OPR_TBL_NUM					300
 #define PPE_DEQ_OPR_TBL_INC					0x10