From patchwork Thu Jan 28 11:51:35 2021
X-Patchwork-Submitter: Huazhong Tan
X-Patchwork-Id: 12053499
X-Patchwork-Delegate: kuba@kernel.org
From: Huazhong Tan
Subject: [PATCH V2 net-next 1/2] net: hns3: add interfaces to query information of tm priority/qset
Date: Thu, 28 Jan 2021 19:51:35 +0800
Message-ID: <1611834696-56207-2-git-send-email-tanhuazhong@huawei.com>
In-Reply-To: <1611834696-56207-1-git-send-email-tanhuazhong@huawei.com>
References: <1611834696-56207-1-git-send-email-tanhuazhong@huawei.com>
X-Mailing-List: netdev@vger.kernel.org

From: Guangbin Huang

Add interfaces to get information about tm priority and qset, so that
they can be used by debugfs.
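
Not part of this patch, but as a rough sketch of how these getters are
meant to be consumed, a caller could walk every qset as below; the wrapper
name and its output format are invented for illustration, and the real
consumer is the debugfs dump added in patch 2/2:

static int example_dump_qset_cfg(struct hclge_dev *hdev, char *buf, int len)
{
	u8 priority, link_vld, weight;
	u16 qset_num, i;
	int pos = 0;
	int ret;

	/* qset count depends on the hardware revision */
	ret = hclge_tm_get_qset_num(hdev, &qset_num);
	if (ret)
		return ret;

	for (i = 0; i < qset_num; i++) {
		/* priority mapping and link state of this qset */
		ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
		if (ret)
			return ret;

		/* DWRR weight of this qset */
		ret = hclge_tm_get_qset_weight(hdev, i, &weight);
		if (ret)
			return ret;

		pos += scnprintf(buf + pos, len - pos, "%u %u %u %u\n",
				 i, priority, link_vld, weight);
	}

	return 0;
}
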
Signed-off-by: Guangbin Huang
Signed-off-by: Huazhong Tan
---
 .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h |   1 +
 .../net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c  | 186 +++++++++++++++++++++
 .../net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h  |  48 +++++-
 3 files changed, 234 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index edfadb5..f861bdb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -160,6 +160,7 @@ enum hclge_opcode_type {
 	HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
 	HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
 	HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
+	HCLGE_OPC_TM_NODES = 0x0816,
 	HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
 	HCLGE_OPC_QSET_DFX_STS = 0x0844,
 	HCLGE_OPC_PRI_DFX_STS = 0x0845,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 82742a6..216ab1e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1616,3 +1616,189 @@ int hclge_tm_vport_map_update(struct hclge_dev *hdev)
 
 	return hclge_tm_bp_setup(hdev);
 }
+
+int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num)
+{
+	struct hclge_tm_nodes_cmd *nodes;
+	struct hclge_desc desc;
+	int ret;
+
+	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
+		/* Each PF has 8 qsets and each VF has 1 qset */
+		*qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev);
+		return 0;
+	}
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to get qset num, ret = %d\n", ret);
+		return ret;
+	}
+
+	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
+	*qset_num = le16_to_cpu(nodes->qset_num);
+	return 0;
+}
+
+int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num)
+{
+	struct hclge_tm_nodes_cmd *nodes;
+	struct hclge_desc desc;
+	int ret;
+
+	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
+		*pri_num = HCLGE_TM_PF_MAX_PRI_NUM;
+		return 0;
+	}
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to get pri num, ret = %d\n", ret);
+		return ret;
+	}
+
+	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
+	*pri_num = nodes->pri_num;
+	return 0;
+}
+
+int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
+			      u8 *link_vld)
+{
+	struct hclge_qs_to_pri_link_cmd *map;
+	struct hclge_desc desc;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true);
+	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
+	map->qs_id = cpu_to_le16(qset_id);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to get qset map priority, ret = %d\n", ret);
+		return ret;
+	}
+
+	*priority = map->priority;
+	*link_vld = map->link_vld;
+	return 0;
+}
+
+int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode)
+{
+	struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
+	struct hclge_desc desc;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true);
+	qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
+	qs_sch_mode->qs_id = cpu_to_le16(qset_id);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to get qset sch mode, ret = %d\n", ret);
+		return ret;
+	}
+
+	*mode = qs_sch_mode->sch_mode;
+	return 0;
+}
+
+int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)
+{
+	struct hclge_qs_weight_cmd *qs_weight;
+	struct hclge_desc desc;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true);
+	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
+	qs_weight->qs_id = cpu_to_le16(qset_id);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to get qset weight, ret = %d\n", ret);
+		return ret;
+	}
+
+	*weight = qs_weight->dwrr;
+	return 0;
+}
+
+int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)
+{
+	struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
+	struct hclge_desc desc;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true);
+	pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
+	pri_sch_mode->pri_id = pri_id;
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to get priority sch mode, ret = %d\n", ret);
+		return ret;
+	}
+
+	*mode = pri_sch_mode->sch_mode;
+	return 0;
+}
+
+int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)
+{
+	struct hclge_priority_weight_cmd *priority_weight;
+	struct hclge_desc desc;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true);
+	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
+	priority_weight->pri_id = pri_id;
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to get priority weight, ret = %d\n", ret);
+		return ret;
+	}
+
+	*weight = priority_weight->dwrr;
+	return 0;
+}
+
+int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
+			    enum hclge_opcode_type cmd,
+			    struct hclge_pri_shaper_para *para)
+{
+	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
+	struct hclge_desc desc;
+	u32 shapping_para;
+	int ret;
+
+	if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING &&
+	    cmd != HCLGE_OPC_TM_PRI_P_SHAPPING)
+		return -EINVAL;
+
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+	shap_cfg_cmd->pri_id = pri_id;
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to get priority shaper(%#x), ret = %d\n",
+			cmd, ret);
+		return ret;
+	}
+
+	shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
+	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
+	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
+	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
+	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
+	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
+	para->flag = shap_cfg_cmd->flag;
+	para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);
+	return 0;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 5498d73..d33cb04 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -21,6 +21,9 @@
 
 #define HCLGE_ETHER_MAX_RATE	100000
 
+#define HCLGE_TM_PF_MAX_PRI_NUM		8
+#define HCLGE_TM_PF_MAX_QSET_NUM	8
+
 struct hclge_pg_to_pri_link_cmd {
 	u8 pg_id;
 	u8 rsvd1[3];
@@ -65,6 +68,18 @@ struct hclge_priority_weight_cmd {
 	u8 dwrr;
 };
 
+struct hclge_pri_sch_mode_cfg_cmd {
+	u8 pri_id;
+	u8 rsvd[3];
+	u8 sch_mode;
+};
+
+struct hclge_qs_sch_mode_cfg_cmd {
+	__le16 qs_id;
+	u8 rsvd[2];
+	u8 sch_mode;
+};
+
 struct hclge_qs_weight_cmd {
 	__le16 qs_id;
 	u8 dwrr;
@@ -173,6 +188,27 @@ struct hclge_shaper_ir_para {
 	u8 ir_s; /* IR_S parameter of IR shaper */
 };
 
+struct hclge_tm_nodes_cmd {
+	u8 pg_base_id;
+	u8 pri_base_id;
+	__le16 qset_base_id;
+	__le16 queue_base_id;
+	u8 pg_num;
+	u8 pri_num;
+	__le16 qset_num;
+	__le16 queue_num;
+};
+
+struct hclge_pri_shaper_para {
+	u8 ir_b;
+	u8 ir_u;
+	u8 ir_s;
+	u8 bs_b;
+	u8 bs_s;
+	u8 flag;
+	u32 rate;
+};
+
 #define hclge_tm_set_field(dest, string, val) \
 	hnae3_set_field((dest), \
 			(HCLGE_TM_SHAP_##string##_MSK), \
@@ -195,5 +231,15 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
 int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
 int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
 int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
-
+int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num);
+int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num);
+int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
+			      u8 *link_vld);
+int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode);
+int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight);
+int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode);
+int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight);
+int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
+			    enum hclge_opcode_type cmd,
+			    struct hclge_pri_shaper_para *para);
 #endif

From patchwork Thu Jan 28 11:51:36 2021
X-Patchwork-Submitter: Huazhong Tan
X-Patchwork-Id: 12053501
X-Patchwork-Delegate: kuba@kernel.org
From: Huazhong Tan
Subject: [PATCH V2 net-next 2/2] net: hns3: add debugfs support for tm nodes, priority and qset info
Date: Thu, 28 Jan 2021 19:51:36 +0800
Message-ID: <1611834696-56207-3-git-send-email-tanhuazhong@huawei.com>
In-Reply-To: <1611834696-56207-1-git-send-email-tanhuazhong@huawei.com>
References: <1611834696-56207-1-git-send-email-tanhuazhong@huawei.com>
X-Mailing-List: netdev@vger.kernel.org

From: Guangbin Huang

In order to query tm info of nodes, priority and qset for debugging, add
three debugfs files, tm_nodes, tm_priority and tm_qset, in a newly created
tm directory. Unlike the previous debugfs commands, these three files only
support read ops, so their info is dumped by reading them with the cat
command.

The new tm file style follows the suggestion from Jakub Kicinski:
https://lkml.org/lkml/2020/9/29/2101
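
The read flow added below can be summarized by the following condensed
sketch (the function name is illustrative and error reporting is trimmed):
the shared .read handler keys off the debugfs file name and has the hclge
layer fill a HNS3_DBG_READ_LEN buffer using the getters from patch 1/2.

static ssize_t hns3_dbg_read_sketch(struct file *filp, char __user *buffer,
				    size_t count, loff_t *ppos)
{
	struct hnae3_handle *handle = filp->private_data;
	const char *name = filp->f_path.dentry->d_iname; /* e.g. "tm_qset" */
	char *read_buf;
	ssize_t size;

	read_buf = kzalloc(HNS3_DBG_READ_LEN, GFP_KERNEL);
	if (!read_buf)
		return -ENOMEM;

	/* hclge_dbg_read_cmd() dispatches on the file name and fills the
	 * buffer with the corresponding tm dump.
	 */
	if (handle->ae_algo->ops->dbg_read_cmd(handle, name, read_buf,
					       HNS3_DBG_READ_LEN)) {
		kfree(read_buf);
		return -EINVAL;
	}

	size = simple_read_from_buffer(buffer, count, ppos, read_buf,
				       strlen(read_buf));
	kfree(read_buf);
	return size;
}

With this in place, the per-device debugfs directory is expected to hold
the existing "cmd" file plus a tm/ directory containing tm_nodes,
tm_priority and tm_qset (tm_nodes only being created on hardware newer
than DEVICE_VERSION_V2), each readable with cat.
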
Signed-off-by: Guangbin Huang
Signed-off-by: Huazhong Tan
---
 drivers/net/ethernet/hisilicon/hns3/hnae3.h        |   8 ++
 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c |  55 +++++++-
 .../ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 153 +++++++++++++++++++++
 .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c    |   1 +
 .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h    |   2 +
 5 files changed, 218 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index a7daf6d..fe09cf6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -465,6 +465,8 @@ struct hnae3_ae_dev {
  * Delete clsflower rule
  * cls_flower_active
  * Check if any cls flower rule exist
+ * dbg_read_cmd
+ * Execute debugfs read command.
  */
 struct hnae3_ae_ops {
 	int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -620,6 +622,8 @@ struct hnae3_ae_ops {
 	int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
 			      u16 flow_id, struct flow_keys *fkeys);
 	int (*dbg_run_cmd)(struct hnae3_handle *handle, const char *cmd_buf);
+	int (*dbg_read_cmd)(struct hnae3_handle *handle, const char *cmd_buf,
+			    char *buf, int len);
 	pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
 	bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
 	bool (*ae_dev_resetting)(struct hnae3_handle *handle);
@@ -777,6 +781,10 @@ struct hnae3_handle {
 #define hnae3_get_bit(origin, shift) \
 	hnae3_get_field((origin), (0x1 << (shift)), (shift))
 
+#define HNAE3_DBG_TM_NODES	"tm_nodes"
+#define HNAE3_DBG_TM_PRI	"tm_priority"
+#define HNAE3_DBG_TM_QSET	"tm_qset"
+
 int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
 void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 9d4e9c0..6978304 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -7,7 +7,7 @@
 #include "hnae3.h"
 #include "hns3_enet.h"
 
-#define HNS3_DBG_READ_LEN 256
+#define HNS3_DBG_READ_LEN 65536
 #define HNS3_DBG_WRITE_LEN 1024
 
 static struct dentry *hns3_dbgfs_root;
@@ -484,6 +484,42 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
 	return count;
 }
 
+static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
+			     size_t count, loff_t *ppos)
+{
+	struct hnae3_handle *handle = filp->private_data;
+	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+	struct hns3_nic_priv *priv = handle->priv;
+	char *cmd_buf, *read_buf;
+	ssize_t size = 0;
+	int ret = 0;
+
+	if (!filp->f_path.dentry->d_iname)
+		return -EINVAL;
+
+	read_buf = kzalloc(HNS3_DBG_READ_LEN, GFP_KERNEL);
+	if (!read_buf)
+		return -ENOMEM;
+
+	cmd_buf = filp->f_path.dentry->d_iname;
+
+	if (ops->dbg_read_cmd)
+		ret = ops->dbg_read_cmd(handle, cmd_buf, read_buf,
+					HNS3_DBG_READ_LEN);
+
+	if (ret) {
+		dev_info(priv->dev, "unknown command\n");
+		goto out;
+	}
+
+	size = simple_read_from_buffer(buffer, count, ppos, read_buf,
+				       strlen(read_buf));
+
+out:
+	kfree(read_buf);
+	return size;
+}
+
 static const struct file_operations hns3_dbg_cmd_fops = {
 	.owner = THIS_MODULE,
 	.open = simple_open,
@@ -491,14 +527,31 @@ static const struct file_operations hns3_dbg_cmd_fops = {
 	.write = hns3_dbg_cmd_write,
 };
 
+static const struct file_operations hns3_dbg_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = hns3_dbg_read,
+};
+
 void hns3_dbg_init(struct hnae3_handle *handle)
 {
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
 	const char *name = pci_name(handle->pdev);
+	struct dentry *entry_dir;
 
 	handle->hnae3_dbgfs = debugfs_create_dir(name, hns3_dbgfs_root);
 
 	debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
			    &hns3_dbg_cmd_fops);
+
+	entry_dir = debugfs_create_dir("tm", handle->hnae3_dbgfs);
+	if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2)
+		debugfs_create_file(HNAE3_DBG_TM_NODES, 0600, entry_dir, handle,
+				    &hns3_dbg_fops);
+	debugfs_create_file(HNAE3_DBG_TM_PRI, 0600, entry_dir, handle,
+			    &hns3_dbg_fops);
+	debugfs_create_file(HNAE3_DBG_TM_QSET, 0600, entry_dir, handle,
+			    &hns3_dbg_fops);
 }
 
 void hns3_dbg_uninit(struct hnae3_handle *handle)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 8f6dea5..8f3fefe 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -800,6 +800,140 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
 		cmd, ret);
 }
 
+static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
+{
+	struct hclge_tm_nodes_cmd *nodes;
+	struct hclge_desc desc;
+	int pos = 0;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to dump tm nodes, ret = %d\n", ret);
+		return ret;
+	}
+
+	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
+
+	pos += scnprintf(buf + pos, len - pos, " BASE_ID MAX_NUM\n");
+	pos += scnprintf(buf + pos, len - pos, "PG %4u %4u\n",
+			 nodes->pg_base_id, nodes->pg_num);
+	pos += scnprintf(buf + pos, len - pos, "PRI %4u %4u\n",
+			 nodes->pri_base_id, nodes->pri_num);
+	pos += scnprintf(buf + pos, len - pos, "QSET %4u %4u\n",
+			 le16_to_cpu(nodes->qset_base_id),
+			 le16_to_cpu(nodes->qset_num));
+	pos += scnprintf(buf + pos, len - pos, "QUEUE %4u %4u\n",
+			 le16_to_cpu(nodes->queue_base_id),
+			 le16_to_cpu(nodes->queue_num));
+
+	return 0;
+}
+
+static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
+{
+	struct hclge_pri_shaper_para c_shaper_para;
+	struct hclge_pri_shaper_para p_shaper_para;
+	u8 pri_num, sch_mode, weight;
+	char *sch_mode_str;
+	int pos = 0;
+	int ret;
+	u8 i;
+
+	ret = hclge_tm_get_pri_num(hdev, &pri_num);
+	if (ret)
+		return ret;
+
+	pos += scnprintf(buf + pos, len - pos,
+			 "ID MODE DWRR C_IR_B C_IR_U C_IR_S C_BS_B ");
+	pos += scnprintf(buf + pos, len - pos,
+			 "C_BS_S C_FLAG C_RATE(Mbps) P_IR_B P_IR_U ");
+	pos += scnprintf(buf + pos, len - pos,
+			 "P_IR_S P_BS_B P_BS_S P_FLAG P_RATE(Mbps)\n");
+
+	for (i = 0; i < pri_num; i++) {
+		ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
+		if (ret)
+			return ret;
+
+		ret = hclge_tm_get_pri_weight(hdev, i, &weight);
+		if (ret)
+			return ret;
+
+		ret = hclge_tm_get_pri_shaper(hdev, i,
+					      HCLGE_OPC_TM_PRI_C_SHAPPING,
+					      &c_shaper_para);
+		if (ret)
+			return ret;
+
+		ret = hclge_tm_get_pri_shaper(hdev, i,
+					      HCLGE_OPC_TM_PRI_P_SHAPPING,
+					      &p_shaper_para);
+		if (ret)
+			return ret;
+
+		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
+			       "sp";
+
+		pos += scnprintf(buf + pos, len - pos,
+				 "%04u %4s %3u %3u %3u %3u ",
+				 i, sch_mode_str, weight, c_shaper_para.ir_b,
+				 c_shaper_para.ir_u, c_shaper_para.ir_s);
+		pos += scnprintf(buf + pos, len - pos,
+				 "%3u %3u %1u %6u ",
+				 c_shaper_para.bs_b, c_shaper_para.bs_s,
+				 c_shaper_para.flag, c_shaper_para.rate);
+		pos += scnprintf(buf + pos, len - pos,
+				 "%3u %3u %3u %3u %3u ",
+				 p_shaper_para.ir_b, p_shaper_para.ir_u,
+				 p_shaper_para.ir_s, p_shaper_para.bs_b,
+				 p_shaper_para.bs_s);
+		pos += scnprintf(buf + pos, len - pos, "%1u %6u\n",
+				 p_shaper_para.flag, p_shaper_para.rate);
+	}
+
+	return 0;
+}
+
+static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
+{
+	u8 priority, link_vld, sch_mode, weight;
+	char *sch_mode_str;
+	int ret, pos;
+	u16 qset_num;
+	u16 i;
+
+	ret = hclge_tm_get_qset_num(hdev, &qset_num);
+	if (ret)
+		return ret;
+
+	pos = scnprintf(buf, len, "ID MAP_PRI LINK_VLD MODE DWRR\n");
+
+	for (i = 0; i < qset_num; i++) {
+		ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
+		if (ret)
+			return ret;
+
+		ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode);
+		if (ret)
+			return ret;
+
+		ret = hclge_tm_get_qset_weight(hdev, i, &weight);
+		if (ret)
+			return ret;
+
+		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
+			       "sp";
+		pos += scnprintf(buf + pos, len - pos,
+				 "%04u %4u %1u %4s %3u\n",
+				 i, priority, link_vld, sch_mode_str, weight);
+	}
+
+	return 0;
+}
+
 static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
 {
 	struct hclge_cfg_pause_param_cmd *pause_param;
@@ -1591,3 +1725,22 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
 
 	return 0;
 }
+
+int hclge_dbg_read_cmd(struct hnae3_handle *handle, const char *cmd_buf,
+		       char *buf, int len)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	if (strncmp(cmd_buf, HNAE3_DBG_TM_NODES,
+		    strlen(HNAE3_DBG_TM_NODES)) == 0)
+		return hclge_dbg_dump_tm_nodes(hdev, buf, len);
+	else if (strncmp(cmd_buf, HNAE3_DBG_TM_PRI,
+			 strlen(HNAE3_DBG_TM_PRI)) == 0)
+		return hclge_dbg_dump_tm_pri(hdev, buf, len);
+	else if (strncmp(cmd_buf, HNAE3_DBG_TM_QSET,
+			 strlen(HNAE3_DBG_TM_QSET)) == 0)
+		return hclge_dbg_dump_tm_qset(hdev, buf, len);
+
+	return -EINVAL;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index c242883..16ccb1a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -11850,6 +11850,7 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.enable_fd = hclge_enable_fd,
 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
 	.dbg_run_cmd = hclge_dbg_run_cmd,
+	.dbg_read_cmd = hclge_dbg_read_cmd,
 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
 	.ae_dev_resetting = hclge_ae_dev_resetting,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index ca46bc9..32e5f82 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -1006,6 +1006,8 @@ int hclge_vport_start(struct hclge_vport *vport);
 void hclge_vport_stop(struct hclge_vport *vport);
 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
 int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf);
+int hclge_dbg_read_cmd(struct hnae3_handle *handle, const char *cmd_buf,
+		       char *buf, int len);
 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
 int hclge_notify_client(struct hclge_dev *hdev,
 			enum hnae3_reset_notify_type type);