Message ID | 1527271321-31583-3-git-send-email-daidavid1@codeaurora.org (mailing list archive) |
---|---|
State | Superseded, archived |
Headers | show |
Hi David, Thank you for the patch! On 05/25/2018 09:02 PM, David Dai wrote: > Introduce Qualcomm SDM845 specific provider driver using the > interconnect framework. > > Change-Id: I18194854a6029e814b2deedc1f5ebae6dffd42bf Please remove the change-id from all patches. > Signed-off-by: David Dai <daidavid1@codeaurora.org> > --- > drivers/interconnect/qcom/Kconfig | 6 + > drivers/interconnect/qcom/Makefile | 1 + > drivers/interconnect/qcom/qcom-icc-ids.h | 142 ++++++ > drivers/interconnect/qcom/sdm845.c | 815 +++++++++++++++++++++++++++++++ > 4 files changed, 964 insertions(+) > create mode 100644 drivers/interconnect/qcom/qcom-icc-ids.h > create mode 100644 drivers/interconnect/qcom/sdm845.c > > diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig > index 86465dc..8901181 100644 > --- a/drivers/interconnect/qcom/Kconfig > +++ b/drivers/interconnect/qcom/Kconfig > @@ -9,3 +9,9 @@ config INTERCONNECT_QCOM_MSM8916 > depends on INTERCONNECT_QCOM > help > This is a driver for the Qualcomm Network-on-Chip on msm8916-based platforms. > + > +config INTERCONNECT_QCOM_SDM845 > + tristate "Qualcomm SDM845 interconnect driver" > + depends on INTERCONNECT_QCOM maybe also: depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST > + help > + This is a driver for the Qualcomm Network-on-Chip on sdm845-based platforms. 
> diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile > index e8b24c3..9b08d01 100644 > --- a/drivers/interconnect/qcom/Makefile > +++ b/drivers/interconnect/qcom/Makefile > @@ -2,3 +2,4 @@ > obj-y += smd-rpm.o > > obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += msm8916.o > +obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += sdm845.o > diff --git a/drivers/interconnect/qcom/qcom-icc-ids.h b/drivers/interconnect/qcom/qcom-icc-ids.h > new file mode 100644 > index 0000000..527c299 > --- /dev/null > +++ b/drivers/interconnect/qcom/qcom-icc-ids.h In order to use these ids in DT files, I have moved them recently into include/dt-bindings/interconnect/qcom.h. I will resend my updated patches shortly. > @@ -0,0 +1,142 @@ > +// SPDX-License-Identifier: GPL-2.0 The preferred format for header files is: /* SPDX-License-Identifier: GPL-2.0 */ > +/* > + * Copyright (c) 2018, The Linux Foundation. All rights reserved. > + * > + */ > + > +#ifndef _QCOM_ICC_IDS_H > +#define _QCOM_ICC_IDS_H > + > +#define MASTER_APPSS_PROC 0 > +#define MASTER_TCU_0 1 > +#define MASTER_IPA_CORE 2 > +#define MASTER_LLCC 3 [..] > +#define SLAVE_SERVICE_GNOC 582 > +#define SLAVE_SERVICE_MEM_NOC 583 > +#define SLAVE_SERVICE_MNOC 584 > +#define SLAVE_SERVICE_SNOC 585 > +#define SLAVE_QDSS_STM 586 > +#define SLAVE_TCU 587 Please use just tabs with no spaces. > +#endif > diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c > new file mode 100644 > index 0000000..f5a77fe > --- /dev/null > +++ b/drivers/interconnect/qcom/sdm845.c > @@ -0,0 +1,815 @@ > +// SPDX-License-Identifier: GPL-2.0 > +/* > + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
> + * > + */ > + > +#include <linux/device.h> > +#include <linux/io.h> > +#include <linux/interconnect.h> > +#include <linux/interconnect-provider.h> > +#include <linux/module.h> > +#include <linux/of_device.h> > +#include <linux/of_platform.h> > +#include <linux/platform_device.h> > +#include <linux/slab.h> > +#include <linux/sort.h> > +#include <linux/debugfs.h> This header seems unused. > + > +#include <soc/qcom/rpmh.h> > +#include <soc/qcom/tcs.h> > +#include <soc/qcom/cmd-db.h> Alphabetical order please. > + > +#include "qcom-icc-ids.h" > + > +#define BCM_TCS_CMD_COMMIT_SHFT 30 > +#define BCM_TCS_CMD_COMMIT_MASK 0x40000000 > +#define BCM_TCS_CMD_VALID_SHFT 29 > +#define BCM_TCS_CMD_VALID_MASK 0x20000000 > +#define BCM_TCS_CMD_VOTE_X_SHFT 14 > +#define BCM_TCS_CMD_VOTE_MASK 0x3FFF > +#define BCM_TCS_CMD_VOTE_Y_SHFT 0 > +#define BCM_TCS_CMD_VOTE_Y_MASK 0xFFFC000 Please use lower case for hexadecimals. > + > +#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \ > + (((commit & 0x1) << BCM_TCS_CMD_COMMIT_SHFT) |\ > + ((valid & 0x1) << BCM_TCS_CMD_VALID_SHFT) |\ > + ((vote_x & BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) |\ > + ((vote_y & BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT)) > + > +#define to_qcom_provider(_provider) \ > + container_of(_provider, struct qcom_icc_provider, provider) > + > +#define DEFINE_QNODE(_name, _id, _channels, _buswidth, \ > + _numlinks, ...) \ > + static struct qcom_icc_node _name = { \ > + .id = _id, \ > + .name = #_name, \ > + .channels = _channels, \ > + .buswidth = _buswidth, \ > + .num_links = _numlinks, \ > + .links = { __VA_ARGS__ }, \ > + } > + > +#define DEFINE_QBCM(_name, _bcmname, _numnodes, ...) 
\ > + static struct qcom_icc_bcm _name = { \ > + .num_nodes = _numnodes, \ > + .name = _bcmname, \ > + .nodes = { __VA_ARGS__ }, \ > + } > + > +static struct device *qcom_dev; > + > +struct qcom_icc_provider { > + struct icc_provider provider; > + void __iomem *base; > + struct rpmh_client *rpmh_client; > + struct qcom_icc_bcm **bcms; > + size_t num_bcms; > +}; > + > +/** > + * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager(BCM) > + * @unit: bcm threshold values are in magnitudes of this > + * @width: prototype width > + * @vcd: virtual clock domain that this bcm belongs to > +*/ Please align to the previous line. > + > +struct bcm_db { > + u32 unit; > + u16 width; > + u8 vcd; > + u8 reserved; > +}; > + > +#define SDM845_MAX_LINKS 43 > +#define SDM845_MAX_BCMS 30 > +#define SDM845_MAX_BCM_PER_NODE 2 > +#define SDM845_MAX_VCD 10 > + > +/** > + * struct qcom_icc_node - Qualcomm specific interconnect nodes > + * @name: the node name used in debugfs > + * @links: an array of nodes where we can go next while traversing > + * @id: a unique node identifier > + * @num_links: the total number of @links > + * @channels: num of channels at this node > + * @buswidth: width of the interconnect between a node and the bus > + * @sum_avg: current sum aggregate value of all avg bw requests > + * @max_peak: current max aggregate value of all peak bw requests > + * @bcms: list of bcms associated with this logical node > + * @num_bcm: num of @bcms > + */ > +struct qcom_icc_node { > + unsigned char *name; > + u16 links[SDM845_MAX_LINKS]; > + u16 id; > + u16 num_links; > + u16 channels; > + u16 buswidth; > + u64 sum_avg; > + u64 max_peak; > + struct qcom_icc_bcm *bcms[SDM845_MAX_BCM_PER_NODE]; > + size_t num_bcms; > +}; > + > +/** > + * struct qcom_icc_bcm - Qualcomm specific hardware accelerator nodes > + * known as Bus Clock Manager(BCM) > + * @name: the bcm node name used to fetch BCM data from command db > + * @type: latency or bandwidth bcm > + * @addr: address 
offsets used when voting to RPMH > + * @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm > + * @vote_y: aggregated threshold values, represents peak_bw when @type is bw bcm Are these used when @type is latency? > + * @dirty: flag used to indicate whether or bcm needs to be committed > + * @aux_data: auxiliary data used when calculating threshold values and > + * communicating with RPMh > + * @list: used to link to other bcms when compiling lists for commit > + * @num_nodes: total number of @num_nodes > + * @nodes: list of qcom_icc_nodes that this BCM encapsulates > + */ > + > +struct qcom_icc_bcm { > + unsigned char *name; > + u32 type; > + u32 addr; > + u64 vote_x; > + u64 vote_y; > + bool dirty; > + struct bcm_db aux_data; > + struct list_head list; > + size_t num_nodes; > + struct qcom_icc_node *nodes[]; > +}; > + > +struct qcom_icc_fabric { > + struct qcom_icc_node **nodes; > + size_t num_nodes; > + u32 base_offset; > + u32 qos_offset; > +}; Unused > + > +struct qcom_icc_desc { > + struct qcom_icc_node **nodes; > + size_t num_nodes; > + struct qcom_icc_bcm **bcms; > + size_t num_bcms; > +}; > + > +DEFINE_QNODE(qhm_a1noc_cfg, MASTER_A1NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A1NOC); > +DEFINE_QNODE(qhm_qup1, MASTER_BLSP_1, 1, 4, 1, SLAVE_A1NOC_SNOC); > +DEFINE_QNODE(qhm_tsif, MASTER_TSIF, 1, 4, 1, SLAVE_A1NOC_SNOC); > +DEFINE_QNODE(xm_sdc2, MASTER_SDCC_2, 1, 8, 1, SLAVE_A1NOC_SNOC); > +DEFINE_QNODE(xm_sdc4, MASTER_SDCC_4, 1, 8, 1, SLAVE_A1NOC_SNOC); > +DEFINE_QNODE(xm_ufs_card, MASTER_UFS_CARD, 1, 8, 1, SLAVE_A1NOC_SNOC); > +DEFINE_QNODE(xm_ufs_mem, MASTER_UFS_MEM, 1, 8, 1, SLAVE_A1NOC_SNOC); > +DEFINE_QNODE(xm_pcie_0, MASTER_PCIE_0, 1, 8, 1, SLAVE_ANOC_PCIE_A1NOC_SNOC); > +DEFINE_QNODE(qhm_a2noc_cfg, MASTER_A2NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A2NOC); > +DEFINE_QNODE(qhm_qdss_bam, MASTER_QDSS_BAM, 1, 4, 1, SLAVE_A2NOC_SNOC); > +DEFINE_QNODE(qhm_qup2, MASTER_BLSP_2, 1, 4, 1, SLAVE_A2NOC_SNOC); > +DEFINE_QNODE(qnm_cnoc, MASTER_CNOC_A2NOC, 1, 8, 1, 
SLAVE_A2NOC_SNOC); > +DEFINE_QNODE(qxm_crypto, MASTER_CRYPTO, 1, 8, 1, SLAVE_A2NOC_SNOC); > +DEFINE_QNODE(qxm_ipa, MASTER_IPA, 1, 8, 1, SLAVE_A2NOC_SNOC); > +DEFINE_QNODE(xm_pcie3_1, MASTER_PCIE_1, 1, 8, 1, SLAVE_ANOC_PCIE_SNOC); > +DEFINE_QNODE(xm_qdss_etr, MASTER_QDSS_ETR, 1, 8, 1, SLAVE_A2NOC_SNOC); > +DEFINE_QNODE(xm_usb3_0, MASTER_USB3_0, 1, 8, 1, SLAVE_A2NOC_SNOC); > +DEFINE_QNODE(xm_usb3_1, MASTER_USB3_1, 1, 8, 1, SLAVE_A2NOC_SNOC); > +DEFINE_QNODE(qxm_camnoc_hf0_uncomp, MASTER_CAMNOC_HF0_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP); > +DEFINE_QNODE(qxm_camnoc_hf1_uncomp, MASTER_CAMNOC_HF1_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP); > +DEFINE_QNODE(qxm_camnoc_sf_uncomp, MASTER_CAMNOC_SF_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP); > +DEFINE_QNODE(qhm_spdm, MASTER_SPDM, 1, 4, 1, SLAVE_CNOC_A2NOC); > +DEFINE_QNODE(qhm_tic, MASTER_TIC, 1, 4, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC); > +DEFINE_QNODE(qnm_snoc, MASTER_SNOC_CNOC, 1, 8, 42, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, 
SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_SERVICE_CNOC); > +DEFINE_QNODE(xm_qdss_dap, MASTER_QDSS_DAP, 1, 8, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC); > +DEFINE_QNODE(qhm_cnoc, MASTER_CNOC_DC_NOC, 1, 4, 2, SLAVE_LLCC_CFG, SLAVE_MEM_NOC_CFG); > +DEFINE_QNODE(acm_l3, MASTER_APPSS_PROC, 1, 16, 3, SLAVE_GNOC_SNOC, SLAVE_GNOC_MEM_NOC, SLAVE_SERVICE_GNOC); > +DEFINE_QNODE(pm_gnoc_cfg, MASTER_GNOC_CFG, 1, 4, 1, SLAVE_SERVICE_GNOC); > +DEFINE_QNODE(ipa_core_master, MASTER_IPA_CORE, 1, 8, 1, SLAVE_IPA_CORE); > +DEFINE_QNODE(llcc_mc, MASTER_LLCC, 4, 4, 1, SLAVE_EBI1); > +DEFINE_QNODE(acm_tcu, MASTER_TCU_0, 1, 8, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC); > +DEFINE_QNODE(qhm_memnoc_cfg, MASTER_MEM_NOC_CFG, 1, 4, 2, SLAVE_MSS_PROC_MS_MPU_CFG, SLAVE_SERVICE_MEM_NOC); > +DEFINE_QNODE(qnm_apps, MASTER_GNOC_MEM_NOC, 2, 32, 1, SLAVE_LLCC); > +DEFINE_QNODE(qnm_mnoc_hf, MASTER_MNOC_HF_MEM_NOC, 2, 32, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC); > +DEFINE_QNODE(qnm_mnoc_sf, MASTER_MNOC_SF_MEM_NOC, 1, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC); > +DEFINE_QNODE(qnm_snoc_gc, 
MASTER_SNOC_GC_MEM_NOC, 1, 8, 1, SLAVE_LLCC); > +DEFINE_QNODE(qnm_snoc_sf, MASTER_SNOC_SF_MEM_NOC, 1, 16, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC); > +DEFINE_QNODE(qxm_gpu, MASTER_GFX3D, 2, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC); > +DEFINE_QNODE(qhm_mnoc_cfg, MASTER_CNOC_MNOC_CFG, 1, 4, 1, SLAVE_SERVICE_MNOC); > +DEFINE_QNODE(qxm_camnoc_hf0, MASTER_CAMNOC_HF0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC); > +DEFINE_QNODE(qxm_camnoc_hf1, MASTER_CAMNOC_HF1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC); > +DEFINE_QNODE(qxm_camnoc_sf, MASTER_CAMNOC_SF, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC); > +DEFINE_QNODE(qxm_mdp0, MASTER_MDP0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC); > +DEFINE_QNODE(qxm_mdp1, MASTER_MDP1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC); > +DEFINE_QNODE(qxm_rot, MASTER_ROTATOR, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC); > +DEFINE_QNODE(qxm_venus0, MASTER_VIDEO_P0, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC); > +DEFINE_QNODE(qxm_venus1, MASTER_VIDEO_P1, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC); > +DEFINE_QNODE(qxm_venus_arm9, MASTER_VIDEO_PROC, 1, 8, 1, SLAVE_MNOC_SF_MEM_NOC); > +DEFINE_QNODE(qhm_snoc_cfg, MASTER_SNOC_CFG, 1, 4, 1, SLAVE_SERVICE_SNOC); > +DEFINE_QNODE(qnm_aggre1_noc, MASTER_A1NOC_SNOC, 1, 16, 6, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM); > +DEFINE_QNODE(qnm_aggre2_noc, MASTER_A2NOC_SNOC, 1, 16, 9, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU); > +DEFINE_QNODE(qnm_gladiator_sodv, MASTER_GNOC_SNOC, 1, 8, 8, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU); > +DEFINE_QNODE(qnm_memnoc, MASTER_MEM_NOC_SNOC, 1, 8, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM); > +DEFINE_QNODE(qnm_pcie_anoc, MASTER_ANOC_PCIE_SNOC, 1, 16, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_QDSS_STM); > +DEFINE_QNODE(qxm_pimem, MASTER_PIMEM, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, 
SLAVE_IMEM); > +DEFINE_QNODE(xm_gic, MASTER_GIC, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM); > +DEFINE_QNODE(qns_a1noc_snoc, SLAVE_A1NOC_SNOC, 1, 16, 1, MASTER_A1NOC_SNOC); > +DEFINE_QNODE(srvc_aggre1_noc, SLAVE_SERVICE_A1NOC, 1, 4, 0); > +DEFINE_QNODE(qns_pcie_a1noc_snoc, SLAVE_ANOC_PCIE_A1NOC_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC); > +DEFINE_QNODE(qns_a2noc_snoc, SLAVE_A2NOC_SNOC, 1, 16, 1, MASTER_A2NOC_SNOC); > +DEFINE_QNODE(qns_pcie_snoc, SLAVE_ANOC_PCIE_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC); > +DEFINE_QNODE(srvc_aggre2_noc, SLAVE_SERVICE_A2NOC, 1, 4, 0); > +DEFINE_QNODE(qns_camnoc_uncomp, SLAVE_CAMNOC_UNCOMP, 1, 32, 0); > +DEFINE_QNODE(qhs_a1_noc_cfg, SLAVE_A1NOC_CFG, 1, 4, 1, MASTER_A1NOC_CFG); > +DEFINE_QNODE(qhs_a2_noc_cfg, SLAVE_A2NOC_CFG, 1, 4, 1, MASTER_A2NOC_CFG); > +DEFINE_QNODE(qhs_aop, SLAVE_AOP, 1, 4, 0); > +DEFINE_QNODE(qhs_aoss, SLAVE_AOSS, 1, 4, 0); > +DEFINE_QNODE(qhs_camera_cfg, SLAVE_CAMERA_CFG, 1, 4, 0); > +DEFINE_QNODE(qhs_clk_ctl, SLAVE_CLK_CTL, 1, 4, 0); > +DEFINE_QNODE(qhs_compute_dsp_cfg, SLAVE_CDSP_CFG, 1, 4, 0); > +DEFINE_QNODE(qhs_cpr_cx, SLAVE_RBCPR_CX_CFG, 1, 4, 0); > +DEFINE_QNODE(qhs_crypto0_cfg, SLAVE_CRYPTO_0_CFG, 1, 4, 0); > +DEFINE_QNODE(qhs_dcc_cfg, SLAVE_DCC_CFG, 1, 4, 1, MASTER_CNOC_DC_NOC); > +DEFINE_QNODE(qhs_ddrss_cfg, SLAVE_CNOC_DDRSS, 1, 4, 0); > +DEFINE_QNODE(qhs_display_cfg, SLAVE_DISPLAY_CFG, 1, 4, 0); > +DEFINE_QNODE(qhs_glm, SLAVE_GLM, 1, 4, 0); > +DEFINE_QNODE(qhs_gpuss_cfg, SLAVE_GFX3D_CFG, 1, 8, 0); > +DEFINE_QNODE(qhs_imem_cfg, SLAVE_IMEM_CFG, 1, 4, 0); > +DEFINE_QNODE(qhs_ipa, SLAVE_IPA_CFG, 1, 4, 0); > +DEFINE_QNODE(qhs_mnoc_cfg, SLAVE_CNOC_MNOC_CFG, 1, 4, 1, MASTER_CNOC_MNOC_CFG); > +DEFINE_QNODE(qhs_pcie0_cfg, SLAVE_PCIE_0_CFG, 1, 4, 0); > +DEFINE_QNODE(qhs_pcie_gen3_cfg, SLAVE_PCIE_1_CFG, 1, 4, 0); > +DEFINE_QNODE(qhs_pdm, SLAVE_PDM, 1, 4, 0); > +DEFINE_QNODE(qhs_phy_refgen_south, SLAVE_SOUTH_PHY_CFG, 1, 4, 0); > +DEFINE_QNODE(qhs_pimem_cfg, SLAVE_PIMEM_CFG, 1, 4, 0); > +DEFINE_QNODE(qhs_prng, 
SLAVE_PRNG, 1, 4, 0); > +DEFINE_QNODE(qhs_qdss_cfg, SLAVE_QDSS_CFG, 1, 4, 0); > +DEFINE_QNODE(qhs_qupv3_north, SLAVE_BLSP_2, 1, 4, 0); > +DEFINE_QNODE(qhs_qupv3_south, SLAVE_BLSP_1, 1, 4, 0); > +DEFINE_QNODE(qhs_sdc2, SLAVE_SDCC_2, 1, 4, 0); > +DEFINE_QNODE(qhs_sdc4, SLAVE_SDCC_4, 1, 4, 0); > +DEFINE_QNODE(qhs_snoc_cfg, SLAVE_SNOC_CFG, 1, 4, 1, MASTER_SNOC_CFG); > +DEFINE_QNODE(qhs_spdm, SLAVE_SPDM_WRAPPER, 1, 4, 0); > +DEFINE_QNODE(qhs_spss_cfg, SLAVE_SPSS_CFG, 1, 4, 0); > +DEFINE_QNODE(qhs_tcsr, SLAVE_TCSR, 1, 4, 0); > +DEFINE_QNODE(qhs_tlmm_north, SLAVE_TLMM_NORTH, 1, 4, 0); > +DEFINE_QNODE(qhs_tlmm_south, SLAVE_TLMM_SOUTH, 1, 4, 0); > +DEFINE_QNODE(qhs_tsif, SLAVE_TSIF, 1, 4, 0); > +DEFINE_QNODE(qhs_ufs_card_cfg, SLAVE_UFS_CARD_CFG, 1, 4, 0); > +DEFINE_QNODE(qhs_ufs_mem_cfg, SLAVE_UFS_MEM_CFG, 1, 4, 0); > +DEFINE_QNODE(qhs_usb3_0, SLAVE_USB3_0, 1, 4, 0); > +DEFINE_QNODE(qhs_usb3_1, SLAVE_USB3_1, 1, 4, 0); > +DEFINE_QNODE(qhs_venus_cfg, SLAVE_VENUS_CFG, 1, 4, 0); > +DEFINE_QNODE(qhs_vsense_ctrl_cfg, SLAVE_VSENSE_CTRL_CFG, 1, 4, 0); > +DEFINE_QNODE(qns_cnoc_a2noc, SLAVE_CNOC_A2NOC, 1, 8, 1, MASTER_CNOC_A2NOC); > +DEFINE_QNODE(srvc_cnoc, SLAVE_SERVICE_CNOC, 1, 4, 0); > +DEFINE_QNODE(qhs_llcc, SLAVE_LLCC_CFG, 1, 4, 0); > +DEFINE_QNODE(qhs_memnoc, SLAVE_MEM_NOC_CFG, 1, 4, 1, MASTER_MEM_NOC_CFG); > +DEFINE_QNODE(qns_gladiator_sodv, SLAVE_GNOC_SNOC, 1, 8, 1, MASTER_GNOC_SNOC); > +DEFINE_QNODE(qns_gnoc_memnoc, SLAVE_GNOC_MEM_NOC, 2, 32, 1, MASTER_GNOC_MEM_NOC); > +DEFINE_QNODE(srvc_gnoc, SLAVE_SERVICE_GNOC, 1, 4, 0); > +DEFINE_QNODE(ipa_core_slave, SLAVE_IPA_CORE, 1, 8, 0); > +DEFINE_QNODE(ebi, SLAVE_EBI1, 4, 4, 0); > +DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4, 0); > +DEFINE_QNODE(qns_apps_io, SLAVE_MEM_NOC_GNOC, 1, 32, 0); > +DEFINE_QNODE(qns_llcc, SLAVE_LLCC, 4, 16, 1, MASTER_LLCC); > +DEFINE_QNODE(qns_memnoc_snoc, SLAVE_MEM_NOC_SNOC, 1, 8, 1, MASTER_MEM_NOC_SNOC); > +DEFINE_QNODE(srvc_memnoc, SLAVE_SERVICE_MEM_NOC, 1, 4, 0); > 
+DEFINE_QNODE(qns2_mem_noc, SLAVE_MNOC_SF_MEM_NOC, 1, 32, 1, MASTER_MNOC_SF_MEM_NOC); > +DEFINE_QNODE(qns_mem_noc_hf, SLAVE_MNOC_HF_MEM_NOC, 2, 32, 1, MASTER_MNOC_HF_MEM_NOC); > +DEFINE_QNODE(srvc_mnoc, SLAVE_SERVICE_MNOC, 1, 4, 0); > +DEFINE_QNODE(qhs_apss, SLAVE_APPSS, 1, 8, 0); > +DEFINE_QNODE(qns_cnoc, SLAVE_SNOC_CNOC, 1, 8, 1, MASTER_SNOC_CNOC); > +DEFINE_QNODE(qns_memnoc_gc, SLAVE_SNOC_MEM_NOC_GC, 1, 8, 1, MASTER_SNOC_GC_MEM_NOC); > +DEFINE_QNODE(qns_memnoc_sf, SLAVE_SNOC_MEM_NOC_SF, 1, 16, 1, MASTER_SNOC_SF_MEM_NOC); > +DEFINE_QNODE(qxs_imem, SLAVE_IMEM, 1, 8, 0); > +DEFINE_QNODE(qxs_pcie, SLAVE_PCIE_0, 1, 8, 0); > +DEFINE_QNODE(qxs_pcie_gen3, SLAVE_PCIE_1, 1, 8, 0); > +DEFINE_QNODE(qxs_pimem, SLAVE_PIMEM, 1, 8, 0); > +DEFINE_QNODE(srvc_snoc, SLAVE_SERVICE_SNOC, 1, 4, 0); > +DEFINE_QNODE(xs_qdss_stm, SLAVE_QDSS_STM, 1, 4, 0); > +DEFINE_QNODE(xs_sys_tcu_cfg, SLAVE_TCU, 1, 8, 0); > + > +DEFINE_QBCM(bcm_acv, "ACV", 1, &ebi); > +DEFINE_QBCM(bcm_mc0, "MC0", 1, &ebi); > +DEFINE_QBCM(bcm_sh0, "SH0", 1, &qns_llcc); > +DEFINE_QBCM(bcm_mm0, "MM0", 1, &qns_mem_noc_hf); > +DEFINE_QBCM(bcm_sh1, "SH1", 1, &qns_apps_io); > +DEFINE_QBCM(bcm_mm1, "MM1", 7, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1); > +DEFINE_QBCM(bcm_sh2, "SH2", 1, &qns_memnoc_snoc); > +DEFINE_QBCM(bcm_mm2, "MM2", 1, &qns2_mem_noc); > +DEFINE_QBCM(bcm_sh3, "SH3", 1, &acm_tcu); > +DEFINE_QBCM(bcm_mm3, "MM3", 5, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9); > +DEFINE_QBCM(bcm_sh5, "SH5", 1, &qnm_apps); > +DEFINE_QBCM(bcm_sn0, "SN0", 1, &qns_memnoc_sf); > +DEFINE_QBCM(bcm_ce0, "CE0", 1, &qxm_crypto); > +DEFINE_QBCM(bcm_ip0, "IP0", 1, &ipa_core_slave); > +DEFINE_QBCM(bcm_cn0, "CN0", 47, &qhm_spdm, &qhm_tic, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, 
&qhs_display_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pcie0_cfg, &qhs_pcie_gen3_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc); > +DEFINE_QBCM(bcm_qup0, "QUP0", 2, &qhm_qup1, &qhm_qup2); > +DEFINE_QBCM(bcm_sn1, "SN1", 1, &qxs_imem); > +DEFINE_QBCM(bcm_sn2, "SN2", 1, &qns_memnoc_gc); > +DEFINE_QBCM(bcm_sn3, "SN3", 1, &qns_cnoc); > +DEFINE_QBCM(bcm_sn4, "SN4", 1, &qxm_pimem); > +DEFINE_QBCM(bcm_sn5, "SN5", 1, &xs_qdss_stm); > +DEFINE_QBCM(bcm_sn6, "SN6", 3, &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg); > +DEFINE_QBCM(bcm_sn7, "SN7", 1, &qxs_pcie); > +DEFINE_QBCM(bcm_sn8, "SN8", 1, &qxs_pcie_gen3); > +DEFINE_QBCM(bcm_sn9, "SN9", 2, &srvc_aggre1_noc, &qnm_aggre1_noc); > +DEFINE_QBCM(bcm_sn11, "SN11", 2, &srvc_aggre2_noc, &qnm_aggre2_noc); > +DEFINE_QBCM(bcm_sn12, "SN12", 2, &qnm_gladiator_sodv, &xm_gic); > +DEFINE_QBCM(bcm_sn14, "SN14", 1, &qnm_pcie_anoc); > +DEFINE_QBCM(bcm_sn15, "SN15", 1, &qnm_memnoc); > + > +static struct qcom_icc_node *rsc_hlos_nodes[] = { > + &acm_l3, > + &acm_tcu, > + &ipa_core_master, > + &llcc_mc, > + &pm_gnoc_cfg, > + &qhm_a1noc_cfg, > + &qhm_a2noc_cfg, > + &qhm_cnoc, > + &qhm_memnoc_cfg, > + &qhm_mnoc_cfg, > + &qhm_qdss_bam, > + &qhm_qup1, > + &qhm_qup2, > + &qhm_snoc_cfg, > + &qhm_spdm, > + &qhm_tic, > + &qhm_tsif, > + &qnm_aggre1_noc, > + &qnm_aggre2_noc, > + &qnm_apps, > + &qnm_cnoc, > + &qnm_gladiator_sodv, > + &qnm_memnoc, > + &qnm_mnoc_hf, > + &qnm_mnoc_sf, > + &qnm_pcie_anoc, > + &qnm_snoc, > + &qnm_snoc_gc, > + &qnm_snoc_sf, > + &qxm_camnoc_hf0, > + &qxm_camnoc_hf0_uncomp, > + &qxm_camnoc_hf1, > + &qxm_camnoc_hf1_uncomp, > + &qxm_camnoc_sf, > + &qxm_camnoc_sf_uncomp, > + &qxm_crypto, > + 
&qxm_gpu, > + &qxm_ipa, > + &qxm_mdp0, > + &qxm_mdp1, > + &qxm_pimem, > + &qxm_rot, > + &qxm_venus0, > + &qxm_venus1, > + &qxm_venus_arm9, > + &xm_gic, > + &xm_pcie3_1, > + &xm_pcie_0, > + &xm_qdss_dap, > + &xm_qdss_etr, > + &xm_sdc2, > + &xm_sdc4, > + &xm_ufs_card, > + &xm_ufs_mem, > + &xm_usb3_0, > + &xm_usb3_1, > + &ebi, > + &ipa_core_slave, > + &qhs_a1_noc_cfg, > + &qhs_a2_noc_cfg, > + &qhs_aop, > + &qhs_aoss, > + &qhs_apss, > + &qhs_camera_cfg, > + &qhs_clk_ctl, > + &qhs_compute_dsp_cfg, > + &qhs_cpr_cx, > + &qhs_crypto0_cfg, > + &qhs_dcc_cfg, > + &qhs_ddrss_cfg, > + &qhs_display_cfg, > + &qhs_glm, > + &qhs_gpuss_cfg, > + &qhs_imem_cfg, > + &qhs_ipa, > + &qhs_llcc, > + &qhs_mdsp_ms_mpu_cfg, > + &qhs_memnoc, > + &qhs_mnoc_cfg, > + &qhs_pcie0_cfg, > + &qhs_pcie_gen3_cfg, > + &qhs_pdm, > + &qhs_phy_refgen_south, > + &qhs_pimem_cfg, > + &qhs_prng, > + &qhs_qdss_cfg, > + &qhs_qupv3_north, > + &qhs_qupv3_south, > + &qhs_sdc2, > + &qhs_sdc4, > + &qhs_snoc_cfg, > + &qhs_spdm, > + &qhs_spss_cfg, > + &qhs_tcsr, > + &qhs_tlmm_north, > + &qhs_tlmm_south, > + &qhs_tsif, > + &qhs_ufs_card_cfg, > + &qhs_ufs_mem_cfg, > + &qhs_usb3_0, > + &qhs_usb3_1, > + &qhs_venus_cfg, > + &qhs_vsense_ctrl_cfg, > + &qns2_mem_noc, > + &qns_a1noc_snoc, > + &qns_a2noc_snoc, > + &qns_apps_io, > + &qns_camnoc_uncomp, > + &qns_cnoc, > + &qns_cnoc_a2noc, > + &qns_gladiator_sodv, > + &qns_gnoc_memnoc, > + &qns_llcc, > + &qns_mem_noc_hf, > + &qns_memnoc_gc, > + &qns_memnoc_sf, > + &qns_memnoc_snoc, > + &qns_pcie_a1noc_snoc, > + &qns_pcie_snoc, > + &qxs_imem, > + &qxs_pcie, > + &qxs_pcie_gen3, > + &qxs_pimem, > + &srvc_aggre1_noc, > + &srvc_aggre2_noc, > + &srvc_cnoc, > + &srvc_gnoc, > + &srvc_memnoc, > + &srvc_mnoc, > + &srvc_snoc, > + &xs_qdss_stm, > + &xs_sys_tcu_cfg, > +}; > + > +static struct qcom_icc_bcm *rsc_hlos_bcms[] = { > + &bcm_acv, > + &bcm_mc0, > + &bcm_sh0, > + &bcm_mm0, > + &bcm_sh1, > + &bcm_mm1, > + &bcm_sh2, > + &bcm_mm2, > + &bcm_sh3, > + &bcm_mm3, > + &bcm_sh5, > + &bcm_sn0, > + 
&bcm_ce0, > + &bcm_ip0, > + &bcm_cn0, > + &bcm_qup0, > + &bcm_sn1, > + &bcm_sn2, > + &bcm_sn3, > + &bcm_sn4, > + &bcm_sn5, > + &bcm_sn6, > + &bcm_sn7, > + &bcm_sn8, > + &bcm_sn9, > + &bcm_sn11, > + &bcm_sn12, > + &bcm_sn14, > + &bcm_sn15, > +}; > + > +static struct qcom_icc_desc sdm845_rsc_hlos = { > + .nodes = rsc_hlos_nodes, > + .num_nodes = ARRAY_SIZE(rsc_hlos_nodes), > + .bcms = rsc_hlos_bcms, > + .num_bcms = ARRAY_SIZE(rsc_hlos_bcms), > +}; > + > +static int qcom_icc_init(struct icc_node *node) > +{ > + /* TODO: init qos and priority */ > + > + return 0; > +} > + > +static int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev) > +{ > + struct bcm_db buf = {0}; > + struct qcom_icc_node *qn; > + int ret, i; > + > + bcm->addr = cmd_db_read_addr(bcm->name); > + if (!bcm->addr) { > + dev_err(dev, "%s could not find RPMh address\n", > + bcm->name); > + return -EINVAL; > + } > + > + if (!cmd_db_read_aux_data_len(bcm->name)) { > + dev_err(dev, "%s command db missing aux data\n", > + bcm->name); > + return -EINVAL; > + } > + > + ret = cmd_db_read_aux_data(bcm->name, (u8 *)&buf, > + sizeof(struct bcm_db)); Please align to the open parenthesis. > + if (ret < 0) { > + dev_err(dev, "%s command db read error (%d)\n", > + bcm->name, ret); > + return ret; > + } > + > + bcm->aux_data = buf; > + > + for (i = 0; i < bcm->num_nodes; i++){ Please add a space before the open brace. 
> + qn = bcm->nodes[i]; > + qn->bcms[qn->num_bcms] = bcm; > + qn->num_bcms++; > + } > + > + return 0; > +} > + > +static int qcom_tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, > + u64 vote_y, u32 addr, bool commit) > +{ > + int ret = 0; > + bool valid = true; > + > + if (!cmd) > + return ret; > + > + if (vote_x == 0 && vote_y == 0) > + valid = false; > + > + if (vote_x > BCM_TCS_CMD_VOTE_MASK) > + vote_x = BCM_TCS_CMD_VOTE_MASK; > + > + if (vote_y > BCM_TCS_CMD_VOTE_MASK) > + vote_y = BCM_TCS_CMD_VOTE_MASK; > + > + cmd->addr = addr; > + cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y); > + cmd->wait = commit; > + > + return ret; > +} > + > +static void qcom_tcs_list_gen(struct list_head *bcm_list, > + struct tcs_cmd *tcs_list, int *n) > +{ > + struct qcom_icc_bcm *bcm; > + bool commit; > + size_t idx = 0, batch = 0, cur_vcd_size = 0; > + > + memset(n, 0, sizeof(int) * SDM845_MAX_VCD); > + > + list_for_each_entry(bcm, bcm_list, list){ > + commit = false; > + cur_vcd_size++; > + if((bcm->aux_data.vcd != > + list_next_entry(bcm, list)->aux_data.vcd) || > + list_is_last(&bcm->list, bcm_list)) { > + commit = true; > + cur_vcd_size = 0; > + } > + qcom_tcs_cmd_gen(&tcs_list[idx], bcm->vote_x, bcm->vote_y, > + bcm->addr, commit); > + idx++; > + n[batch]++; > + if (n[batch] >= MAX_RPMH_PAYLOAD && commit == false) { !commit instead of (commit == false) > + n[batch] -= cur_vcd_size; > + batch++; > + n[batch] = cur_vcd_size; > + } > + } > + n[batch+1] = 0; > +} > + > +static void qcom_icc_bcm_aggregate(struct qcom_icc_bcm *bcm) > +{ > + size_t i; > + u64 agg_avg = 0; > + u64 agg_peak = 0; > + > + for (i = 0; i < bcm->num_nodes; i++){ > + agg_avg = max(agg_avg, > + bcm->nodes[i]->sum_avg * bcm->aux_data.width / > + (bcm->nodes[i]->buswidth * bcm->nodes[i]->channels)); > + agg_peak = max(agg_peak, > + bcm->nodes[i]->max_peak * bcm->aux_data.width / > + bcm->nodes[i]->buswidth); > + } > + > + bcm->vote_x = (u64)(agg_avg * 1000ULL / bcm->aux_data.unit); > + bcm->vote_y = 
(u64)(agg_peak * 1000ULL / bcm->aux_data.unit); > + bcm->dirty = true; > +} > + > +static void qcom_icc_aggregate(struct icc_node *node) > +{ > + size_t i; > + struct icc_req *r; > + struct qcom_icc_node *qn; > + u64 agg_avg = 0; > + u64 agg_peak = 0; > + > + qn = node->data; > + > + hlist_for_each_entry(r, &node->req_list, req_node) { > + agg_avg += r->avg_bw; > + agg_peak = max(agg_peak, (u64)r->peak_bw); You can use max_t(u64, agg_peak, r->peak_bw); > + } > + > + qn->sum_avg = agg_avg; > + qn->max_peak = agg_peak; > + > + for(i = 0; i < qn->num_bcms; i++) { > + qcom_icc_bcm_aggregate(qn->bcms[i]); > + } > +} > + > +static int qcom_icc_set(struct icc_node *src, struct icc_node *dst, > + u32 avg, u32 peak) > +{ > + struct qcom_icc_provider *qp; > + struct qcom_icc_node *qn; > + struct icc_node *node; > + struct icc_provider *provider; > + struct tcs_cmd amc[SDM845_MAX_BCMS]; > + struct list_head commit_list; > + int n[SDM845_MAX_VCD]; > + int ret = 0, i; > + > + if (!src) > + node = dst; > + else > + node = src; > + > + qn = node->data; > + provider = node->provider; > + qp = to_qcom_provider(node->provider); > + > + INIT_LIST_HEAD(&commit_list); > + > + for(i = 0; i < qp->num_bcms; i++){ > + if(qp->bcms[i]->dirty == true) { if (qp->bcms[i]->dirty) { > + list_add_tail(&qp->bcms[i]->list, &commit_list); > + qp->bcms[i]->dirty = false; > + } > + } > + > + qcom_tcs_list_gen(&commit_list, amc, n); > + > + if (!n[0]) > + return ret; > + > + ret = rpmh_invalidate(qp->rpmh_client); > + if (ret){ > + pr_err("Error invalidating RPMH client (%d)\n", ret); > + return ret; > + } > + > + ret = rpmh_write_batch(qp->rpmh_client, RPMH_ACTIVE_ONLY_STATE, amc, n); > + if (ret){ > + pr_err("Error sending AMC RPMH requests (%d)\n", ret); > + return ret; > + } > + > + /* TODO: collect and send wake and sleep sets */ > + return ret; > +} > + > +static int cmp_vcd(const void *_l, const void *_r) > +{ > + const struct qcom_icc_bcm **l = (const struct qcom_icc_bcm**)_l; > + const struct 
qcom_icc_bcm **r = (const struct qcom_icc_bcm**)_r; > + > + if (l[0]->aux_data.vcd < r[0]->aux_data.vcd) > + return -1; > + else if (l[0]->aux_data.vcd == r[0]->aux_data.vcd) > + return 0; > + else > + return 1; > +} > + > +static int qnoc_probe(struct platform_device *pdev) > +{ > + const struct qcom_icc_desc *desc; > + struct qcom_icc_node **qnodes; > + struct qcom_icc_provider *qp; > + struct resource *res; Unused > + struct icc_provider *provider; > + size_t num_nodes, i; > + int ret; > + > + desc = of_device_get_match_data(&pdev->dev); > + if (!desc) > + return -EINVAL; > + > + qnodes = desc->nodes; > + num_nodes = desc->num_nodes; > + > + qcom_dev = &pdev->dev; > + > + qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL); > + if (!qp) > + return -ENOMEM; > + > + provider = &qp->provider; > + provider->dev = &pdev->dev; > + provider->set = &qcom_icc_set; > + provider->aggregate = &qcom_icc_aggregate; > + INIT_LIST_HEAD(&provider->nodes); > + provider->data = qp; > + > + qp->rpmh_client = rpmh_get_client(pdev); > + qp->bcms = desc->bcms; > + qp->num_bcms = desc->num_bcms; > + > + ret = icc_provider_add(provider); > + if (ret) { > + dev_err(&pdev->dev, "error adding interconnect provider\n"); > + return ret; > + } > + > + for (i = 0; i < num_nodes; i++) { > + struct icc_node *node; > + int ret; > + size_t j; > + > + node = icc_node_create(qnodes[i]->id); > + if (IS_ERR(node)) { > + ret = PTR_ERR(node); > + goto err; > + } > + > + node->name = qnodes[i]->name; > + node->data = qnodes[i]; > + icc_node_add(node, provider); > + > + dev_dbg(&pdev->dev, "registered node %p %s %d\n", node, > + qnodes[i]->name, node->id); > + > + /* populate links */ > + for (j = 0; j < qnodes[i]->num_links; j++) > + if (qnodes[i]->links[j]) > + icc_link_create(node, qnodes[i]->links[j]); > + > + ret = qcom_icc_init(node); It would be better to call first init() and then create the links, as Evan has already suggested. 
> + if (ret) > + dev_err(&pdev->dev, "%s init error (%d)\n", node->name, > + ret); > + } > + > + for (i = 0; i < qp->num_bcms; i++) { > + qcom_icc_bcm_init(qp->bcms[i], &pdev->dev); > + } Braces are not necessary. > + > + sort(qp->bcms, qp->num_bcms, sizeof(*qp->bcms), cmp_vcd, NULL); > + > + platform_set_drvdata(pdev, provider); > + dev_info(&pdev->dev, "Registered SDM845 ICC\n"); > + > + return ret; > +err: > + icc_provider_del(provider); > + return ret; > +} > + > +static int qnoc_remove(struct platform_device *pdev) > +{ > + struct icc_provider *provider = platform_get_drvdata(pdev); > + > + icc_provider_del(provider); > + > + return 0; > +} > + > +static const struct of_device_id qnoc_of_match[] = { > + { .compatible = "qcom,rsc-hlos-sdm845", .data = &sdm845_rsc_hlos }, The format of the compatible should be '<vendor>,<soc>-<block>', so in this case it's "qcom,sdm845-rsc-hlos". Also we need to add it to Documentation/devicetree/bindings/interconnect/qcom.txt Thanks, Georgi > + { }, > +}; > +MODULE_DEVICE_TABLE(of, qnoc_of_match); > + > +static struct platform_driver qnoc_driver = { > + .probe = qnoc_probe, > + .remove = qnoc_remove, > + .driver = { > + .name = "qnoc-sdm845", > + .of_match_table = qnoc_of_match, > + }, > +}; > +module_platform_driver(qnoc_driver); > + > +MODULE_AUTHOR("David Dai <daidavid1@codeaurora.org>"); > +MODULE_DESCRIPTION("Qualcomm sdm845 NoC driver"); > +MODULE_LICENSE("GPL v2"); >
Hi Georgi, Thanks for the feedback! Will address them in the next patch. To answer the question about vote_x and vote_y, the threshold values are still used but just interpreted differently by the hardware depending on the type of the bcm. @type can also be more than latency or bandwidth, will expand on this in a future patch. An example of this is the active client vector (ACV) BCM node, whereby vote_y is one-hot encoded to indicate whether or not there's an active request on the DDR slave from a particular subsystem. Regards, David On 5/28/2018 6:50 AM, Georgi Djakov wrote: > Hi David, > Thank you for the patch! > > On 05/25/2018 09:02 PM, David Dai wrote: >> Introduce Qualcomm SDM845 specific provider driver using the >> interconnect framework. >> >> Change-Id: I18194854a6029e814b2deedc1f5ebae6dffd42bf > Please remove the change-id from all patches. > >> Signed-off-by: David Dai <daidavid1@codeaurora.org> >> --- >> drivers/interconnect/qcom/Kconfig | 6 + >> drivers/interconnect/qcom/Makefile | 1 + >> drivers/interconnect/qcom/qcom-icc-ids.h | 142 ++++++ >> drivers/interconnect/qcom/sdm845.c | 815 +++++++++++++++++++++++++++++++ >> 4 files changed, 964 insertions(+) >> create mode 100644 drivers/interconnect/qcom/qcom-icc-ids.h >> create mode 100644 drivers/interconnect/qcom/sdm845.c >> >> diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig >> index 86465dc..8901181 100644 >> --- a/drivers/interconnect/qcom/Kconfig >> +++ b/drivers/interconnect/qcom/Kconfig >> @@ -9,3 +9,9 @@ config INTERCONNECT_QCOM_MSM8916 >> depends on INTERCONNECT_QCOM >> help >> This is a driver for the Qualcomm Network-on-Chip on msm8916-based platforms. >> + >> +config INTERCONNECT_QCOM_SDM845 >> + tristate "Qualcomm SDM845 interconnect driver" >> + depends on INTERCONNECT_QCOM > maybe also: > depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST > >> + help >> + This is a driver for the Qualcomm Network-on-Chip on sdm845-based platforms. 
>> diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile >> index e8b24c3..9b08d01 100644 >> --- a/drivers/interconnect/qcom/Makefile >> +++ b/drivers/interconnect/qcom/Makefile >> @@ -2,3 +2,4 @@ >> obj-y += smd-rpm.o >> >> obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += msm8916.o >> +obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += sdm845.o >> diff --git a/drivers/interconnect/qcom/qcom-icc-ids.h b/drivers/interconnect/qcom/qcom-icc-ids.h >> new file mode 100644 >> index 0000000..527c299 >> --- /dev/null >> +++ b/drivers/interconnect/qcom/qcom-icc-ids.h > In order to use these ids in DT files, i have moved them recently into > include/dt-bindings/interconnect/qcom.h > > I will resend my updated patches shortly. > >> @@ -0,0 +1,142 @@ >> +// SPDX-License-Identifier: GPL-2.0 > The preferred format for header files is: > /* SPDX-License-Identifier: GPL-2.0 */ > >> +/* >> + * Copyright (c) 2018, The Linux Foundation. All rights reserved. >> + * >> + */ >> + >> +#ifndef _QCOM_ICC_IDS_H >> +#define _QCOM_ICC_IDS_H >> + >> +#define MASTER_APPSS_PROC 0 >> +#define MASTER_TCU_0 1 >> +#define MASTER_IPA_CORE 2 >> +#define MASTER_LLCC 3 > [..] > >> +#define SLAVE_SERVICE_GNOC 582 >> +#define SLAVE_SERVICE_MEM_NOC 583 >> +#define SLAVE_SERVICE_MNOC 584 >> +#define SLAVE_SERVICE_SNOC 585 >> +#define SLAVE_QDSS_STM 586 >> +#define SLAVE_TCU 587 > Please use just tabs with no spaces. > >> +#endif >> diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c >> new file mode 100644 >> index 0000000..f5a77fe >> --- /dev/null >> +++ b/drivers/interconnect/qcom/sdm845.c >> @@ -0,0 +1,815 @@ >> +// SPDX-License-Identifier: GPL-2.0 >> +/* >> + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
>> + * >> + */ >> + >> +#include <linux/device.h> >> +#include <linux/io.h> >> +#include <linux/interconnect.h> >> +#include <linux/interconnect-provider.h> >> +#include <linux/module.h> >> +#include <linux/of_device.h> >> +#include <linux/of_platform.h> >> +#include <linux/platform_device.h> >> +#include <linux/slab.h> >> +#include <linux/sort.h> >> +#include <linux/debugfs.h> > This header seems unused. > >> + >> +#include <soc/qcom/rpmh.h> >> +#include <soc/qcom/tcs.h> >> +#include <soc/qcom/cmd-db.h> > Alphabetical order please. > >> + >> +#include "qcom-icc-ids.h" >> + >> +#define BCM_TCS_CMD_COMMIT_SHFT 30 >> +#define BCM_TCS_CMD_COMMIT_MASK 0x40000000 >> +#define BCM_TCS_CMD_VALID_SHFT 29 >> +#define BCM_TCS_CMD_VALID_MASK 0x20000000 >> +#define BCM_TCS_CMD_VOTE_X_SHFT 14 >> +#define BCM_TCS_CMD_VOTE_MASK 0x3FFF >> +#define BCM_TCS_CMD_VOTE_Y_SHFT 0 >> +#define BCM_TCS_CMD_VOTE_Y_MASK 0xFFFC000 > Please use lower case for hexadecimals. > >> + >> +#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \ >> + (((commit & 0x1) << BCM_TCS_CMD_COMMIT_SHFT) |\ >> + ((valid & 0x1) << BCM_TCS_CMD_VALID_SHFT) |\ >> + ((vote_x & BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) |\ >> + ((vote_y & BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT)) >> + >> +#define to_qcom_provider(_provider) \ >> + container_of(_provider, struct qcom_icc_provider, provider) >> + >> +#define DEFINE_QNODE(_name, _id, _channels, _buswidth, \ >> + _numlinks, ...) \ >> + static struct qcom_icc_node _name = { \ >> + .id = _id, \ >> + .name = #_name, \ >> + .channels = _channels, \ >> + .buswidth = _buswidth, \ >> + .num_links = _numlinks, \ >> + .links = { __VA_ARGS__ }, \ >> + } >> + >> +#define DEFINE_QBCM(_name, _bcmname, _numnodes, ...) 
\ >> + static struct qcom_icc_bcm _name = { \ >> + .num_nodes = _numnodes, \ >> + .name = _bcmname, \ >> + .nodes = { __VA_ARGS__ }, \ >> + } >> + >> +static struct device *qcom_dev; >> + >> +struct qcom_icc_provider { >> + struct icc_provider provider; >> + void __iomem *base; >> + struct rpmh_client *rpmh_client; >> + struct qcom_icc_bcm **bcms; >> + size_t num_bcms; >> +}; >> + >> +/** >> + * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager(BCM) >> + * @unit: bcm threshold values are in magnitudes of this >> + * @width: prototype width >> + * @vcd: virtual clock domain that this bcm belongs to >> +*/ > Please align to the previous line. > >> + >> +struct bcm_db { >> + u32 unit; >> + u16 width; >> + u8 vcd; >> + u8 reserved; >> +}; >> + >> +#define SDM845_MAX_LINKS 43 >> +#define SDM845_MAX_BCMS 30 >> +#define SDM845_MAX_BCM_PER_NODE 2 >> +#define SDM845_MAX_VCD 10 >> + >> +/** >> + * struct qcom_icc_node - Qualcomm specific interconnect nodes >> + * @name: the node name used in debugfs >> + * @links: an array of nodes where we can go next while traversing >> + * @id: a unique node identifier >> + * @num_links: the total number of @links >> + * @channels: num of channels at this node >> + * @buswidth: width of the interconnect between a node and the bus >> + * @sum_avg: current sum aggregate value of all avg bw requests >> + * @max_peak: current max aggregate value of all peak bw requests >> + * @bcms: list of bcms associated with this logical node >> + * @num_bcm: num of @bcms >> + */ >> +struct qcom_icc_node { >> + unsigned char *name; >> + u16 links[SDM845_MAX_LINKS]; >> + u16 id; >> + u16 num_links; >> + u16 channels; >> + u16 buswidth; >> + u64 sum_avg; >> + u64 max_peak; >> + struct qcom_icc_bcm *bcms[SDM845_MAX_BCM_PER_NODE]; >> + size_t num_bcms; >> +}; >> + >> +/** >> + * struct qcom_icc_bcm - Qualcomm specific hardware accelerator nodes >> + * known as Bus Clock Manager(BCM) >> + * @name: the bcm node name used to fetch BCM data from 
command db >> + * @type: latency or bandwidth bcm >> + * @addr: address offsets used when voting to RPMH >> + * @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm >> + * @vote_y: aggregated threshold values, represents peak_bw when @type is bw bcm > Are these used when @type is latency? > >> + * @dirty: flag used to indicate whether or bcm needs to be committed >> + * @aux_data: auxiliary data used when calculating threshold values and >> + * communicating with RPMh >> + * @list: used to link to other bcms when compiling lists for commit >> + * @num_nodes: total number of @num_nodes >> + * @nodes: list of qcom_icc_nodes that this BCM encapsulates >> + */ >> + >> +struct qcom_icc_bcm { >> + unsigned char *name; >> + u32 type; >> + u32 addr; >> + u64 vote_x; >> + u64 vote_y; >> + bool dirty; >> + struct bcm_db aux_data; >> + struct list_head list; >> + size_t num_nodes; >> + struct qcom_icc_node *nodes[]; >> +}; >> + >> +struct qcom_icc_fabric { >> + struct qcom_icc_node **nodes; >> + size_t num_nodes; >> + u32 base_offset; >> + u32 qos_offset; >> +}; > Unused > >> + >> +struct qcom_icc_desc { >> + struct qcom_icc_node **nodes; >> + size_t num_nodes; >> + struct qcom_icc_bcm **bcms; >> + size_t num_bcms; >> +}; >> + >> +DEFINE_QNODE(qhm_a1noc_cfg, MASTER_A1NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A1NOC); >> +DEFINE_QNODE(qhm_qup1, MASTER_BLSP_1, 1, 4, 1, SLAVE_A1NOC_SNOC); >> +DEFINE_QNODE(qhm_tsif, MASTER_TSIF, 1, 4, 1, SLAVE_A1NOC_SNOC); >> +DEFINE_QNODE(xm_sdc2, MASTER_SDCC_2, 1, 8, 1, SLAVE_A1NOC_SNOC); >> +DEFINE_QNODE(xm_sdc4, MASTER_SDCC_4, 1, 8, 1, SLAVE_A1NOC_SNOC); >> +DEFINE_QNODE(xm_ufs_card, MASTER_UFS_CARD, 1, 8, 1, SLAVE_A1NOC_SNOC); >> +DEFINE_QNODE(xm_ufs_mem, MASTER_UFS_MEM, 1, 8, 1, SLAVE_A1NOC_SNOC); >> +DEFINE_QNODE(xm_pcie_0, MASTER_PCIE_0, 1, 8, 1, SLAVE_ANOC_PCIE_A1NOC_SNOC); >> +DEFINE_QNODE(qhm_a2noc_cfg, MASTER_A2NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A2NOC); >> +DEFINE_QNODE(qhm_qdss_bam, MASTER_QDSS_BAM, 1, 4, 1, 
SLAVE_A2NOC_SNOC); >> +DEFINE_QNODE(qhm_qup2, MASTER_BLSP_2, 1, 4, 1, SLAVE_A2NOC_SNOC); >> +DEFINE_QNODE(qnm_cnoc, MASTER_CNOC_A2NOC, 1, 8, 1, SLAVE_A2NOC_SNOC); >> +DEFINE_QNODE(qxm_crypto, MASTER_CRYPTO, 1, 8, 1, SLAVE_A2NOC_SNOC); >> +DEFINE_QNODE(qxm_ipa, MASTER_IPA, 1, 8, 1, SLAVE_A2NOC_SNOC); >> +DEFINE_QNODE(xm_pcie3_1, MASTER_PCIE_1, 1, 8, 1, SLAVE_ANOC_PCIE_SNOC); >> +DEFINE_QNODE(xm_qdss_etr, MASTER_QDSS_ETR, 1, 8, 1, SLAVE_A2NOC_SNOC); >> +DEFINE_QNODE(xm_usb3_0, MASTER_USB3_0, 1, 8, 1, SLAVE_A2NOC_SNOC); >> +DEFINE_QNODE(xm_usb3_1, MASTER_USB3_1, 1, 8, 1, SLAVE_A2NOC_SNOC); >> +DEFINE_QNODE(qxm_camnoc_hf0_uncomp, MASTER_CAMNOC_HF0_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP); >> +DEFINE_QNODE(qxm_camnoc_hf1_uncomp, MASTER_CAMNOC_HF1_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP); >> +DEFINE_QNODE(qxm_camnoc_sf_uncomp, MASTER_CAMNOC_SF_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP); >> +DEFINE_QNODE(qhm_spdm, MASTER_SPDM, 1, 4, 1, SLAVE_CNOC_A2NOC); >> +DEFINE_QNODE(qhm_tic, MASTER_TIC, 1, 4, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC); >> +DEFINE_QNODE(qnm_snoc, MASTER_SNOC_CNOC, 1, 8, 42, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, 
SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_SERVICE_CNOC); >> +DEFINE_QNODE(xm_qdss_dap, MASTER_QDSS_DAP, 1, 8, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC); >> +DEFINE_QNODE(qhm_cnoc, MASTER_CNOC_DC_NOC, 1, 4, 2, SLAVE_LLCC_CFG, SLAVE_MEM_NOC_CFG); >> +DEFINE_QNODE(acm_l3, MASTER_APPSS_PROC, 1, 16, 3, SLAVE_GNOC_SNOC, SLAVE_GNOC_MEM_NOC, SLAVE_SERVICE_GNOC); >> +DEFINE_QNODE(pm_gnoc_cfg, MASTER_GNOC_CFG, 1, 4, 1, SLAVE_SERVICE_GNOC); >> +DEFINE_QNODE(ipa_core_master, MASTER_IPA_CORE, 1, 8, 1, SLAVE_IPA_CORE); >> +DEFINE_QNODE(llcc_mc, MASTER_LLCC, 4, 4, 1, SLAVE_EBI1); >> +DEFINE_QNODE(acm_tcu, MASTER_TCU_0, 1, 8, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC); >> +DEFINE_QNODE(qhm_memnoc_cfg, MASTER_MEM_NOC_CFG, 1, 4, 2, SLAVE_MSS_PROC_MS_MPU_CFG, SLAVE_SERVICE_MEM_NOC); >> +DEFINE_QNODE(qnm_apps, MASTER_GNOC_MEM_NOC, 2, 32, 1, SLAVE_LLCC); >> +DEFINE_QNODE(qnm_mnoc_hf, MASTER_MNOC_HF_MEM_NOC, 2, 32, 2, 
SLAVE_MEM_NOC_GNOC, SLAVE_LLCC); >> +DEFINE_QNODE(qnm_mnoc_sf, MASTER_MNOC_SF_MEM_NOC, 1, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC); >> +DEFINE_QNODE(qnm_snoc_gc, MASTER_SNOC_GC_MEM_NOC, 1, 8, 1, SLAVE_LLCC); >> +DEFINE_QNODE(qnm_snoc_sf, MASTER_SNOC_SF_MEM_NOC, 1, 16, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC); >> +DEFINE_QNODE(qxm_gpu, MASTER_GFX3D, 2, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC); >> +DEFINE_QNODE(qhm_mnoc_cfg, MASTER_CNOC_MNOC_CFG, 1, 4, 1, SLAVE_SERVICE_MNOC); >> +DEFINE_QNODE(qxm_camnoc_hf0, MASTER_CAMNOC_HF0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC); >> +DEFINE_QNODE(qxm_camnoc_hf1, MASTER_CAMNOC_HF1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC); >> +DEFINE_QNODE(qxm_camnoc_sf, MASTER_CAMNOC_SF, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC); >> +DEFINE_QNODE(qxm_mdp0, MASTER_MDP0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC); >> +DEFINE_QNODE(qxm_mdp1, MASTER_MDP1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC); >> +DEFINE_QNODE(qxm_rot, MASTER_ROTATOR, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC); >> +DEFINE_QNODE(qxm_venus0, MASTER_VIDEO_P0, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC); >> +DEFINE_QNODE(qxm_venus1, MASTER_VIDEO_P1, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC); >> +DEFINE_QNODE(qxm_venus_arm9, MASTER_VIDEO_PROC, 1, 8, 1, SLAVE_MNOC_SF_MEM_NOC); >> +DEFINE_QNODE(qhm_snoc_cfg, MASTER_SNOC_CFG, 1, 4, 1, SLAVE_SERVICE_SNOC); >> +DEFINE_QNODE(qnm_aggre1_noc, MASTER_A1NOC_SNOC, 1, 16, 6, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM); >> +DEFINE_QNODE(qnm_aggre2_noc, MASTER_A2NOC_SNOC, 1, 16, 9, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU); >> +DEFINE_QNODE(qnm_gladiator_sodv, MASTER_GNOC_SNOC, 1, 8, 8, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU); >> +DEFINE_QNODE(qnm_memnoc, MASTER_MEM_NOC_SNOC, 1, 8, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM); >> 
+DEFINE_QNODE(qnm_pcie_anoc, MASTER_ANOC_PCIE_SNOC, 1, 16, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_QDSS_STM); >> +DEFINE_QNODE(qxm_pimem, MASTER_PIMEM, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM); >> +DEFINE_QNODE(xm_gic, MASTER_GIC, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM); >> +DEFINE_QNODE(qns_a1noc_snoc, SLAVE_A1NOC_SNOC, 1, 16, 1, MASTER_A1NOC_SNOC); >> +DEFINE_QNODE(srvc_aggre1_noc, SLAVE_SERVICE_A1NOC, 1, 4, 0); >> +DEFINE_QNODE(qns_pcie_a1noc_snoc, SLAVE_ANOC_PCIE_A1NOC_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC); >> +DEFINE_QNODE(qns_a2noc_snoc, SLAVE_A2NOC_SNOC, 1, 16, 1, MASTER_A2NOC_SNOC); >> +DEFINE_QNODE(qns_pcie_snoc, SLAVE_ANOC_PCIE_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC); >> +DEFINE_QNODE(srvc_aggre2_noc, SLAVE_SERVICE_A2NOC, 1, 4, 0); >> +DEFINE_QNODE(qns_camnoc_uncomp, SLAVE_CAMNOC_UNCOMP, 1, 32, 0); >> +DEFINE_QNODE(qhs_a1_noc_cfg, SLAVE_A1NOC_CFG, 1, 4, 1, MASTER_A1NOC_CFG); >> +DEFINE_QNODE(qhs_a2_noc_cfg, SLAVE_A2NOC_CFG, 1, 4, 1, MASTER_A2NOC_CFG); >> +DEFINE_QNODE(qhs_aop, SLAVE_AOP, 1, 4, 0); >> +DEFINE_QNODE(qhs_aoss, SLAVE_AOSS, 1, 4, 0); >> +DEFINE_QNODE(qhs_camera_cfg, SLAVE_CAMERA_CFG, 1, 4, 0); >> +DEFINE_QNODE(qhs_clk_ctl, SLAVE_CLK_CTL, 1, 4, 0); >> +DEFINE_QNODE(qhs_compute_dsp_cfg, SLAVE_CDSP_CFG, 1, 4, 0); >> +DEFINE_QNODE(qhs_cpr_cx, SLAVE_RBCPR_CX_CFG, 1, 4, 0); >> +DEFINE_QNODE(qhs_crypto0_cfg, SLAVE_CRYPTO_0_CFG, 1, 4, 0); >> +DEFINE_QNODE(qhs_dcc_cfg, SLAVE_DCC_CFG, 1, 4, 1, MASTER_CNOC_DC_NOC); >> +DEFINE_QNODE(qhs_ddrss_cfg, SLAVE_CNOC_DDRSS, 1, 4, 0); >> +DEFINE_QNODE(qhs_display_cfg, SLAVE_DISPLAY_CFG, 1, 4, 0); >> +DEFINE_QNODE(qhs_glm, SLAVE_GLM, 1, 4, 0); >> +DEFINE_QNODE(qhs_gpuss_cfg, SLAVE_GFX3D_CFG, 1, 8, 0); >> +DEFINE_QNODE(qhs_imem_cfg, SLAVE_IMEM_CFG, 1, 4, 0); >> +DEFINE_QNODE(qhs_ipa, SLAVE_IPA_CFG, 1, 4, 0); >> +DEFINE_QNODE(qhs_mnoc_cfg, SLAVE_CNOC_MNOC_CFG, 1, 4, 1, MASTER_CNOC_MNOC_CFG); >> +DEFINE_QNODE(qhs_pcie0_cfg, SLAVE_PCIE_0_CFG, 1, 4, 0); >> 
+DEFINE_QNODE(qhs_pcie_gen3_cfg, SLAVE_PCIE_1_CFG, 1, 4, 0); >> +DEFINE_QNODE(qhs_pdm, SLAVE_PDM, 1, 4, 0); >> +DEFINE_QNODE(qhs_phy_refgen_south, SLAVE_SOUTH_PHY_CFG, 1, 4, 0); >> +DEFINE_QNODE(qhs_pimem_cfg, SLAVE_PIMEM_CFG, 1, 4, 0); >> +DEFINE_QNODE(qhs_prng, SLAVE_PRNG, 1, 4, 0); >> +DEFINE_QNODE(qhs_qdss_cfg, SLAVE_QDSS_CFG, 1, 4, 0); >> +DEFINE_QNODE(qhs_qupv3_north, SLAVE_BLSP_2, 1, 4, 0); >> +DEFINE_QNODE(qhs_qupv3_south, SLAVE_BLSP_1, 1, 4, 0); >> +DEFINE_QNODE(qhs_sdc2, SLAVE_SDCC_2, 1, 4, 0); >> +DEFINE_QNODE(qhs_sdc4, SLAVE_SDCC_4, 1, 4, 0); >> +DEFINE_QNODE(qhs_snoc_cfg, SLAVE_SNOC_CFG, 1, 4, 1, MASTER_SNOC_CFG); >> +DEFINE_QNODE(qhs_spdm, SLAVE_SPDM_WRAPPER, 1, 4, 0); >> +DEFINE_QNODE(qhs_spss_cfg, SLAVE_SPSS_CFG, 1, 4, 0); >> +DEFINE_QNODE(qhs_tcsr, SLAVE_TCSR, 1, 4, 0); >> +DEFINE_QNODE(qhs_tlmm_north, SLAVE_TLMM_NORTH, 1, 4, 0); >> +DEFINE_QNODE(qhs_tlmm_south, SLAVE_TLMM_SOUTH, 1, 4, 0); >> +DEFINE_QNODE(qhs_tsif, SLAVE_TSIF, 1, 4, 0); >> +DEFINE_QNODE(qhs_ufs_card_cfg, SLAVE_UFS_CARD_CFG, 1, 4, 0); >> +DEFINE_QNODE(qhs_ufs_mem_cfg, SLAVE_UFS_MEM_CFG, 1, 4, 0); >> +DEFINE_QNODE(qhs_usb3_0, SLAVE_USB3_0, 1, 4, 0); >> +DEFINE_QNODE(qhs_usb3_1, SLAVE_USB3_1, 1, 4, 0); >> +DEFINE_QNODE(qhs_venus_cfg, SLAVE_VENUS_CFG, 1, 4, 0); >> +DEFINE_QNODE(qhs_vsense_ctrl_cfg, SLAVE_VSENSE_CTRL_CFG, 1, 4, 0); >> +DEFINE_QNODE(qns_cnoc_a2noc, SLAVE_CNOC_A2NOC, 1, 8, 1, MASTER_CNOC_A2NOC); >> +DEFINE_QNODE(srvc_cnoc, SLAVE_SERVICE_CNOC, 1, 4, 0); >> +DEFINE_QNODE(qhs_llcc, SLAVE_LLCC_CFG, 1, 4, 0); >> +DEFINE_QNODE(qhs_memnoc, SLAVE_MEM_NOC_CFG, 1, 4, 1, MASTER_MEM_NOC_CFG); >> +DEFINE_QNODE(qns_gladiator_sodv, SLAVE_GNOC_SNOC, 1, 8, 1, MASTER_GNOC_SNOC); >> +DEFINE_QNODE(qns_gnoc_memnoc, SLAVE_GNOC_MEM_NOC, 2, 32, 1, MASTER_GNOC_MEM_NOC); >> +DEFINE_QNODE(srvc_gnoc, SLAVE_SERVICE_GNOC, 1, 4, 0); >> +DEFINE_QNODE(ipa_core_slave, SLAVE_IPA_CORE, 1, 8, 0); >> +DEFINE_QNODE(ebi, SLAVE_EBI1, 4, 4, 0); >> +DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SLAVE_MSS_PROC_MS_MPU_CFG, 1, 
4, 0); >> +DEFINE_QNODE(qns_apps_io, SLAVE_MEM_NOC_GNOC, 1, 32, 0); >> +DEFINE_QNODE(qns_llcc, SLAVE_LLCC, 4, 16, 1, MASTER_LLCC); >> +DEFINE_QNODE(qns_memnoc_snoc, SLAVE_MEM_NOC_SNOC, 1, 8, 1, MASTER_MEM_NOC_SNOC); >> +DEFINE_QNODE(srvc_memnoc, SLAVE_SERVICE_MEM_NOC, 1, 4, 0); >> +DEFINE_QNODE(qns2_mem_noc, SLAVE_MNOC_SF_MEM_NOC, 1, 32, 1, MASTER_MNOC_SF_MEM_NOC); >> +DEFINE_QNODE(qns_mem_noc_hf, SLAVE_MNOC_HF_MEM_NOC, 2, 32, 1, MASTER_MNOC_HF_MEM_NOC); >> +DEFINE_QNODE(srvc_mnoc, SLAVE_SERVICE_MNOC, 1, 4, 0); >> +DEFINE_QNODE(qhs_apss, SLAVE_APPSS, 1, 8, 0); >> +DEFINE_QNODE(qns_cnoc, SLAVE_SNOC_CNOC, 1, 8, 1, MASTER_SNOC_CNOC); >> +DEFINE_QNODE(qns_memnoc_gc, SLAVE_SNOC_MEM_NOC_GC, 1, 8, 1, MASTER_SNOC_GC_MEM_NOC); >> +DEFINE_QNODE(qns_memnoc_sf, SLAVE_SNOC_MEM_NOC_SF, 1, 16, 1, MASTER_SNOC_SF_MEM_NOC); >> +DEFINE_QNODE(qxs_imem, SLAVE_IMEM, 1, 8, 0); >> +DEFINE_QNODE(qxs_pcie, SLAVE_PCIE_0, 1, 8, 0); >> +DEFINE_QNODE(qxs_pcie_gen3, SLAVE_PCIE_1, 1, 8, 0); >> +DEFINE_QNODE(qxs_pimem, SLAVE_PIMEM, 1, 8, 0); >> +DEFINE_QNODE(srvc_snoc, SLAVE_SERVICE_SNOC, 1, 4, 0); >> +DEFINE_QNODE(xs_qdss_stm, SLAVE_QDSS_STM, 1, 4, 0); >> +DEFINE_QNODE(xs_sys_tcu_cfg, SLAVE_TCU, 1, 8, 0); >> + >> +DEFINE_QBCM(bcm_acv, "ACV", 1, &ebi); >> +DEFINE_QBCM(bcm_mc0, "MC0", 1, &ebi); >> +DEFINE_QBCM(bcm_sh0, "SH0", 1, &qns_llcc); >> +DEFINE_QBCM(bcm_mm0, "MM0", 1, &qns_mem_noc_hf); >> +DEFINE_QBCM(bcm_sh1, "SH1", 1, &qns_apps_io); >> +DEFINE_QBCM(bcm_mm1, "MM1", 7, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1); >> +DEFINE_QBCM(bcm_sh2, "SH2", 1, &qns_memnoc_snoc); >> +DEFINE_QBCM(bcm_mm2, "MM2", 1, &qns2_mem_noc); >> +DEFINE_QBCM(bcm_sh3, "SH3", 1, &acm_tcu); >> +DEFINE_QBCM(bcm_mm3, "MM3", 5, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9); >> +DEFINE_QBCM(bcm_sh5, "SH5", 1, &qnm_apps); >> +DEFINE_QBCM(bcm_sn0, "SN0", 1, &qns_memnoc_sf); >> +DEFINE_QBCM(bcm_ce0, "CE0", 1, &qxm_crypto); 
>> +DEFINE_QBCM(bcm_ip0, "IP0", 1, &ipa_core_slave); >> +DEFINE_QBCM(bcm_cn0, "CN0", 47, &qhm_spdm, &qhm_tic, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pcie0_cfg, &qhs_pcie_gen3_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc); >> +DEFINE_QBCM(bcm_qup0, "QUP0", 2, &qhm_qup1, &qhm_qup2); >> +DEFINE_QBCM(bcm_sn1, "SN1", 1, &qxs_imem); >> +DEFINE_QBCM(bcm_sn2, "SN2", 1, &qns_memnoc_gc); >> +DEFINE_QBCM(bcm_sn3, "SN3", 1, &qns_cnoc); >> +DEFINE_QBCM(bcm_sn4, "SN4", 1, &qxm_pimem); >> +DEFINE_QBCM(bcm_sn5, "SN5", 1, &xs_qdss_stm); >> +DEFINE_QBCM(bcm_sn6, "SN6", 3, &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg); >> +DEFINE_QBCM(bcm_sn7, "SN7", 1, &qxs_pcie); >> +DEFINE_QBCM(bcm_sn8, "SN8", 1, &qxs_pcie_gen3); >> +DEFINE_QBCM(bcm_sn9, "SN9", 2, &srvc_aggre1_noc, &qnm_aggre1_noc); >> +DEFINE_QBCM(bcm_sn11, "SN11", 2, &srvc_aggre2_noc, &qnm_aggre2_noc); >> +DEFINE_QBCM(bcm_sn12, "SN12", 2, &qnm_gladiator_sodv, &xm_gic); >> +DEFINE_QBCM(bcm_sn14, "SN14", 1, &qnm_pcie_anoc); >> +DEFINE_QBCM(bcm_sn15, "SN15", 1, &qnm_memnoc); >> + >> +static struct qcom_icc_node *rsc_hlos_nodes[] = { >> + &acm_l3, >> + &acm_tcu, >> + &ipa_core_master, >> + &llcc_mc, >> + &pm_gnoc_cfg, >> + &qhm_a1noc_cfg, >> + &qhm_a2noc_cfg, >> + &qhm_cnoc, >> + &qhm_memnoc_cfg, >> + &qhm_mnoc_cfg, >> + &qhm_qdss_bam, >> + &qhm_qup1, >> + &qhm_qup2, >> + &qhm_snoc_cfg, >> + &qhm_spdm, >> + &qhm_tic, >> + &qhm_tsif, >> + &qnm_aggre1_noc, >> + &qnm_aggre2_noc, >> + 
&qnm_apps, >> + &qnm_cnoc, >> + &qnm_gladiator_sodv, >> + &qnm_memnoc, >> + &qnm_mnoc_hf, >> + &qnm_mnoc_sf, >> + &qnm_pcie_anoc, >> + &qnm_snoc, >> + &qnm_snoc_gc, >> + &qnm_snoc_sf, >> + &qxm_camnoc_hf0, >> + &qxm_camnoc_hf0_uncomp, >> + &qxm_camnoc_hf1, >> + &qxm_camnoc_hf1_uncomp, >> + &qxm_camnoc_sf, >> + &qxm_camnoc_sf_uncomp, >> + &qxm_crypto, >> + &qxm_gpu, >> + &qxm_ipa, >> + &qxm_mdp0, >> + &qxm_mdp1, >> + &qxm_pimem, >> + &qxm_rot, >> + &qxm_venus0, >> + &qxm_venus1, >> + &qxm_venus_arm9, >> + &xm_gic, >> + &xm_pcie3_1, >> + &xm_pcie_0, >> + &xm_qdss_dap, >> + &xm_qdss_etr, >> + &xm_sdc2, >> + &xm_sdc4, >> + &xm_ufs_card, >> + &xm_ufs_mem, >> + &xm_usb3_0, >> + &xm_usb3_1, >> + &ebi, >> + &ipa_core_slave, >> + &qhs_a1_noc_cfg, >> + &qhs_a2_noc_cfg, >> + &qhs_aop, >> + &qhs_aoss, >> + &qhs_apss, >> + &qhs_camera_cfg, >> + &qhs_clk_ctl, >> + &qhs_compute_dsp_cfg, >> + &qhs_cpr_cx, >> + &qhs_crypto0_cfg, >> + &qhs_dcc_cfg, >> + &qhs_ddrss_cfg, >> + &qhs_display_cfg, >> + &qhs_glm, >> + &qhs_gpuss_cfg, >> + &qhs_imem_cfg, >> + &qhs_ipa, >> + &qhs_llcc, >> + &qhs_mdsp_ms_mpu_cfg, >> + &qhs_memnoc, >> + &qhs_mnoc_cfg, >> + &qhs_pcie0_cfg, >> + &qhs_pcie_gen3_cfg, >> + &qhs_pdm, >> + &qhs_phy_refgen_south, >> + &qhs_pimem_cfg, >> + &qhs_prng, >> + &qhs_qdss_cfg, >> + &qhs_qupv3_north, >> + &qhs_qupv3_south, >> + &qhs_sdc2, >> + &qhs_sdc4, >> + &qhs_snoc_cfg, >> + &qhs_spdm, >> + &qhs_spss_cfg, >> + &qhs_tcsr, >> + &qhs_tlmm_north, >> + &qhs_tlmm_south, >> + &qhs_tsif, >> + &qhs_ufs_card_cfg, >> + &qhs_ufs_mem_cfg, >> + &qhs_usb3_0, >> + &qhs_usb3_1, >> + &qhs_venus_cfg, >> + &qhs_vsense_ctrl_cfg, >> + &qns2_mem_noc, >> + &qns_a1noc_snoc, >> + &qns_a2noc_snoc, >> + &qns_apps_io, >> + &qns_camnoc_uncomp, >> + &qns_cnoc, >> + &qns_cnoc_a2noc, >> + &qns_gladiator_sodv, >> + &qns_gnoc_memnoc, >> + &qns_llcc, >> + &qns_mem_noc_hf, >> + &qns_memnoc_gc, >> + &qns_memnoc_sf, >> + &qns_memnoc_snoc, >> + &qns_pcie_a1noc_snoc, >> + &qns_pcie_snoc, >> + &qxs_imem, >> + 
&qxs_pcie, >> + &qxs_pcie_gen3, >> + &qxs_pimem, >> + &srvc_aggre1_noc, >> + &srvc_aggre2_noc, >> + &srvc_cnoc, >> + &srvc_gnoc, >> + &srvc_memnoc, >> + &srvc_mnoc, >> + &srvc_snoc, >> + &xs_qdss_stm, >> + &xs_sys_tcu_cfg, >> +}; >> + >> +static struct qcom_icc_bcm *rsc_hlos_bcms[] = { >> + &bcm_acv, >> + &bcm_mc0, >> + &bcm_sh0, >> + &bcm_mm0, >> + &bcm_sh1, >> + &bcm_mm1, >> + &bcm_sh2, >> + &bcm_mm2, >> + &bcm_sh3, >> + &bcm_mm3, >> + &bcm_sh5, >> + &bcm_sn0, >> + &bcm_ce0, >> + &bcm_ip0, >> + &bcm_cn0, >> + &bcm_qup0, >> + &bcm_sn1, >> + &bcm_sn2, >> + &bcm_sn3, >> + &bcm_sn4, >> + &bcm_sn5, >> + &bcm_sn6, >> + &bcm_sn7, >> + &bcm_sn8, >> + &bcm_sn9, >> + &bcm_sn11, >> + &bcm_sn12, >> + &bcm_sn14, >> + &bcm_sn15, >> +}; >> + >> +static struct qcom_icc_desc sdm845_rsc_hlos = { >> + .nodes = rsc_hlos_nodes, >> + .num_nodes = ARRAY_SIZE(rsc_hlos_nodes), >> + .bcms = rsc_hlos_bcms, >> + .num_bcms = ARRAY_SIZE(rsc_hlos_bcms), >> +}; >> + >> +static int qcom_icc_init(struct icc_node *node) >> +{ >> + /* TODO: init qos and priority */ >> + >> + return 0; >> +} >> + >> +static int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev) >> +{ >> + struct bcm_db buf = {0}; >> + struct qcom_icc_node *qn; >> + int ret, i; >> + >> + bcm->addr = cmd_db_read_addr(bcm->name); >> + if (!bcm->addr) { >> + dev_err(dev, "%s could not find RPMh address\n", >> + bcm->name); >> + return -EINVAL; >> + } >> + >> + if (!cmd_db_read_aux_data_len(bcm->name)) { >> + dev_err(dev, "%s command db missing aux data\n", >> + bcm->name); >> + return -EINVAL; >> + } >> + >> + ret = cmd_db_read_aux_data(bcm->name, (u8 *)&buf, >> + sizeof(struct bcm_db)); > Please align to the open parenthesis. > >> + if (ret < 0) { >> + dev_err(dev, "%s command db read error (%d)\n", >> + bcm->name, ret); >> + return ret; >> + } >> + >> + bcm->aux_data = buf; >> + >> + for (i = 0; i < bcm->num_nodes; i++){ > Please add a space before the open brace. 
> >> + qn = bcm->nodes[i]; >> + qn->bcms[qn->num_bcms] = bcm; >> + qn->num_bcms++; >> + } >> + >> + return 0; >> +} >> + >> +static int qcom_tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, >> + u64 vote_y, u32 addr, bool commit) >> +{ >> + int ret = 0; >> + bool valid = true; >> + >> + if (!cmd) >> + return ret; >> + >> + if (vote_x == 0 && vote_y == 0) >> + valid = false; >> + >> + if (vote_x > BCM_TCS_CMD_VOTE_MASK) >> + vote_x = BCM_TCS_CMD_VOTE_MASK; >> + >> + if (vote_y > BCM_TCS_CMD_VOTE_MASK) >> + vote_y = BCM_TCS_CMD_VOTE_MASK; >> + >> + cmd->addr = addr; >> + cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y); >> + cmd->wait = commit; >> + >> + return ret; >> +} >> + >> +static void qcom_tcs_list_gen(struct list_head *bcm_list, >> + struct tcs_cmd *tcs_list, int *n) >> +{ >> + struct qcom_icc_bcm *bcm; >> + bool commit; >> + size_t idx = 0, batch = 0, cur_vcd_size = 0; >> + >> + memset(n, 0, sizeof(int) * SDM845_MAX_VCD); >> + >> + list_for_each_entry(bcm, bcm_list, list){ >> + commit = false; >> + cur_vcd_size++; >> + if((bcm->aux_data.vcd != >> + list_next_entry(bcm, list)->aux_data.vcd) || >> + list_is_last(&bcm->list, bcm_list)) { >> + commit = true; >> + cur_vcd_size = 0; >> + } >> + qcom_tcs_cmd_gen(&tcs_list[idx], bcm->vote_x, bcm->vote_y, >> + bcm->addr, commit); >> + idx++; >> + n[batch]++; >> + if (n[batch] >= MAX_RPMH_PAYLOAD && commit == false) { > !commit instead of (commit == false) > >> + n[batch] -= cur_vcd_size; >> + batch++; >> + n[batch] = cur_vcd_size; >> + } >> + } >> + n[batch+1] = 0; >> +} >> + >> +static void qcom_icc_bcm_aggregate(struct qcom_icc_bcm *bcm) >> +{ >> + size_t i; >> + u64 agg_avg = 0; >> + u64 agg_peak = 0; >> + >> + for (i = 0; i < bcm->num_nodes; i++){ >> + agg_avg = max(agg_avg, >> + bcm->nodes[i]->sum_avg * bcm->aux_data.width / >> + (bcm->nodes[i]->buswidth * bcm->nodes[i]->channels)); >> + agg_peak = max(agg_peak, >> + bcm->nodes[i]->max_peak * bcm->aux_data.width / >> + bcm->nodes[i]->buswidth); >> + } >> + >> 
+ bcm->vote_x = (u64)(agg_avg * 1000ULL / bcm->aux_data.unit); >> + bcm->vote_y = (u64)(agg_peak * 1000ULL / bcm->aux_data.unit); >> + bcm->dirty = true; >> +} >> + >> +static void qcom_icc_aggregate(struct icc_node *node) >> +{ >> + size_t i; >> + struct icc_req *r; >> + struct qcom_icc_node *qn; >> + u64 agg_avg = 0; >> + u64 agg_peak = 0; >> + >> + qn = node->data; >> + >> + hlist_for_each_entry(r, &node->req_list, req_node) { >> + agg_avg += r->avg_bw; >> + agg_peak = max(agg_peak, (u64)r->peak_bw); > You can use max_t(u64, agg_peak, r->peak_bw); > >> + } >> + >> + qn->sum_avg = agg_avg; >> + qn->max_peak = agg_peak; >> + >> + for(i = 0; i < qn->num_bcms; i++) { >> + qcom_icc_bcm_aggregate(qn->bcms[i]); >> + } >> +} >> + >> +static int qcom_icc_set(struct icc_node *src, struct icc_node *dst, >> + u32 avg, u32 peak) >> +{ >> + struct qcom_icc_provider *qp; >> + struct qcom_icc_node *qn; >> + struct icc_node *node; >> + struct icc_provider *provider; >> + struct tcs_cmd amc[SDM845_MAX_BCMS]; >> + struct list_head commit_list; >> + int n[SDM845_MAX_VCD]; >> + int ret = 0, i; >> + >> + if (!src) >> + node = dst; >> + else >> + node = src; >> + >> + qn = node->data; >> + provider = node->provider; >> + qp = to_qcom_provider(node->provider); >> + >> + INIT_LIST_HEAD(&commit_list); >> + >> + for(i = 0; i < qp->num_bcms; i++){ >> + if(qp->bcms[i]->dirty == true) { > if (qp->bcms[i]->dirty) { > >> + list_add_tail(&qp->bcms[i]->list, &commit_list); >> + qp->bcms[i]->dirty = false; >> + } >> + } >> + >> + qcom_tcs_list_gen(&commit_list, amc, n); >> + >> + if (!n[0]) >> + return ret; >> + >> + ret = rpmh_invalidate(qp->rpmh_client); >> + if (ret){ >> + pr_err("Error invalidating RPMH client (%d)\n", ret); >> + return ret; >> + } >> + >> + ret = rpmh_write_batch(qp->rpmh_client, RPMH_ACTIVE_ONLY_STATE, amc, n); >> + if (ret){ >> + pr_err("Error sending AMC RPMH requests (%d)\n", ret); >> + return ret; >> + } >> + >> + /* TODO: collect and send wake and sleep sets */ >> + 
return ret; >> +} >> + >> +static int cmp_vcd(const void *_l, const void *_r) >> +{ >> + const struct qcom_icc_bcm **l = (const struct qcom_icc_bcm**)_l; >> + const struct qcom_icc_bcm **r = (const struct qcom_icc_bcm**)_r; >> + >> + if (l[0]->aux_data.vcd < r[0]->aux_data.vcd) >> + return -1; >> + else if (l[0]->aux_data.vcd == r[0]->aux_data.vcd) >> + return 0; >> + else >> + return 1; >> +} >> + >> +static int qnoc_probe(struct platform_device *pdev) >> +{ >> + const struct qcom_icc_desc *desc; >> + struct qcom_icc_node **qnodes; >> + struct qcom_icc_provider *qp; >> + struct resource *res; > Unused > >> + struct icc_provider *provider; >> + size_t num_nodes, i; >> + int ret; >> + >> + desc = of_device_get_match_data(&pdev->dev); >> + if (!desc) >> + return -EINVAL; >> + >> + qnodes = desc->nodes; >> + num_nodes = desc->num_nodes; >> + >> + qcom_dev = &pdev->dev; >> + >> + qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL); >> + if (!qp) >> + return -ENOMEM; >> + >> + provider = &qp->provider; >> + provider->dev = &pdev->dev; >> + provider->set = &qcom_icc_set; >> + provider->aggregate = &qcom_icc_aggregate; >> + INIT_LIST_HEAD(&provider->nodes); >> + provider->data = qp; >> + >> + qp->rpmh_client = rpmh_get_client(pdev); >> + qp->bcms = desc->bcms; >> + qp->num_bcms = desc->num_bcms; >> + >> + ret = icc_provider_add(provider); >> + if (ret) { >> + dev_err(&pdev->dev, "error adding interconnect provider\n"); >> + return ret; >> + } >> + >> + for (i = 0; i < num_nodes; i++) { >> + struct icc_node *node; >> + int ret; >> + size_t j; >> + >> + node = icc_node_create(qnodes[i]->id); >> + if (IS_ERR(node)) { >> + ret = PTR_ERR(node); >> + goto err; >> + } >> + >> + node->name = qnodes[i]->name; >> + node->data = qnodes[i]; >> + icc_node_add(node, provider); >> + >> + dev_dbg(&pdev->dev, "registered node %p %s %d\n", node, >> + qnodes[i]->name, node->id); >> + >> + /* populate links */ >> + for (j = 0; j < qnodes[i]->num_links; j++) >> + if (qnodes[i]->links[j]) 
>> + icc_link_create(node, qnodes[i]->links[j]); >> + >> + ret = qcom_icc_init(node); > It would be better to call first init() and then create the links, as > Evan has already suggested. > >> + if (ret) >> + dev_err(&pdev->dev, "%s init error (%d)\n", node->name, >> + ret); >> + } >> + >> + for (i = 0; i < qp->num_bcms; i++) { >> + qcom_icc_bcm_init(qp->bcms[i], &pdev->dev); >> + } > Braces are not necessary. > >> + >> + sort(qp->bcms, qp->num_bcms, sizeof(*qp->bcms), cmp_vcd, NULL); >> + >> + platform_set_drvdata(pdev, provider); >> + dev_info(&pdev->dev, "Registered SDM845 ICC\n"); >> + >> + return ret; >> +err: >> + icc_provider_del(provider); >> + return ret; >> +} >> + >> +static int qnoc_remove(struct platform_device *pdev) >> +{ >> + struct icc_provider *provider = platform_get_drvdata(pdev); >> + >> + icc_provider_del(provider); >> + >> + return 0; >> +} >> + >> +static const struct of_device_id qnoc_of_match[] = { >> + { .compatible = "qcom,rsc-hlos-sdm845", .data = &sdm845_rsc_hlos }, > The format of the compatible should be '<vendor>,<soc>-<block>', so in > this case it's "qcom,sdm845-rsc-hlos". Also we need to add it to > Documentation/devicetree/bindings/interconnect/qcom.txt > > Thanks, > Georgi > >> + { }, >> +}; >> +MODULE_DEVICE_TABLE(of, qnoc_of_match); >> + >> +static struct platform_driver qnoc_driver = { >> + .probe = qnoc_probe, >> + .remove = qnoc_remove, >> + .driver = { >> + .name = "qnoc-sdm845", >> + .of_match_table = qnoc_of_match, >> + }, >> +}; >> +module_platform_driver(qnoc_driver); >> + >> +MODULE_AUTHOR("David Dai <daidavid1@codeaurora.org>"); >> +MODULE_DESCRIPTION("Qualcomm sdm845 NoC driver"); >> +MODULE_LICENSE("GPL v2"); >>
Hi David, On Fri, May 25, 2018 at 11:02 AM David Dai <daidavid1@codeaurora.org> wrote: > > Introduce Qualcomm SDM845 specific provider driver using the > interconnect framework. > > Change-Id: I18194854a6029e814b2deedc1f5ebae6dffd42bf > Signed-off-by: David Dai <daidavid1@codeaurora.org> > --- > drivers/interconnect/qcom/Kconfig | 6 + > drivers/interconnect/qcom/Makefile | 1 + > drivers/interconnect/qcom/qcom-icc-ids.h | 142 ++++++ > drivers/interconnect/qcom/sdm845.c | 815 +++++++++++++++++++++++++++++++ > 4 files changed, 964 insertions(+) > create mode 100644 drivers/interconnect/qcom/qcom-icc-ids.h > create mode 100644 drivers/interconnect/qcom/sdm845.c > ... > diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c > new file mode 100644 > index 0000000..f5a77fe > --- /dev/null > +++ b/drivers/interconnect/qcom/sdm845.c > @@ -0,0 +1,815 @@ > +// SPDX-License-Identifier: GPL-2.0 > +/* > + * Copyright (c) 2018, The Linux Foundation. All rights reserved. > + * > + */ > + > +#include <linux/device.h> > +#include <linux/io.h> > +#include <linux/interconnect.h> > +#include <linux/interconnect-provider.h> > +#include <linux/module.h> > +#include <linux/of_device.h> > +#include <linux/of_platform.h> > +#include <linux/platform_device.h> > +#include <linux/slab.h> > +#include <linux/sort.h> > +#include <linux/debugfs.h> > + > +#include <soc/qcom/rpmh.h> > +#include <soc/qcom/tcs.h> > +#include <soc/qcom/cmd-db.h> > + > +#include "qcom-icc-ids.h" > + > +#define BCM_TCS_CMD_COMMIT_SHFT 30 > +#define BCM_TCS_CMD_COMMIT_MASK 0x40000000 > +#define BCM_TCS_CMD_VALID_SHFT 29 > +#define BCM_TCS_CMD_VALID_MASK 0x20000000 > +#define BCM_TCS_CMD_VOTE_X_SHFT 14 > +#define BCM_TCS_CMD_VOTE_MASK 0x3FFF > +#define BCM_TCS_CMD_VOTE_Y_SHFT 0 > +#define BCM_TCS_CMD_VOTE_Y_MASK 0xFFFC000 > + > +#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \ > + (((commit & 0x1) << BCM_TCS_CMD_COMMIT_SHFT) |\ > + ((valid & 0x1) << BCM_TCS_CMD_VALID_SHFT) |\ The & 1 isn't 
really necessary for commit and valid is it? > + ((vote_x & BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) |\ > + ((vote_y & BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT)) > + > +#define to_qcom_provider(_provider) \ > + container_of(_provider, struct qcom_icc_provider, provider) > + > +#define DEFINE_QNODE(_name, _id, _channels, _buswidth, \ > + _numlinks, ...) \ > + static struct qcom_icc_node _name = { \ > + .id = _id, \ > + .name = #_name, \ > + .channels = _channels, \ > + .buswidth = _buswidth, \ > + .num_links = _numlinks, \ > + .links = { __VA_ARGS__ }, \ > + } > + > +#define DEFINE_QBCM(_name, _bcmname, _numnodes, ...) \ > + static struct qcom_icc_bcm _name = { \ > + .num_nodes = _numnodes, \ > + .name = _bcmname, \ > + .nodes = { __VA_ARGS__ }, \ > + } > + > +static struct device *qcom_dev; You assign this in one place and never use it. Remove? > + > +struct qcom_icc_provider { > + struct icc_provider provider; > + void __iomem *base; > + struct rpmh_client *rpmh_client; > + struct qcom_icc_bcm **bcms; > + size_t num_bcms; > +}; > + > +/** > + * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager(BCM) > + * @unit: bcm threshold values are in magnitudes of this > + * @width: prototype width > + * @vcd: virtual clock domain that this bcm belongs to > +*/ > + > +struct bcm_db { > + u32 unit; > + u16 width; > + u8 vcd; > + u8 reserved; > +}; > + > +#define SDM845_MAX_LINKS 43 > +#define SDM845_MAX_BCMS 30 > +#define SDM845_MAX_BCM_PER_NODE 2 > +#define SDM845_MAX_VCD 10 > + > +/** > + * struct qcom_icc_node - Qualcomm specific interconnect nodes > + * @name: the node name used in debugfs > + * @links: an array of nodes where we can go next while traversing > + * @id: a unique node identifier > + * @num_links: the total number of @links > + * @channels: num of channels at this node > + * @buswidth: width of the interconnect between a node and the bus > + * @sum_avg: current sum aggregate value of all avg bw requests > + * @max_peak: 
current max aggregate value of all peak bw requests > + * @bcms: list of bcms associated with this logical node > + * @num_bcm: num of @bcms > + */ > +struct qcom_icc_node { > + unsigned char *name; const char * > + u16 links[SDM845_MAX_LINKS]; > + u16 id; > + u16 num_links; > + u16 channels; > + u16 buswidth; > + u64 sum_avg; > + u64 max_peak; > + struct qcom_icc_bcm *bcms[SDM845_MAX_BCM_PER_NODE]; > + size_t num_bcms; > +}; > + > +/** > + * struct qcom_icc_bcm - Qualcomm specific hardware accelerator nodes > + * known as Bus Clock Manager(BCM) > + * @name: the bcm node name used to fetch BCM data from command db > + * @type: latency or bandwidth bcm > + * @addr: address offsets used when voting to RPMH > + * @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm > + * @vote_y: aggregated threshold values, represents peak_bw when @type is bw bcm > + * @dirty: flag used to indicate whether or bcm needs to be committed > + * @aux_data: auxiliary data used when calculating threshold values and > + * communicating with RPMh > + * @list: used to link to other bcms when compiling lists for commit > + * @num_nodes: total number of @num_nodes > + * @nodes: list of qcom_icc_nodes that this BCM encapsulates > + */ > + > +struct qcom_icc_bcm { > + unsigned char *name; const char * again. > + u32 type; > + u32 addr; > + u64 vote_x; > + u64 vote_y; > + bool dirty; > + struct bcm_db aux_data; > + struct list_head list; > + size_t num_nodes; > + struct qcom_icc_node *nodes[]; > +}; > + > +struct qcom_icc_fabric { > + struct qcom_icc_node **nodes; > + size_t num_nodes; > + u32 base_offset; > + u32 qos_offset; > +}; > + > +struct qcom_icc_desc { > + struct qcom_icc_node **nodes; > + size_t num_nodes; > + struct qcom_icc_bcm **bcms; > + size_t num_bcms; > +}; > + ... 
> + > +static struct qcom_icc_desc sdm845_rsc_hlos = { > + .nodes = rsc_hlos_nodes, > + .num_nodes = ARRAY_SIZE(rsc_hlos_nodes), > + .bcms = rsc_hlos_bcms, > + .num_bcms = ARRAY_SIZE(rsc_hlos_bcms), > +}; > + > +static int qcom_icc_init(struct icc_node *node) > +{ > + /* TODO: init qos and priority */ > + > + return 0; > +} > + > +static int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev) > +{ > + struct bcm_db buf = {0}; > + struct qcom_icc_node *qn; > + int ret, i; > + > + bcm->addr = cmd_db_read_addr(bcm->name); > + if (!bcm->addr) { > + dev_err(dev, "%s could not find RPMh address\n", > + bcm->name); > + return -EINVAL; > + } > + > + if (!cmd_db_read_aux_data_len(bcm->name)) { > + dev_err(dev, "%s command db missing aux data\n", > + bcm->name); > + return -EINVAL; > + } > + > + ret = cmd_db_read_aux_data(bcm->name, (u8 *)&buf, > + sizeof(struct bcm_db)); > + if (ret < 0) { > + dev_err(dev, "%s command db read error (%d)\n", > + bcm->name, ret); > + return ret; > + } This doesn't work on big endian systems. You can do the wholesale copy but then you need to convert the members to little endian. Also, why do you copy it into a local and then into bcm->aux_data? Why not just read directly into bcm->aux_data? > + > + bcm->aux_data = buf; > + > + for (i = 0; i < bcm->num_nodes; i++){ > + qn = bcm->nodes[i]; > + qn->bcms[qn->num_bcms] = bcm; > + qn->num_bcms++; > + } > + > + return 0; > +} > + > +static int qcom_tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, > + u64 vote_y, u32 addr, bool commit) > +{ > + int ret = 0; > + bool valid = true; > + > + if (!cmd) > + return ret; If this is just to catch errors during development, please remove. 
> + > + if (vote_x == 0 && vote_y == 0) > + valid = false; > + > + if (vote_x > BCM_TCS_CMD_VOTE_MASK) > + vote_x = BCM_TCS_CMD_VOTE_MASK; > + > + if (vote_y > BCM_TCS_CMD_VOTE_MASK) > + vote_y = BCM_TCS_CMD_VOTE_MASK; > + > + cmd->addr = addr; > + cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y); > + cmd->wait = commit; Why is it called wait in cmd and commit in the parameter? Those feel like two different meanings. You don't commit anything here, so we should probably name the parameter wait everywhere (in the macro too). Another reason I think wait is a better name: once these get sent, it's not like the commands without the flag don't go through; they're still committed, the flag just says "don't move on until it's all really done", right? > + > + return ret; > +} > + > +static void qcom_tcs_list_gen(struct list_head *bcm_list, > + struct tcs_cmd *tcs_list, int *n) > +{ > + struct qcom_icc_bcm *bcm; > + bool commit; > + size_t idx = 0, batch = 0, cur_vcd_size = 0; > + > + memset(n, 0, sizeof(int) * SDM845_MAX_VCD); > + > + list_for_each_entry(bcm, bcm_list, list){ > + commit = false; > + cur_vcd_size++; > + if((bcm->aux_data.vcd != > + list_next_entry(bcm, list)->aux_data.vcd) || > + list_is_last(&bcm->list, bcm_list)) { > + commit = true; > + cur_vcd_size = 0; > + } > + qcom_tcs_cmd_gen(&tcs_list[idx], bcm->vote_x, bcm->vote_y, > + bcm->addr, commit); > + idx++; > + n[batch]++; > + if (n[batch] >= MAX_RPMH_PAYLOAD && commit == false) { > + n[batch] -= cur_vcd_size; > + batch++; > + n[batch] = cur_vcd_size; > + } This is very confusing, what's going on here? If commit is false, it looks like you're moving the current set of commands for the VCD into the next batch. But if commit is true, we just hit a batch boundary, and so the _next_ command should go in a new batch. Is that correct? What if you have a series of more than MAX_RPMH_PAYLOAD requests where each request has a different VCD? 
Or what if you have (MAX_RPMH_PAYLOAD - 1) requests in the same VCD, and then a string of 3 requests all in different VCDs? I don't get what the rules are between batch and commit. > + } > + n[batch+1] = 0; > +} > + > +static void qcom_icc_bcm_aggregate(struct qcom_icc_bcm *bcm) > +{ > + size_t i; > + u64 agg_avg = 0; > + u64 agg_peak = 0; > + > + for (i = 0; i < bcm->num_nodes; i++){ > + agg_avg = max(agg_avg, > + bcm->nodes[i]->sum_avg * bcm->aux_data.width / > + (bcm->nodes[i]->buswidth * bcm->nodes[i]->channels)); > + agg_peak = max(agg_peak, > + bcm->nodes[i]->max_peak * bcm->aux_data.width / > + bcm->nodes[i]->buswidth); > + } > + > + bcm->vote_x = (u64)(agg_avg * 1000ULL / bcm->aux_data.unit); > + bcm->vote_y = (u64)(agg_peak * 1000ULL / bcm->aux_data.unit); > + bcm->dirty = true; > +} > + > +static void qcom_icc_aggregate(struct icc_node *node) > +{ > + size_t i; > + struct icc_req *r; > + struct qcom_icc_node *qn; > + u64 agg_avg = 0; > + u64 agg_peak = 0; > + > + qn = node->data; > + > + hlist_for_each_entry(r, &node->req_list, req_node) { > + agg_avg += r->avg_bw; > + agg_peak = max(agg_peak, (u64)r->peak_bw); > + } > + > + qn->sum_avg = agg_avg; > + qn->max_peak = agg_peak; > + > + for(i = 0; i < qn->num_bcms; i++) { > + qcom_icc_bcm_aggregate(qn->bcms[i]); > + } > +} > + > +static int qcom_icc_set(struct icc_node *src, struct icc_node *dst, > + u32 avg, u32 peak) > +{ > + struct qcom_icc_provider *qp; > + struct qcom_icc_node *qn; > + struct icc_node *node; > + struct icc_provider *provider; > + struct tcs_cmd amc[SDM845_MAX_BCMS]; > + struct list_head commit_list; > + int n[SDM845_MAX_VCD]; > + int ret = 0, i; > + > + if (!src) > + node = dst; > + else > + node = src; > + > + qn = node->data; > + provider = node->provider; > + qp = to_qcom_provider(node->provider); > + > + INIT_LIST_HEAD(&commit_list); > + > + for(i = 0; i < qp->num_bcms; i++){ > + if(qp->bcms[i]->dirty == true) { > + list_add_tail(&qp->bcms[i]->list, &commit_list); > + 
qp->bcms[i]->dirty = false; > + } > + } > + > + qcom_tcs_list_gen(&commit_list, amc, n); Perhaps this will be cleared up when you respond to my comment above, but doesn't n need to be sized as max(SDM845_MAX_BCMS, SDM845_MAX_VCD)? Are BCMs always guaranteed to be in VCD order? If not, then a long string of BCMs in a crappy VCD order (eg ABCABCABC rather than AAABBBCCC) could overflow MAX_VCD. Also I know it's an array of sizes, but can you come up with a more descriptive name than n? Ah, wait, I see below that you do sort the BCMs by VCD. Perhaps a comment here indicating that would be good. > + > + if (!n[0]) > + return ret; > + > + ret = rpmh_invalidate(qp->rpmh_client); > + if (ret){ > + pr_err("Error invalidating RPMH client (%d)\n", ret); > + return ret; > + } > + > + ret = rpmh_write_batch(qp->rpmh_client, RPMH_ACTIVE_ONLY_STATE, amc, n); > + if (ret){ > + pr_err("Error sending AMC RPMH requests (%d)\n", ret); > + return ret; > + } > + > + /* TODO: collect and send wake and sleep sets */ > + return ret; > +} > + > +static int cmp_vcd(const void *_l, const void *_r) > +{ > + const struct qcom_icc_bcm **l = (const struct qcom_icc_bcm**)_l; > + const struct qcom_icc_bcm **r = (const struct qcom_icc_bcm**)_r; > + > + if (l[0]->aux_data.vcd < r[0]->aux_data.vcd) > + return -1; > + else if (l[0]->aux_data.vcd == r[0]->aux_data.vcd) > + return 0; > + else > + return 1; > +} > + > +static int qnoc_probe(struct platform_device *pdev) > +{ > + const struct qcom_icc_desc *desc; > + struct qcom_icc_node **qnodes; > + struct qcom_icc_provider *qp; > + struct resource *res; > + struct icc_provider *provider; > + size_t num_nodes, i; > + int ret; > + > + desc = of_device_get_match_data(&pdev->dev); > + if (!desc) > + return -EINVAL; > + > + qnodes = desc->nodes; > + num_nodes = desc->num_nodes; > + > + qcom_dev = &pdev->dev; > + > + qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL); > + if (!qp) > + return -ENOMEM; > + > + provider = &qp->provider; > + provider->dev = 
&pdev->dev; > + provider->set = &qcom_icc_set; > + provider->aggregate = &qcom_icc_aggregate; > + INIT_LIST_HEAD(&provider->nodes); > + provider->data = qp; > + > + qp->rpmh_client = rpmh_get_client(pdev); > + qp->bcms = desc->bcms; > + qp->num_bcms = desc->num_bcms; > + > + ret = icc_provider_add(provider); > + if (ret) { > + dev_err(&pdev->dev, "error adding interconnect provider\n"); > + return ret; > + } > + > + for (i = 0; i < num_nodes; i++) { > + struct icc_node *node; > + int ret; > + size_t j; > + > + node = icc_node_create(qnodes[i]->id); > + if (IS_ERR(node)) { > + ret = PTR_ERR(node); > + goto err; > + } > + > + node->name = qnodes[i]->name; > + node->data = qnodes[i]; > + icc_node_add(node, provider); > + > + dev_dbg(&pdev->dev, "registered node %p %s %d\n", node, > + qnodes[i]->name, node->id); > + > + /* populate links */ > + for (j = 0; j < qnodes[i]->num_links; j++) > + if (qnodes[i]->links[j]) > + icc_link_create(node, qnodes[i]->links[j]); > + > + ret = qcom_icc_init(node); > + if (ret) > + dev_err(&pdev->dev, "%s init error (%d)\n", node->name, > + ret); > + } > + > + for (i = 0; i < qp->num_bcms; i++) { > + qcom_icc_bcm_init(qp->bcms[i], &pdev->dev); > + } > + > + sort(qp->bcms, qp->num_bcms, sizeof(*qp->bcms), cmp_vcd, NULL); Add a comment explaining why you're sorting by vcd. Is it just to avoid overflow with bad ordering, or would odd behavior crop up if these requests were sent to the hardware in a scattered fashion? Is the entire concept of a VCD only implemented here by this ordering? 
> + > + platform_set_drvdata(pdev, provider); > + dev_info(&pdev->dev, "Registered SDM845 ICC\n"); > + > + return ret; > +err: > + icc_provider_del(provider); > + return ret; > +} > + > +static int qnoc_remove(struct platform_device *pdev) > +{ > + struct icc_provider *provider = platform_get_drvdata(pdev); > + > + icc_provider_del(provider); > + > + return 0; > +} > + > +static const struct of_device_id qnoc_of_match[] = { > + { .compatible = "qcom,rsc-hlos-sdm845", .data = &sdm845_rsc_hlos }, > + { }, > +}; > +MODULE_DEVICE_TABLE(of, qnoc_of_match); > + > +static struct platform_driver qnoc_driver = { > + .probe = qnoc_probe, > + .remove = qnoc_remove, > + .driver = { > + .name = "qnoc-sdm845", > + .of_match_table = qnoc_of_match, > + }, > +}; > +module_platform_driver(qnoc_driver); > + > +MODULE_AUTHOR("David Dai <daidavid1@codeaurora.org>"); > +MODULE_DESCRIPTION("Qualcomm sdm845 NoC driver"); > +MODULE_LICENSE("GPL v2"); > -- > The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum, > a Linux Foundation Collaborative Project >
Hi Evan, Thank you for taking the time to review this, On 5/30/2018 3:46 PM, Evan Green wrote: > Hi David, > > On Fri, May 25, 2018 at 11:02 AM David Dai <daidavid1@codeaurora.org> wrote: >> Introduce Qualcomm SDM845 specific provider driver using the >> interconnect framework. >> >> Change-Id: I18194854a6029e814b2deedc1f5ebae6dffd42bf >> Signed-off-by: David Dai <daidavid1@codeaurora.org> >> --- >> drivers/interconnect/qcom/Kconfig | 6 + >> drivers/interconnect/qcom/Makefile | 1 + >> drivers/interconnect/qcom/qcom-icc-ids.h | 142 ++++++ >> drivers/interconnect/qcom/sdm845.c | 815 +++++++++++++++++++++++++++++++ >> 4 files changed, 964 insertions(+) >> create mode 100644 drivers/interconnect/qcom/qcom-icc-ids.h >> create mode 100644 drivers/interconnect/qcom/sdm845.c >> > ... >> diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c >> new file mode 100644 >> index 0000000..f5a77fe >> --- /dev/null >> +++ b/drivers/interconnect/qcom/sdm845.c >> @@ -0,0 +1,815 @@ >> +// SPDX-License-Identifier: GPL-2.0 >> +/* >> + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
>> + * >> + */ >> + >> +#include <linux/device.h> >> +#include <linux/io.h> >> +#include <linux/interconnect.h> >> +#include <linux/interconnect-provider.h> >> +#include <linux/module.h> >> +#include <linux/of_device.h> >> +#include <linux/of_platform.h> >> +#include <linux/platform_device.h> >> +#include <linux/slab.h> >> +#include <linux/sort.h> >> +#include <linux/debugfs.h> >> + >> +#include <soc/qcom/rpmh.h> >> +#include <soc/qcom/tcs.h> >> +#include <soc/qcom/cmd-db.h> >> + >> +#include "qcom-icc-ids.h" >> + >> +#define BCM_TCS_CMD_COMMIT_SHFT 30 >> +#define BCM_TCS_CMD_COMMIT_MASK 0x40000000 >> +#define BCM_TCS_CMD_VALID_SHFT 29 >> +#define BCM_TCS_CMD_VALID_MASK 0x20000000 >> +#define BCM_TCS_CMD_VOTE_X_SHFT 14 >> +#define BCM_TCS_CMD_VOTE_MASK 0x3FFF >> +#define BCM_TCS_CMD_VOTE_Y_SHFT 0 >> +#define BCM_TCS_CMD_VOTE_Y_MASK 0xFFFC000 >> + >> +#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \ >> + (((commit & 0x1) << BCM_TCS_CMD_COMMIT_SHFT) |\ >> + ((valid & 0x1) << BCM_TCS_CMD_VALID_SHFT) |\ > The & 1 isn't really necesary for commit and valid is it? Acked >> + ((vote_x & BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) |\ >> + ((vote_y & BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT)) >> + >> +#define to_qcom_provider(_provider) \ >> + container_of(_provider, struct qcom_icc_provider, provider) >> + >> +#define DEFINE_QNODE(_name, _id, _channels, _buswidth, \ >> + _numlinks, ...) \ >> + static struct qcom_icc_node _name = { \ >> + .id = _id, \ >> + .name = #_name, \ >> + .channels = _channels, \ >> + .buswidth = _buswidth, \ >> + .num_links = _numlinks, \ >> + .links = { __VA_ARGS__ }, \ >> + } >> + >> +#define DEFINE_QBCM(_name, _bcmname, _numnodes, ...) \ >> + static struct qcom_icc_bcm _name = { \ >> + .num_nodes = _numnodes, \ >> + .name = _bcmname, \ >> + .nodes = { __VA_ARGS__ }, \ >> + } >> + >> +static struct device *qcom_dev; > You assign this in once place and never use it. Remove? 
Acked >> + >> +struct qcom_icc_provider { >> + struct icc_provider provider; >> + void __iomem *base; >> + struct rpmh_client *rpmh_client; >> + struct qcom_icc_bcm **bcms; >> + size_t num_bcms; >> +}; >> + >> +/** >> + * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager(BCM) >> + * @unit: bcm threshold values are in magnitudes of this >> + * @width: prototype width >> + * @vcd: virtual clock domain that this bcm belongs to >> +*/ >> + >> +struct bcm_db { >> + u32 unit; >> + u16 width; >> + u8 vcd; >> + u8 reserved; >> +}; >> + >> +#define SDM845_MAX_LINKS 43 >> +#define SDM845_MAX_BCMS 30 >> +#define SDM845_MAX_BCM_PER_NODE 2 >> +#define SDM845_MAX_VCD 10 >> + >> +/** >> + * struct qcom_icc_node - Qualcomm specific interconnect nodes >> + * @name: the node name used in debugfs >> + * @links: an array of nodes where we can go next while traversing >> + * @id: a unique node identifier >> + * @num_links: the total number of @links >> + * @channels: num of channels at this node >> + * @buswidth: width of the interconnect between a node and the bus >> + * @sum_avg: current sum aggregate value of all avg bw requests >> + * @max_peak: current max aggregate value of all peak bw requests >> + * @bcms: list of bcms associated with this logical node >> + * @num_bcm: num of @bcms >> + */ >> +struct qcom_icc_node { >> + unsigned char *name; > const char * Acked >> + u16 links[SDM845_MAX_LINKS]; >> + u16 id; >> + u16 num_links; >> + u16 channels; >> + u16 buswidth; >> + u64 sum_avg; >> + u64 max_peak; >> + struct qcom_icc_bcm *bcms[SDM845_MAX_BCM_PER_NODE]; >> + size_t num_bcms; >> +}; >> + >> +/** >> + * struct qcom_icc_bcm - Qualcomm specific hardware accelerator nodes >> + * known as Bus Clock Manager(BCM) >> + * @name: the bcm node name used to fetch BCM data from command db >> + * @type: latency or bandwidth bcm >> + * @addr: address offsets used when voting to RPMH >> + * @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm >> + * 
@vote_y: aggregated threshold values, represents peak_bw when @type is bw bcm >> + * @dirty: flag used to indicate whether or bcm needs to be committed >> + * @aux_data: auxiliary data used when calculating threshold values and >> + * communicating with RPMh >> + * @list: used to link to other bcms when compiling lists for commit >> + * @num_nodes: total number of @num_nodes >> + * @nodes: list of qcom_icc_nodes that this BCM encapsulates >> + */ >> + >> +struct qcom_icc_bcm { >> + unsigned char *name; > const char * again. Acked >> + u32 type; >> + u32 addr; >> + u64 vote_x; >> + u64 vote_y; >> + bool dirty; >> + struct bcm_db aux_data; >> + struct list_head list; >> + size_t num_nodes; >> + struct qcom_icc_node *nodes[]; >> +}; >> + >> +struct qcom_icc_fabric { >> + struct qcom_icc_node **nodes; >> + size_t num_nodes; >> + u32 base_offset; >> + u32 qos_offset; >> +}; >> + >> +struct qcom_icc_desc { >> + struct qcom_icc_node **nodes; >> + size_t num_nodes; >> + struct qcom_icc_bcm **bcms; >> + size_t num_bcms; >> +}; >> + > ... 
>> + >> +static struct qcom_icc_desc sdm845_rsc_hlos = { >> + .nodes = rsc_hlos_nodes, >> + .num_nodes = ARRAY_SIZE(rsc_hlos_nodes), >> + .bcms = rsc_hlos_bcms, >> + .num_bcms = ARRAY_SIZE(rsc_hlos_bcms), >> +}; >> + >> +static int qcom_icc_init(struct icc_node *node) >> +{ >> + /* TODO: init qos and priority */ >> + >> + return 0; >> +} >> + >> +static int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev) >> +{ >> + struct bcm_db buf = {0}; >> + struct qcom_icc_node *qn; >> + int ret, i; >> + >> + bcm->addr = cmd_db_read_addr(bcm->name); >> + if (!bcm->addr) { >> + dev_err(dev, "%s could not find RPMh address\n", >> + bcm->name); >> + return -EINVAL; >> + } >> + >> + if (!cmd_db_read_aux_data_len(bcm->name)) { >> + dev_err(dev, "%s command db missing aux data\n", >> + bcm->name); >> + return -EINVAL; >> + } >> + >> + ret = cmd_db_read_aux_data(bcm->name, (u8 *)&buf, >> + sizeof(struct bcm_db)); >> + if (ret < 0) { >> + dev_err(dev, "%s command db read error (%d)\n", >> + bcm->name, ret); >> + return ret; >> + } > This doesn't work on big endian systems. You can do the wholesale copy > but then you need to convert the members to little endian. Also, why > do you copy it into a local and then into bcm->aux_data? Why not just > read directly into bcm->aux_data? Acked >> + >> + bcm->aux_data = buf; >> + >> + for (i = 0; i < bcm->num_nodes; i++){ >> + qn = bcm->nodes[i]; >> + qn->bcms[qn->num_bcms] = bcm; >> + qn->num_bcms++; >> + } >> + >> + return 0; >> +} >> + >> +static int qcom_tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, >> + u64 vote_y, u32 addr, bool commit) >> +{ >> + int ret = 0; >> + bool valid = true; >> + >> + if (!cmd) >> + return ret; > If this is just to catch errors during development, please remove. 
Acked >> + >> + if (vote_x == 0 && vote_y == 0) >> + valid = false; >> + >> + if (vote_x > BCM_TCS_CMD_VOTE_MASK) >> + vote_x = BCM_TCS_CMD_VOTE_MASK; >> + >> + if (vote_y > BCM_TCS_CMD_VOTE_MASK) >> + vote_y = BCM_TCS_CMD_VOTE_MASK; >> + >> + cmd->addr = addr; >> + cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y); >> + cmd->wait = commit; > Why is it called wait in cmd and commit in the parameter? Those feel > like two different meanings. You don't commit anything here, so we > should probably name the parameter wait everywhere (in the macro too). > Another reason I think wait is a better name: once these get sent, > it's not like the commands without the flag don't go through; they're > still committed, the flag just says "don't move on until it's all > really done", right? The commit bit part of the data lets the hardware know when to trigger the aggregation, we want this to happen at the end of a grouping of BCMs within a VCD, so that we have all the updated commands for a particular grouping of BCMs in that VCD. Subsequently, we need an ack from the hardware to let us know that the aggregation was successful before we free the tcs slot. I think the flag actually means "don't free the entire tcs slot until we receive the completion signal from the hardware", I believe the commands in the same tcs slot following the cmd with the commit bit will continue to execute to take advantage of BCM's parallelism, but will not free up the tcs slot until it has received all the completion signals. It just so happens we need to set the wait flag on a command with the commit bit, not that wait has the same meaning as commit, though it definitely looks that way due to how it's written. I can change how that looks or add a comment detailing why we need to set the wait for completion bit and how it matches up with the commit bit in the data. 
>> + >> + return ret; >> +} >> + >> +static void qcom_tcs_list_gen(struct list_head *bcm_list, >> + struct tcs_cmd *tcs_list, int *n) >> +{ >> + struct qcom_icc_bcm *bcm; >> + bool commit; >> + size_t idx = 0, batch = 0, cur_vcd_size = 0; >> + >> + memset(n, 0, sizeof(int) * SDM845_MAX_VCD); >> + >> + list_for_each_entry(bcm, bcm_list, list){ >> + commit = false; >> + cur_vcd_size++; >> + if((bcm->aux_data.vcd != >> + list_next_entry(bcm, list)->aux_data.vcd) || >> + list_is_last(&bcm->list, bcm_list)) { >> + commit = true; >> + cur_vcd_size = 0; >> + } >> + qcom_tcs_cmd_gen(&tcs_list[idx], bcm->vote_x, bcm->vote_y, >> + bcm->addr, commit); >> + idx++; >> + n[batch]++; >> + if (n[batch] >= MAX_RPMH_PAYLOAD && commit == false) { >> + n[batch] -= cur_vcd_size; >> + batch++; >> + n[batch] = cur_vcd_size; >> + } > This is very confusing, what's going on here? If commit is false, it > looks like you're moving the current set of commands for the VCD into > the next batch. But if commit is true, we just hit a batch boundary, > and so the _next_ command should go in a new batch. Is that correct? > What if you have a series of more than MAX_RPMH_PAYLOAD requests where > each request has a different VCD? Or what if you have > (MAX_RPMH_PAYLOAD - 1) requests in the same VCD, and then a string of > 3 requests all in different VCDs? I don't get what the rules are > between batch and commit. I admit this part wasn't too well thought out, I will tweak the logic to fix it, but yes the idea is to move the current VCD into the next batch if it ends up cutting up a group of VCDs. There's also an implicit assumption here that # of bcms per VCD will always be less than MAX_RPMH_PAYLOAD. 
> >> + } >> + n[batch+1] = 0; >> +} >> + >> +static void qcom_icc_bcm_aggregate(struct qcom_icc_bcm *bcm) >> +{ >> + size_t i; >> + u64 agg_avg = 0; >> + u64 agg_peak = 0; >> + >> + for (i = 0; i < bcm->num_nodes; i++){ >> + agg_avg = max(agg_avg, >> + bcm->nodes[i]->sum_avg * bcm->aux_data.width / >> + (bcm->nodes[i]->buswidth * bcm->nodes[i]->channels)); >> + agg_peak = max(agg_peak, >> + bcm->nodes[i]->max_peak * bcm->aux_data.width / >> + bcm->nodes[i]->buswidth); >> + } >> + >> + bcm->vote_x = (u64)(agg_avg * 1000ULL / bcm->aux_data.unit); >> + bcm->vote_y = (u64)(agg_peak * 1000ULL / bcm->aux_data.unit); >> + bcm->dirty = true; >> +} >> + >> +static void qcom_icc_aggregate(struct icc_node *node) >> +{ >> + size_t i; >> + struct icc_req *r; >> + struct qcom_icc_node *qn; >> + u64 agg_avg = 0; >> + u64 agg_peak = 0; >> + >> + qn = node->data; >> + >> + hlist_for_each_entry(r, &node->req_list, req_node) { >> + agg_avg += r->avg_bw; >> + agg_peak = max(agg_peak, (u64)r->peak_bw); >> + } >> + >> + qn->sum_avg = agg_avg; >> + qn->max_peak = agg_peak; >> + >> + for(i = 0; i < qn->num_bcms; i++) { >> + qcom_icc_bcm_aggregate(qn->bcms[i]); >> + } >> +} >> + >> +static int qcom_icc_set(struct icc_node *src, struct icc_node *dst, >> + u32 avg, u32 peak) >> +{ >> + struct qcom_icc_provider *qp; >> + struct qcom_icc_node *qn; >> + struct icc_node *node; >> + struct icc_provider *provider; >> + struct tcs_cmd amc[SDM845_MAX_BCMS]; >> + struct list_head commit_list; >> + int n[SDM845_MAX_VCD]; >> + int ret = 0, i; >> + >> + if (!src) >> + node = dst; >> + else >> + node = src; >> + >> + qn = node->data; >> + provider = node->provider; >> + qp = to_qcom_provider(node->provider); >> + >> + INIT_LIST_HEAD(&commit_list); >> + >> + for(i = 0; i < qp->num_bcms; i++){ >> + if(qp->bcms[i]->dirty == true) { >> + list_add_tail(&qp->bcms[i]->list, &commit_list); >> + qp->bcms[i]->dirty = false; >> + } >> + } >> + >> + qcom_tcs_list_gen(&commit_list, amc, n); > Perhaps this will be 
cleared up when you respond to my comment above, > but doesn't n need to be sized as max(SDM845_MAX_BCMS, > SDM845_MAX_VCD)? Are BCMs always guaranteed to be in VCD order? If > not, then a long string of BCMs in a crappy VCD order (eg ABCABCABC > rather than AAABBBCCC) could overflow MAX_VCD. Also I know it's an > array of sizes, but can you come up with a more descriptive name than > n? There's technically no size limit to n; the rpmh driver iterates and rotates through the tcs slots as one batch completes after the other. I think the RPMH driver set an arbitrary limit of RPMH_MAX_REQ_IN_BATCH (10) that I can use here; there are no practical use cases that would have such a large number of batches, though. I used n as it was consistent with the naming in rpmh, but I can change it to be more descriptive. > > Ah, wait, I see below that you do sort the BCMs by VCD. Perhaps a > comment here indicating that would be good. > >> + >> + if (!n[0]) >> + return ret; >> + >> + ret = rpmh_invalidate(qp->rpmh_client); >> + if (ret){ >> + pr_err("Error invalidating RPMH client (%d)\n", ret); >> + return ret; >> + } >> + >> + ret = rpmh_write_batch(qp->rpmh_client, RPMH_ACTIVE_ONLY_STATE, amc, n); >> + if (ret){ >> + pr_err("Error sending AMC RPMH requests (%d)\n", ret); >> + return ret; >> + } >> + >> + /* TODO: collect and send wake and sleep sets */ >> + return ret; >> +} >> + >> +static int cmp_vcd(const void *_l, const void *_r) >> +{ >> + const struct qcom_icc_bcm **l = (const struct qcom_icc_bcm**)_l; >> + const struct qcom_icc_bcm **r = (const struct qcom_icc_bcm**)_r; >> + >> + if (l[0]->aux_data.vcd < r[0]->aux_data.vcd) >> + return -1; >> + else if (l[0]->aux_data.vcd == r[0]->aux_data.vcd) >> + return 0; >> + else >> + return 1; >> +} >> + >> +static int qnoc_probe(struct platform_device *pdev) >> +{ >> + const struct qcom_icc_desc *desc; >> + struct qcom_icc_node **qnodes; >> + struct qcom_icc_provider *qp; >> + struct resource *res; >> + struct icc_provider *provider; >> 
+ size_t num_nodes, i; >> + int ret; >> + >> + desc = of_device_get_match_data(&pdev->dev); >> + if (!desc) >> + return -EINVAL; >> + >> + qnodes = desc->nodes; >> + num_nodes = desc->num_nodes; >> + >> + qcom_dev = &pdev->dev; >> + >> + qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL); >> + if (!qp) >> + return -ENOMEM; >> + >> + provider = &qp->provider; >> + provider->dev = &pdev->dev; >> + provider->set = &qcom_icc_set; >> + provider->aggregate = &qcom_icc_aggregate; >> + INIT_LIST_HEAD(&provider->nodes); >> + provider->data = qp; >> + >> + qp->rpmh_client = rpmh_get_client(pdev); >> + qp->bcms = desc->bcms; >> + qp->num_bcms = desc->num_bcms; >> + >> + ret = icc_provider_add(provider); >> + if (ret) { >> + dev_err(&pdev->dev, "error adding interconnect provider\n"); >> + return ret; >> + } >> + >> + for (i = 0; i < num_nodes; i++) { >> + struct icc_node *node; >> + int ret; >> + size_t j; >> + >> + node = icc_node_create(qnodes[i]->id); >> + if (IS_ERR(node)) { >> + ret = PTR_ERR(node); >> + goto err; >> + } >> + >> + node->name = qnodes[i]->name; >> + node->data = qnodes[i]; >> + icc_node_add(node, provider); >> + >> + dev_dbg(&pdev->dev, "registered node %p %s %d\n", node, >> + qnodes[i]->name, node->id); >> + >> + /* populate links */ >> + for (j = 0; j < qnodes[i]->num_links; j++) >> + if (qnodes[i]->links[j]) >> + icc_link_create(node, qnodes[i]->links[j]); >> + >> + ret = qcom_icc_init(node); >> + if (ret) >> + dev_err(&pdev->dev, "%s init error (%d)\n", node->name, >> + ret); >> + } >> + >> + for (i = 0; i < qp->num_bcms; i++) { >> + qcom_icc_bcm_init(qp->bcms[i], &pdev->dev); >> + } >> + >> + sort(qp->bcms, qp->num_bcms, sizeof(*qp->bcms), cmp_vcd, NULL); > Add a comment explaining why you're sorting by vcd. Is it just to > avoid overflow with bad ordering, or would odd behavior crop up if > these requests were sent to the hardware in a scattered fashion? Is > the entire concept of a VCD only implemented here by this ordering? 
This is both an optimization and also done for a hardware limitation. We want to group the VCDs to prevent repeat aggregation in a batch request, but also to reduce the number of commits when sending batch commands, because there is a hard limitation on how many concurrent aggregations can occur. The VCDs are also ordered in such a way that the most expensive operations are ordered lowest, which allows us to take greater advantage of BCM's parallelism. I will add a comment for this. >> + >> + platform_set_drvdata(pdev, provider); >> + dev_info(&pdev->dev, "Registered SDM845 ICC\n"); >> + >> + return ret; >> +err: >> + icc_provider_del(provider); >> + return ret; >> +} >> + >> +static int qnoc_remove(struct platform_device *pdev) >> +{ >> + struct icc_provider *provider = platform_get_drvdata(pdev); >> + >> + icc_provider_del(provider); >> + >> + return 0; >> +} >> + >> +static const struct of_device_id qnoc_of_match[] = { >> + { .compatible = "qcom,rsc-hlos-sdm845", .data = &sdm845_rsc_hlos }, >> + { }, >> +}; >> +MODULE_DEVICE_TABLE(of, qnoc_of_match); >> + >> +static struct platform_driver qnoc_driver = { >> + .probe = qnoc_probe, >> + .remove = qnoc_remove, >> + .driver = { >> + .name = "qnoc-sdm845", >> + .of_match_table = qnoc_of_match, >> + }, >> +}; >> +module_platform_driver(qnoc_driver); >> + >> +MODULE_AUTHOR("David Dai <daidavid1@codeaurora.org>"); >> +MODULE_DESCRIPTION("Qualcomm sdm845 NoC driver"); >> +MODULE_LICENSE("GPL v2"); >> -- >> The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum, >> a Linux Foundation Collaborative Project >>
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig index 86465dc..8901181 100644 --- a/drivers/interconnect/qcom/Kconfig +++ b/drivers/interconnect/qcom/Kconfig @@ -9,3 +9,9 @@ config INTERCONNECT_QCOM_MSM8916 depends on INTERCONNECT_QCOM help This is a driver for the Qualcomm Network-on-Chip on msm8916-based platforms. + +config INTERCONNECT_QCOM_SDM845 + tristate "Qualcomm SDM845 interconnect driver" + depends on INTERCONNECT_QCOM + help + This is a driver for the Qualcomm Network-on-Chip on sdm845-based platforms. diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile index e8b24c3..9b08d01 100644 --- a/drivers/interconnect/qcom/Makefile +++ b/drivers/interconnect/qcom/Makefile @@ -2,3 +2,4 @@ obj-y += smd-rpm.o obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += msm8916.o +obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += sdm845.o diff --git a/drivers/interconnect/qcom/qcom-icc-ids.h b/drivers/interconnect/qcom/qcom-icc-ids.h new file mode 100644 index 0000000..527c299 --- /dev/null +++ b/drivers/interconnect/qcom/qcom-icc-ids.h @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + */ + +#ifndef _QCOM_ICC_IDS_H +#define _QCOM_ICC_IDS_H + +#define MASTER_APPSS_PROC 0 +#define MASTER_TCU_0 1 +#define MASTER_IPA_CORE 2 +#define MASTER_LLCC 3 +#define MASTER_GNOC_CFG 4 +#define MASTER_A1NOC_CFG 5 +#define MASTER_A2NOC_CFG 6 +#define MASTER_CNOC_DC_NOC 7 +#define MASTER_MEM_NOC_CFG 8 +#define MASTER_CNOC_MNOC_CFG 9 +#define MASTER_QDSS_BAM 10 +#define MASTER_BLSP_1 11 +#define MASTER_BLSP_2 12 +#define MASTER_SNOC_CFG 13 +#define MASTER_SPDM 14 +#define MASTER_TIC 15 +#define MASTER_TSIF 16 +#define MASTER_A1NOC_SNOC 17 +#define MASTER_A2NOC_SNOC 18 +#define MASTER_GNOC_MEM_NOC 19 +#define MASTER_CNOC_A2NOC 20 +#define MASTER_GNOC_SNOC 21 +#define MASTER_MEM_NOC_SNOC 22 +#define MASTER_MNOC_HF_MEM_NOC 23 +#define MASTER_MNOC_SF_MEM_NOC 24 +#define MASTER_ANOC_PCIE_SNOC 25 +#define MASTER_SNOC_CNOC 26 +#define MASTER_SNOC_GC_MEM_NOC 27 +#define MASTER_SNOC_SF_MEM_NOC 28 +#define MASTER_CAMNOC_HF0 29 +#define MASTER_CAMNOC_HF0_UNCOMP 30 +#define MASTER_CAMNOC_HF1 31 +#define MASTER_CAMNOC_HF1_UNCOMP 32 +#define MASTER_CAMNOC_SF 33 +#define MASTER_CAMNOC_SF_UNCOMP 34 +#define MASTER_CRYPTO 35 +#define MASTER_GFX3D 36 +#define MASTER_IPA 37 +#define MASTER_MDP0 38 +#define MASTER_MDP1 39 +#define MASTER_PIMEM 40 +#define MASTER_ROTATOR 41 +#define MASTER_VIDEO_P0 42 +#define MASTER_VIDEO_P1 43 +#define MASTER_VIDEO_PROC 44 +#define MASTER_GIC 45 +#define MASTER_PCIE_1 46 +#define MASTER_PCIE_0 47 +#define MASTER_QDSS_DAP 48 +#define MASTER_QDSS_ETR 49 +#define MASTER_SDCC_2 50 +#define MASTER_SDCC_4 51 +#define MASTER_UFS_CARD 52 +#define MASTER_UFS_MEM 53 +#define MASTER_USB3_0 54 +#define MASTER_USB3_1 55 +#define SLAVE_EBI1 512 +#define SLAVE_IPA_CORE 513 +#define SLAVE_A1NOC_CFG 514 +#define SLAVE_A2NOC_CFG 515 +#define SLAVE_AOP 516 +#define SLAVE_AOSS 517 +#define SLAVE_APPSS 518 +#define SLAVE_CAMERA_CFG 519 +#define SLAVE_CLK_CTL 520 +#define SLAVE_CDSP_CFG 521 +#define SLAVE_RBCPR_CX_CFG 522 +#define SLAVE_CRYPTO_0_CFG 523 +#define 
SLAVE_DCC_CFG 524 +#define SLAVE_CNOC_DDRSS 525 +#define SLAVE_DISPLAY_CFG 526 +#define SLAVE_GLM 527 +#define SLAVE_GFX3D_CFG 528 +#define SLAVE_IMEM_CFG 529 +#define SLAVE_IPA_CFG 530 +#define SLAVE_LLCC_CFG 531 +#define SLAVE_MSS_PROC_MS_MPU_CFG 532 +#define SLAVE_MEM_NOC_CFG 533 +#define SLAVE_CNOC_MNOC_CFG 534 +#define SLAVE_PCIE_0_CFG 535 +#define SLAVE_PCIE_1_CFG 536 +#define SLAVE_PDM 537 +#define SLAVE_SOUTH_PHY_CFG 538 +#define SLAVE_PIMEM_CFG 539 +#define SLAVE_PRNG 540 +#define SLAVE_QDSS_CFG 541 +#define SLAVE_BLSP_2 542 +#define SLAVE_BLSP_1 543 +#define SLAVE_SDCC_2 544 +#define SLAVE_SDCC_4 545 +#define SLAVE_SNOC_CFG 546 +#define SLAVE_SPDM_WRAPPER 547 +#define SLAVE_SPSS_CFG 548 +#define SLAVE_TCSR 549 +#define SLAVE_TLMM_NORTH 550 +#define SLAVE_TLMM_SOUTH 551 +#define SLAVE_TSIF 552 +#define SLAVE_UFS_CARD_CFG 553 +#define SLAVE_UFS_MEM_CFG 554 +#define SLAVE_USB3_0 555 +#define SLAVE_USB3_1 556 +#define SLAVE_VENUS_CFG 557 +#define SLAVE_VSENSE_CTRL_CFG 558 +#define SLAVE_MNOC_SF_MEM_NOC 559 +#define SLAVE_A1NOC_SNOC 560 +#define SLAVE_A2NOC_SNOC 561 +#define SLAVE_MEM_NOC_GNOC 562 +#define SLAVE_CAMNOC_UNCOMP 563 +#define SLAVE_SNOC_CNOC 564 +#define SLAVE_CNOC_A2NOC 565 +#define SLAVE_GNOC_SNOC 566 +#define SLAVE_GNOC_MEM_NOC 567 +#define SLAVE_LLCC 568 +#define SLAVE_MNOC_HF_MEM_NOC 569 +#define SLAVE_SNOC_MEM_NOC_GC 570 +#define SLAVE_SNOC_MEM_NOC_SF 571 +#define SLAVE_MEM_NOC_SNOC 572 +#define SLAVE_ANOC_PCIE_A1NOC_SNOC 573 +#define SLAVE_ANOC_PCIE_SNOC 574 +#define SLAVE_IMEM 575 +#define SLAVE_PCIE_0 576 +#define SLAVE_PCIE_1 577 +#define SLAVE_PIMEM 578 +#define SLAVE_SERVICE_A1NOC 579 +#define SLAVE_SERVICE_A2NOC 580 +#define SLAVE_SERVICE_CNOC 581 +#define SLAVE_SERVICE_GNOC 582 +#define SLAVE_SERVICE_MEM_NOC 583 +#define SLAVE_SERVICE_MNOC 584 +#define SLAVE_SERVICE_SNOC 585 +#define SLAVE_QDSS_STM 586 +#define SLAVE_TCU 587 +#endif diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c new file mode 
100644 index 0000000..f5a77fe --- /dev/null +++ b/drivers/interconnect/qcom/sdm845.c @@ -0,0 +1,815 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + */ + +#include <linux/device.h> +#include <linux/io.h> +#include <linux/interconnect.h> +#include <linux/interconnect-provider.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/sort.h> +#include <linux/debugfs.h> + +#include <soc/qcom/rpmh.h> +#include <soc/qcom/tcs.h> +#include <soc/qcom/cmd-db.h> + +#include "qcom-icc-ids.h" + +#define BCM_TCS_CMD_COMMIT_SHFT 30 +#define BCM_TCS_CMD_COMMIT_MASK 0x40000000 +#define BCM_TCS_CMD_VALID_SHFT 29 +#define BCM_TCS_CMD_VALID_MASK 0x20000000 +#define BCM_TCS_CMD_VOTE_X_SHFT 14 +#define BCM_TCS_CMD_VOTE_MASK 0x3FFF +#define BCM_TCS_CMD_VOTE_Y_SHFT 0 +#define BCM_TCS_CMD_VOTE_Y_MASK 0xFFFC000 + +#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \ + (((commit & 0x1) << BCM_TCS_CMD_COMMIT_SHFT) |\ + ((valid & 0x1) << BCM_TCS_CMD_VALID_SHFT) |\ + ((vote_x & BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) |\ + ((vote_y & BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT)) + +#define to_qcom_provider(_provider) \ + container_of(_provider, struct qcom_icc_provider, provider) + +#define DEFINE_QNODE(_name, _id, _channels, _buswidth, \ + _numlinks, ...) \ + static struct qcom_icc_node _name = { \ + .id = _id, \ + .name = #_name, \ + .channels = _channels, \ + .buswidth = _buswidth, \ + .num_links = _numlinks, \ + .links = { __VA_ARGS__ }, \ + } + +#define DEFINE_QBCM(_name, _bcmname, _numnodes, ...) 
\ + static struct qcom_icc_bcm _name = { \ + .num_nodes = _numnodes, \ + .name = _bcmname, \ + .nodes = { __VA_ARGS__ }, \ + } + +static struct device *qcom_dev; + +struct qcom_icc_provider { + struct icc_provider provider; + void __iomem *base; + struct rpmh_client *rpmh_client; + struct qcom_icc_bcm **bcms; + size_t num_bcms; +}; + +/** + * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager(BCM) + * @unit: bcm threshold values are in magnitudes of this + * @width: prototype width + * @vcd: virtual clock domain that this bcm belongs to +*/ + +struct bcm_db { + u32 unit; + u16 width; + u8 vcd; + u8 reserved; +}; + +#define SDM845_MAX_LINKS 43 +#define SDM845_MAX_BCMS 30 +#define SDM845_MAX_BCM_PER_NODE 2 +#define SDM845_MAX_VCD 10 + +/** + * struct qcom_icc_node - Qualcomm specific interconnect nodes + * @name: the node name used in debugfs + * @links: an array of nodes where we can go next while traversing + * @id: a unique node identifier + * @num_links: the total number of @links + * @channels: num of channels at this node + * @buswidth: width of the interconnect between a node and the bus + * @sum_avg: current sum aggregate value of all avg bw requests + * @max_peak: current max aggregate value of all peak bw requests + * @bcms: list of bcms associated with this logical node + * @num_bcm: num of @bcms + */ +struct qcom_icc_node { + unsigned char *name; + u16 links[SDM845_MAX_LINKS]; + u16 id; + u16 num_links; + u16 channels; + u16 buswidth; + u64 sum_avg; + u64 max_peak; + struct qcom_icc_bcm *bcms[SDM845_MAX_BCM_PER_NODE]; + size_t num_bcms; +}; + +/** + * struct qcom_icc_bcm - Qualcomm specific hardware accelerator nodes + * known as Bus Clock Manager(BCM) + * @name: the bcm node name used to fetch BCM data from command db + * @type: latency or bandwidth bcm + * @addr: address offsets used when voting to RPMH + * @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm + * @vote_y: aggregated threshold values, represents 
peak_bw when @type is bw bcm + * @dirty: flag used to indicate whether or bcm needs to be committed + * @aux_data: auxiliary data used when calculating threshold values and + * communicating with RPMh + * @list: used to link to other bcms when compiling lists for commit + * @num_nodes: total number of @num_nodes + * @nodes: list of qcom_icc_nodes that this BCM encapsulates + */ + +struct qcom_icc_bcm { + unsigned char *name; + u32 type; + u32 addr; + u64 vote_x; + u64 vote_y; + bool dirty; + struct bcm_db aux_data; + struct list_head list; + size_t num_nodes; + struct qcom_icc_node *nodes[]; +}; + +struct qcom_icc_fabric { + struct qcom_icc_node **nodes; + size_t num_nodes; + u32 base_offset; + u32 qos_offset; +}; + +struct qcom_icc_desc { + struct qcom_icc_node **nodes; + size_t num_nodes; + struct qcom_icc_bcm **bcms; + size_t num_bcms; +}; + +DEFINE_QNODE(qhm_a1noc_cfg, MASTER_A1NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A1NOC); +DEFINE_QNODE(qhm_qup1, MASTER_BLSP_1, 1, 4, 1, SLAVE_A1NOC_SNOC); +DEFINE_QNODE(qhm_tsif, MASTER_TSIF, 1, 4, 1, SLAVE_A1NOC_SNOC); +DEFINE_QNODE(xm_sdc2, MASTER_SDCC_2, 1, 8, 1, SLAVE_A1NOC_SNOC); +DEFINE_QNODE(xm_sdc4, MASTER_SDCC_4, 1, 8, 1, SLAVE_A1NOC_SNOC); +DEFINE_QNODE(xm_ufs_card, MASTER_UFS_CARD, 1, 8, 1, SLAVE_A1NOC_SNOC); +DEFINE_QNODE(xm_ufs_mem, MASTER_UFS_MEM, 1, 8, 1, SLAVE_A1NOC_SNOC); +DEFINE_QNODE(xm_pcie_0, MASTER_PCIE_0, 1, 8, 1, SLAVE_ANOC_PCIE_A1NOC_SNOC); +DEFINE_QNODE(qhm_a2noc_cfg, MASTER_A2NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A2NOC); +DEFINE_QNODE(qhm_qdss_bam, MASTER_QDSS_BAM, 1, 4, 1, SLAVE_A2NOC_SNOC); +DEFINE_QNODE(qhm_qup2, MASTER_BLSP_2, 1, 4, 1, SLAVE_A2NOC_SNOC); +DEFINE_QNODE(qnm_cnoc, MASTER_CNOC_A2NOC, 1, 8, 1, SLAVE_A2NOC_SNOC); +DEFINE_QNODE(qxm_crypto, MASTER_CRYPTO, 1, 8, 1, SLAVE_A2NOC_SNOC); +DEFINE_QNODE(qxm_ipa, MASTER_IPA, 1, 8, 1, SLAVE_A2NOC_SNOC); +DEFINE_QNODE(xm_pcie3_1, MASTER_PCIE_1, 1, 8, 1, SLAVE_ANOC_PCIE_SNOC); +DEFINE_QNODE(xm_qdss_etr, MASTER_QDSS_ETR, 1, 8, 1, SLAVE_A2NOC_SNOC); 
+DEFINE_QNODE(xm_usb3_0, MASTER_USB3_0, 1, 8, 1, SLAVE_A2NOC_SNOC); +DEFINE_QNODE(xm_usb3_1, MASTER_USB3_1, 1, 8, 1, SLAVE_A2NOC_SNOC); +DEFINE_QNODE(qxm_camnoc_hf0_uncomp, MASTER_CAMNOC_HF0_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP); +DEFINE_QNODE(qxm_camnoc_hf1_uncomp, MASTER_CAMNOC_HF1_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP); +DEFINE_QNODE(qxm_camnoc_sf_uncomp, MASTER_CAMNOC_SF_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP); +DEFINE_QNODE(qhm_spdm, MASTER_SPDM, 1, 4, 1, SLAVE_CNOC_A2NOC); +DEFINE_QNODE(qhm_tic, MASTER_TIC, 1, 4, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC); +DEFINE_QNODE(qnm_snoc, MASTER_SNOC_CNOC, 1, 8, 42, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_SERVICE_CNOC); +DEFINE_QNODE(xm_qdss_dap, 
MASTER_QDSS_DAP, 1, 8, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC); +DEFINE_QNODE(qhm_cnoc, MASTER_CNOC_DC_NOC, 1, 4, 2, SLAVE_LLCC_CFG, SLAVE_MEM_NOC_CFG); +DEFINE_QNODE(acm_l3, MASTER_APPSS_PROC, 1, 16, 3, SLAVE_GNOC_SNOC, SLAVE_GNOC_MEM_NOC, SLAVE_SERVICE_GNOC); +DEFINE_QNODE(pm_gnoc_cfg, MASTER_GNOC_CFG, 1, 4, 1, SLAVE_SERVICE_GNOC); +DEFINE_QNODE(ipa_core_master, MASTER_IPA_CORE, 1, 8, 1, SLAVE_IPA_CORE); +DEFINE_QNODE(llcc_mc, MASTER_LLCC, 4, 4, 1, SLAVE_EBI1); +DEFINE_QNODE(acm_tcu, MASTER_TCU_0, 1, 8, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC); +DEFINE_QNODE(qhm_memnoc_cfg, MASTER_MEM_NOC_CFG, 1, 4, 2, SLAVE_MSS_PROC_MS_MPU_CFG, SLAVE_SERVICE_MEM_NOC); +DEFINE_QNODE(qnm_apps, MASTER_GNOC_MEM_NOC, 2, 32, 1, SLAVE_LLCC); +DEFINE_QNODE(qnm_mnoc_hf, MASTER_MNOC_HF_MEM_NOC, 2, 32, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC); +DEFINE_QNODE(qnm_mnoc_sf, MASTER_MNOC_SF_MEM_NOC, 1, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC); +DEFINE_QNODE(qnm_snoc_gc, MASTER_SNOC_GC_MEM_NOC, 1, 8, 1, SLAVE_LLCC); +DEFINE_QNODE(qnm_snoc_sf, MASTER_SNOC_SF_MEM_NOC, 1, 16, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC); +DEFINE_QNODE(qxm_gpu, MASTER_GFX3D, 2, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC); +DEFINE_QNODE(qhm_mnoc_cfg, MASTER_CNOC_MNOC_CFG, 1, 4, 1, SLAVE_SERVICE_MNOC); +DEFINE_QNODE(qxm_camnoc_hf0, 
MASTER_CAMNOC_HF0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC); +DEFINE_QNODE(qxm_camnoc_hf1, MASTER_CAMNOC_HF1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC); +DEFINE_QNODE(qxm_camnoc_sf, MASTER_CAMNOC_SF, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC); +DEFINE_QNODE(qxm_mdp0, MASTER_MDP0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC); +DEFINE_QNODE(qxm_mdp1, MASTER_MDP1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC); +DEFINE_QNODE(qxm_rot, MASTER_ROTATOR, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC); +DEFINE_QNODE(qxm_venus0, MASTER_VIDEO_P0, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC); +DEFINE_QNODE(qxm_venus1, MASTER_VIDEO_P1, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC); +DEFINE_QNODE(qxm_venus_arm9, MASTER_VIDEO_PROC, 1, 8, 1, SLAVE_MNOC_SF_MEM_NOC); +DEFINE_QNODE(qhm_snoc_cfg, MASTER_SNOC_CFG, 1, 4, 1, SLAVE_SERVICE_SNOC); +DEFINE_QNODE(qnm_aggre1_noc, MASTER_A1NOC_SNOC, 1, 16, 6, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM); +DEFINE_QNODE(qnm_aggre2_noc, MASTER_A2NOC_SNOC, 1, 16, 9, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU); +DEFINE_QNODE(qnm_gladiator_sodv, MASTER_GNOC_SNOC, 1, 8, 8, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU); +DEFINE_QNODE(qnm_memnoc, MASTER_MEM_NOC_SNOC, 1, 8, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM); +DEFINE_QNODE(qnm_pcie_anoc, MASTER_ANOC_PCIE_SNOC, 1, 16, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_QDSS_STM); +DEFINE_QNODE(qxm_pimem, MASTER_PIMEM, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM); +DEFINE_QNODE(xm_gic, MASTER_GIC, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM); +DEFINE_QNODE(qns_a1noc_snoc, SLAVE_A1NOC_SNOC, 1, 16, 1, MASTER_A1NOC_SNOC); +DEFINE_QNODE(srvc_aggre1_noc, SLAVE_SERVICE_A1NOC, 1, 4, 0); +DEFINE_QNODE(qns_pcie_a1noc_snoc, SLAVE_ANOC_PCIE_A1NOC_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC); +DEFINE_QNODE(qns_a2noc_snoc, SLAVE_A2NOC_SNOC, 1, 16, 1, 
MASTER_A2NOC_SNOC); +DEFINE_QNODE(qns_pcie_snoc, SLAVE_ANOC_PCIE_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC); +DEFINE_QNODE(srvc_aggre2_noc, SLAVE_SERVICE_A2NOC, 1, 4, 0); +DEFINE_QNODE(qns_camnoc_uncomp, SLAVE_CAMNOC_UNCOMP, 1, 32, 0); +DEFINE_QNODE(qhs_a1_noc_cfg, SLAVE_A1NOC_CFG, 1, 4, 1, MASTER_A1NOC_CFG); +DEFINE_QNODE(qhs_a2_noc_cfg, SLAVE_A2NOC_CFG, 1, 4, 1, MASTER_A2NOC_CFG); +DEFINE_QNODE(qhs_aop, SLAVE_AOP, 1, 4, 0); +DEFINE_QNODE(qhs_aoss, SLAVE_AOSS, 1, 4, 0); +DEFINE_QNODE(qhs_camera_cfg, SLAVE_CAMERA_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_clk_ctl, SLAVE_CLK_CTL, 1, 4, 0); +DEFINE_QNODE(qhs_compute_dsp_cfg, SLAVE_CDSP_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_cpr_cx, SLAVE_RBCPR_CX_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_crypto0_cfg, SLAVE_CRYPTO_0_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_dcc_cfg, SLAVE_DCC_CFG, 1, 4, 1, MASTER_CNOC_DC_NOC); +DEFINE_QNODE(qhs_ddrss_cfg, SLAVE_CNOC_DDRSS, 1, 4, 0); +DEFINE_QNODE(qhs_display_cfg, SLAVE_DISPLAY_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_glm, SLAVE_GLM, 1, 4, 0); +DEFINE_QNODE(qhs_gpuss_cfg, SLAVE_GFX3D_CFG, 1, 8, 0); +DEFINE_QNODE(qhs_imem_cfg, SLAVE_IMEM_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_ipa, SLAVE_IPA_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_mnoc_cfg, SLAVE_CNOC_MNOC_CFG, 1, 4, 1, MASTER_CNOC_MNOC_CFG); +DEFINE_QNODE(qhs_pcie0_cfg, SLAVE_PCIE_0_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_pcie_gen3_cfg, SLAVE_PCIE_1_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_pdm, SLAVE_PDM, 1, 4, 0); +DEFINE_QNODE(qhs_phy_refgen_south, SLAVE_SOUTH_PHY_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_pimem_cfg, SLAVE_PIMEM_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_prng, SLAVE_PRNG, 1, 4, 0); +DEFINE_QNODE(qhs_qdss_cfg, SLAVE_QDSS_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_qupv3_north, SLAVE_BLSP_2, 1, 4, 0); +DEFINE_QNODE(qhs_qupv3_south, SLAVE_BLSP_1, 1, 4, 0); +DEFINE_QNODE(qhs_sdc2, SLAVE_SDCC_2, 1, 4, 0); +DEFINE_QNODE(qhs_sdc4, SLAVE_SDCC_4, 1, 4, 0); +DEFINE_QNODE(qhs_snoc_cfg, SLAVE_SNOC_CFG, 1, 4, 1, MASTER_SNOC_CFG); +DEFINE_QNODE(qhs_spdm, SLAVE_SPDM_WRAPPER, 1, 4, 0); +DEFINE_QNODE(qhs_spss_cfg, SLAVE_SPSS_CFG, 1, 4, 
0); +DEFINE_QNODE(qhs_tcsr, SLAVE_TCSR, 1, 4, 0); +DEFINE_QNODE(qhs_tlmm_north, SLAVE_TLMM_NORTH, 1, 4, 0); +DEFINE_QNODE(qhs_tlmm_south, SLAVE_TLMM_SOUTH, 1, 4, 0); +DEFINE_QNODE(qhs_tsif, SLAVE_TSIF, 1, 4, 0); +DEFINE_QNODE(qhs_ufs_card_cfg, SLAVE_UFS_CARD_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_ufs_mem_cfg, SLAVE_UFS_MEM_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_usb3_0, SLAVE_USB3_0, 1, 4, 0); +DEFINE_QNODE(qhs_usb3_1, SLAVE_USB3_1, 1, 4, 0); +DEFINE_QNODE(qhs_venus_cfg, SLAVE_VENUS_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_vsense_ctrl_cfg, SLAVE_VSENSE_CTRL_CFG, 1, 4, 0); +DEFINE_QNODE(qns_cnoc_a2noc, SLAVE_CNOC_A2NOC, 1, 8, 1, MASTER_CNOC_A2NOC); +DEFINE_QNODE(srvc_cnoc, SLAVE_SERVICE_CNOC, 1, 4, 0); +DEFINE_QNODE(qhs_llcc, SLAVE_LLCC_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_memnoc, SLAVE_MEM_NOC_CFG, 1, 4, 1, MASTER_MEM_NOC_CFG); +DEFINE_QNODE(qns_gladiator_sodv, SLAVE_GNOC_SNOC, 1, 8, 1, MASTER_GNOC_SNOC); +DEFINE_QNODE(qns_gnoc_memnoc, SLAVE_GNOC_MEM_NOC, 2, 32, 1, MASTER_GNOC_MEM_NOC); +DEFINE_QNODE(srvc_gnoc, SLAVE_SERVICE_GNOC, 1, 4, 0); +DEFINE_QNODE(ipa_core_slave, SLAVE_IPA_CORE, 1, 8, 0); +DEFINE_QNODE(ebi, SLAVE_EBI1, 4, 4, 0); +DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4, 0); +DEFINE_QNODE(qns_apps_io, SLAVE_MEM_NOC_GNOC, 1, 32, 0); +DEFINE_QNODE(qns_llcc, SLAVE_LLCC, 4, 16, 1, MASTER_LLCC); +DEFINE_QNODE(qns_memnoc_snoc, SLAVE_MEM_NOC_SNOC, 1, 8, 1, MASTER_MEM_NOC_SNOC); +DEFINE_QNODE(srvc_memnoc, SLAVE_SERVICE_MEM_NOC, 1, 4, 0); +DEFINE_QNODE(qns2_mem_noc, SLAVE_MNOC_SF_MEM_NOC, 1, 32, 1, MASTER_MNOC_SF_MEM_NOC); +DEFINE_QNODE(qns_mem_noc_hf, SLAVE_MNOC_HF_MEM_NOC, 2, 32, 1, MASTER_MNOC_HF_MEM_NOC); +DEFINE_QNODE(srvc_mnoc, SLAVE_SERVICE_MNOC, 1, 4, 0); +DEFINE_QNODE(qhs_apss, SLAVE_APPSS, 1, 8, 0); +DEFINE_QNODE(qns_cnoc, SLAVE_SNOC_CNOC, 1, 8, 1, MASTER_SNOC_CNOC); +DEFINE_QNODE(qns_memnoc_gc, SLAVE_SNOC_MEM_NOC_GC, 1, 8, 1, MASTER_SNOC_GC_MEM_NOC); +DEFINE_QNODE(qns_memnoc_sf, SLAVE_SNOC_MEM_NOC_SF, 1, 16, 1, MASTER_SNOC_SF_MEM_NOC); 
+DEFINE_QNODE(qxs_imem, SLAVE_IMEM, 1, 8, 0); +DEFINE_QNODE(qxs_pcie, SLAVE_PCIE_0, 1, 8, 0); +DEFINE_QNODE(qxs_pcie_gen3, SLAVE_PCIE_1, 1, 8, 0); +DEFINE_QNODE(qxs_pimem, SLAVE_PIMEM, 1, 8, 0); +DEFINE_QNODE(srvc_snoc, SLAVE_SERVICE_SNOC, 1, 4, 0); +DEFINE_QNODE(xs_qdss_stm, SLAVE_QDSS_STM, 1, 4, 0); +DEFINE_QNODE(xs_sys_tcu_cfg, SLAVE_TCU, 1, 8, 0); + +DEFINE_QBCM(bcm_acv, "ACV", 1, &ebi); +DEFINE_QBCM(bcm_mc0, "MC0", 1, &ebi); +DEFINE_QBCM(bcm_sh0, "SH0", 1, &qns_llcc); +DEFINE_QBCM(bcm_mm0, "MM0", 1, &qns_mem_noc_hf); +DEFINE_QBCM(bcm_sh1, "SH1", 1, &qns_apps_io); +DEFINE_QBCM(bcm_mm1, "MM1", 7, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1); +DEFINE_QBCM(bcm_sh2, "SH2", 1, &qns_memnoc_snoc); +DEFINE_QBCM(bcm_mm2, "MM2", 1, &qns2_mem_noc); +DEFINE_QBCM(bcm_sh3, "SH3", 1, &acm_tcu); +DEFINE_QBCM(bcm_mm3, "MM3", 5, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9); +DEFINE_QBCM(bcm_sh5, "SH5", 1, &qnm_apps); +DEFINE_QBCM(bcm_sn0, "SN0", 1, &qns_memnoc_sf); +DEFINE_QBCM(bcm_ce0, "CE0", 1, &qxm_crypto); +DEFINE_QBCM(bcm_ip0, "IP0", 1, &ipa_core_slave); +DEFINE_QBCM(bcm_cn0, "CN0", 47, &qhm_spdm, &qhm_tic, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pcie0_cfg, &qhs_pcie_gen3_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc); +DEFINE_QBCM(bcm_qup0, "QUP0", 2, &qhm_qup1, &qhm_qup2); +DEFINE_QBCM(bcm_sn1, "SN1", 1, &qxs_imem); 
+DEFINE_QBCM(bcm_sn2, "SN2", 1, &qns_memnoc_gc); +DEFINE_QBCM(bcm_sn3, "SN3", 1, &qns_cnoc); +DEFINE_QBCM(bcm_sn4, "SN4", 1, &qxm_pimem); +DEFINE_QBCM(bcm_sn5, "SN5", 1, &xs_qdss_stm); +DEFINE_QBCM(bcm_sn6, "SN6", 3, &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg); +DEFINE_QBCM(bcm_sn7, "SN7", 1, &qxs_pcie); +DEFINE_QBCM(bcm_sn8, "SN8", 1, &qxs_pcie_gen3); +DEFINE_QBCM(bcm_sn9, "SN9", 2, &srvc_aggre1_noc, &qnm_aggre1_noc); +DEFINE_QBCM(bcm_sn11, "SN11", 2, &srvc_aggre2_noc, &qnm_aggre2_noc); +DEFINE_QBCM(bcm_sn12, "SN12", 2, &qnm_gladiator_sodv, &xm_gic); +DEFINE_QBCM(bcm_sn14, "SN14", 1, &qnm_pcie_anoc); +DEFINE_QBCM(bcm_sn15, "SN15", 1, &qnm_memnoc); + +static struct qcom_icc_node *rsc_hlos_nodes[] = { + &acm_l3, + &acm_tcu, + &ipa_core_master, + &llcc_mc, + &pm_gnoc_cfg, + &qhm_a1noc_cfg, + &qhm_a2noc_cfg, + &qhm_cnoc, + &qhm_memnoc_cfg, + &qhm_mnoc_cfg, + &qhm_qdss_bam, + &qhm_qup1, + &qhm_qup2, + &qhm_snoc_cfg, + &qhm_spdm, + &qhm_tic, + &qhm_tsif, + &qnm_aggre1_noc, + &qnm_aggre2_noc, + &qnm_apps, + &qnm_cnoc, + &qnm_gladiator_sodv, + &qnm_memnoc, + &qnm_mnoc_hf, + &qnm_mnoc_sf, + &qnm_pcie_anoc, + &qnm_snoc, + &qnm_snoc_gc, + &qnm_snoc_sf, + &qxm_camnoc_hf0, + &qxm_camnoc_hf0_uncomp, + &qxm_camnoc_hf1, + &qxm_camnoc_hf1_uncomp, + &qxm_camnoc_sf, + &qxm_camnoc_sf_uncomp, + &qxm_crypto, + &qxm_gpu, + &qxm_ipa, + &qxm_mdp0, + &qxm_mdp1, + &qxm_pimem, + &qxm_rot, + &qxm_venus0, + &qxm_venus1, + &qxm_venus_arm9, + &xm_gic, + &xm_pcie3_1, + &xm_pcie_0, + &xm_qdss_dap, + &xm_qdss_etr, + &xm_sdc2, + &xm_sdc4, + &xm_ufs_card, + &xm_ufs_mem, + &xm_usb3_0, + &xm_usb3_1, + &ebi, + &ipa_core_slave, + &qhs_a1_noc_cfg, + &qhs_a2_noc_cfg, + &qhs_aop, + &qhs_aoss, + &qhs_apss, + &qhs_camera_cfg, + &qhs_clk_ctl, + &qhs_compute_dsp_cfg, + &qhs_cpr_cx, + &qhs_crypto0_cfg, + &qhs_dcc_cfg, + &qhs_ddrss_cfg, + &qhs_display_cfg, + &qhs_glm, + &qhs_gpuss_cfg, + &qhs_imem_cfg, + &qhs_ipa, + &qhs_llcc, + &qhs_mdsp_ms_mpu_cfg, + &qhs_memnoc, + &qhs_mnoc_cfg, + &qhs_pcie0_cfg, + 
&qhs_pcie_gen3_cfg, + &qhs_pdm, + &qhs_phy_refgen_south, + &qhs_pimem_cfg, + &qhs_prng, + &qhs_qdss_cfg, + &qhs_qupv3_north, + &qhs_qupv3_south, + &qhs_sdc2, + &qhs_sdc4, + &qhs_snoc_cfg, + &qhs_spdm, + &qhs_spss_cfg, + &qhs_tcsr, + &qhs_tlmm_north, + &qhs_tlmm_south, + &qhs_tsif, + &qhs_ufs_card_cfg, + &qhs_ufs_mem_cfg, + &qhs_usb3_0, + &qhs_usb3_1, + &qhs_venus_cfg, + &qhs_vsense_ctrl_cfg, + &qns2_mem_noc, + &qns_a1noc_snoc, + &qns_a2noc_snoc, + &qns_apps_io, + &qns_camnoc_uncomp, + &qns_cnoc, + &qns_cnoc_a2noc, + &qns_gladiator_sodv, + &qns_gnoc_memnoc, + &qns_llcc, + &qns_mem_noc_hf, + &qns_memnoc_gc, + &qns_memnoc_sf, + &qns_memnoc_snoc, + &qns_pcie_a1noc_snoc, + &qns_pcie_snoc, + &qxs_imem, + &qxs_pcie, + &qxs_pcie_gen3, + &qxs_pimem, + &srvc_aggre1_noc, + &srvc_aggre2_noc, + &srvc_cnoc, + &srvc_gnoc, + &srvc_memnoc, + &srvc_mnoc, + &srvc_snoc, + &xs_qdss_stm, + &xs_sys_tcu_cfg, +}; + +static struct qcom_icc_bcm *rsc_hlos_bcms[] = { + &bcm_acv, + &bcm_mc0, + &bcm_sh0, + &bcm_mm0, + &bcm_sh1, + &bcm_mm1, + &bcm_sh2, + &bcm_mm2, + &bcm_sh3, + &bcm_mm3, + &bcm_sh5, + &bcm_sn0, + &bcm_ce0, + &bcm_ip0, + &bcm_cn0, + &bcm_qup0, + &bcm_sn1, + &bcm_sn2, + &bcm_sn3, + &bcm_sn4, + &bcm_sn5, + &bcm_sn6, + &bcm_sn7, + &bcm_sn8, + &bcm_sn9, + &bcm_sn11, + &bcm_sn12, + &bcm_sn14, + &bcm_sn15, +}; + +static struct qcom_icc_desc sdm845_rsc_hlos = { + .nodes = rsc_hlos_nodes, + .num_nodes = ARRAY_SIZE(rsc_hlos_nodes), + .bcms = rsc_hlos_bcms, + .num_bcms = ARRAY_SIZE(rsc_hlos_bcms), +}; + +static int qcom_icc_init(struct icc_node *node) +{ + /* TODO: init qos and priority */ + + return 0; +} + +static int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev) +{ + struct bcm_db buf = {0}; + struct qcom_icc_node *qn; + int ret, i; + + bcm->addr = cmd_db_read_addr(bcm->name); + if (!bcm->addr) { + dev_err(dev, "%s could not find RPMh address\n", + bcm->name); + return -EINVAL; + } + + if (!cmd_db_read_aux_data_len(bcm->name)) { + dev_err(dev, "%s command db missing aux 
data\n", + bcm->name); + return -EINVAL; + } + + ret = cmd_db_read_aux_data(bcm->name, (u8 *)&buf, + sizeof(struct bcm_db)); + if (ret < 0) { + dev_err(dev, "%s command db read error (%d)\n", + bcm->name, ret); + return ret; + } + + bcm->aux_data = buf; + + for (i = 0; i < bcm->num_nodes; i++){ + qn = bcm->nodes[i]; + qn->bcms[qn->num_bcms] = bcm; + qn->num_bcms++; + } + + return 0; +} + +static int qcom_tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, + u64 vote_y, u32 addr, bool commit) +{ + int ret = 0; + bool valid = true; + + if (!cmd) + return ret; + + if (vote_x == 0 && vote_y == 0) + valid = false; + + if (vote_x > BCM_TCS_CMD_VOTE_MASK) + vote_x = BCM_TCS_CMD_VOTE_MASK; + + if (vote_y > BCM_TCS_CMD_VOTE_MASK) + vote_y = BCM_TCS_CMD_VOTE_MASK; + + cmd->addr = addr; + cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y); + cmd->wait = commit; + + return ret; +} + +static void qcom_tcs_list_gen(struct list_head *bcm_list, + struct tcs_cmd *tcs_list, int *n) +{ + struct qcom_icc_bcm *bcm; + bool commit; + size_t idx = 0, batch = 0, cur_vcd_size = 0; + + memset(n, 0, sizeof(int) * SDM845_MAX_VCD); + + list_for_each_entry(bcm, bcm_list, list){ + commit = false; + cur_vcd_size++; + if((bcm->aux_data.vcd != + list_next_entry(bcm, list)->aux_data.vcd) || + list_is_last(&bcm->list, bcm_list)) { + commit = true; + cur_vcd_size = 0; + } + qcom_tcs_cmd_gen(&tcs_list[idx], bcm->vote_x, bcm->vote_y, + bcm->addr, commit); + idx++; + n[batch]++; + if (n[batch] >= MAX_RPMH_PAYLOAD && commit == false) { + n[batch] -= cur_vcd_size; + batch++; + n[batch] = cur_vcd_size; + } + } + n[batch+1] = 0; +} + +static void qcom_icc_bcm_aggregate(struct qcom_icc_bcm *bcm) +{ + size_t i; + u64 agg_avg = 0; + u64 agg_peak = 0; + + for (i = 0; i < bcm->num_nodes; i++){ + agg_avg = max(agg_avg, + bcm->nodes[i]->sum_avg * bcm->aux_data.width / + (bcm->nodes[i]->buswidth * bcm->nodes[i]->channels)); + agg_peak = max(agg_peak, + bcm->nodes[i]->max_peak * bcm->aux_data.width / + 
bcm->nodes[i]->buswidth); + } + + bcm->vote_x = (u64)(agg_avg * 1000ULL / bcm->aux_data.unit); + bcm->vote_y = (u64)(agg_peak * 1000ULL / bcm->aux_data.unit); + bcm->dirty = true; +} + +static void qcom_icc_aggregate(struct icc_node *node) +{ + size_t i; + struct icc_req *r; + struct qcom_icc_node *qn; + u64 agg_avg = 0; + u64 agg_peak = 0; + + qn = node->data; + + hlist_for_each_entry(r, &node->req_list, req_node) { + agg_avg += r->avg_bw; + agg_peak = max(agg_peak, (u64)r->peak_bw); + } + + qn->sum_avg = agg_avg; + qn->max_peak = agg_peak; + + for(i = 0; i < qn->num_bcms; i++) { + qcom_icc_bcm_aggregate(qn->bcms[i]); + } +} + +static int qcom_icc_set(struct icc_node *src, struct icc_node *dst, + u32 avg, u32 peak) +{ + struct qcom_icc_provider *qp; + struct qcom_icc_node *qn; + struct icc_node *node; + struct icc_provider *provider; + struct tcs_cmd amc[SDM845_MAX_BCMS]; + struct list_head commit_list; + int n[SDM845_MAX_VCD]; + int ret = 0, i; + + if (!src) + node = dst; + else + node = src; + + qn = node->data; + provider = node->provider; + qp = to_qcom_provider(node->provider); + + INIT_LIST_HEAD(&commit_list); + + for(i = 0; i < qp->num_bcms; i++){ + if(qp->bcms[i]->dirty == true) { + list_add_tail(&qp->bcms[i]->list, &commit_list); + qp->bcms[i]->dirty = false; + } + } + + qcom_tcs_list_gen(&commit_list, amc, n); + + if (!n[0]) + return ret; + + ret = rpmh_invalidate(qp->rpmh_client); + if (ret){ + pr_err("Error invalidating RPMH client (%d)\n", ret); + return ret; + } + + ret = rpmh_write_batch(qp->rpmh_client, RPMH_ACTIVE_ONLY_STATE, amc, n); + if (ret){ + pr_err("Error sending AMC RPMH requests (%d)\n", ret); + return ret; + } + + /* TODO: collect and send wake and sleep sets */ + return ret; +} + +static int cmp_vcd(const void *_l, const void *_r) +{ + const struct qcom_icc_bcm **l = (const struct qcom_icc_bcm**)_l; + const struct qcom_icc_bcm **r = (const struct qcom_icc_bcm**)_r; + + if (l[0]->aux_data.vcd < r[0]->aux_data.vcd) + return -1; + else if 
(l[0]->aux_data.vcd == r[0]->aux_data.vcd) + return 0; + else + return 1; +} + +static int qnoc_probe(struct platform_device *pdev) +{ + const struct qcom_icc_desc *desc; + struct qcom_icc_node **qnodes; + struct qcom_icc_provider *qp; + struct resource *res; + struct icc_provider *provider; + size_t num_nodes, i; + int ret; + + desc = of_device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + + qnodes = desc->nodes; + num_nodes = desc->num_nodes; + + qcom_dev = &pdev->dev; + + qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL); + if (!qp) + return -ENOMEM; + + provider = &qp->provider; + provider->dev = &pdev->dev; + provider->set = &qcom_icc_set; + provider->aggregate = &qcom_icc_aggregate; + INIT_LIST_HEAD(&provider->nodes); + provider->data = qp; + + qp->rpmh_client = rpmh_get_client(pdev); + qp->bcms = desc->bcms; + qp->num_bcms = desc->num_bcms; + + ret = icc_provider_add(provider); + if (ret) { + dev_err(&pdev->dev, "error adding interconnect provider\n"); + return ret; + } + + for (i = 0; i < num_nodes; i++) { + struct icc_node *node; + int ret; + size_t j; + + node = icc_node_create(qnodes[i]->id); + if (IS_ERR(node)) { + ret = PTR_ERR(node); + goto err; + } + + node->name = qnodes[i]->name; + node->data = qnodes[i]; + icc_node_add(node, provider); + + dev_dbg(&pdev->dev, "registered node %p %s %d\n", node, + qnodes[i]->name, node->id); + + /* populate links */ + for (j = 0; j < qnodes[i]->num_links; j++) + if (qnodes[i]->links[j]) + icc_link_create(node, qnodes[i]->links[j]); + + ret = qcom_icc_init(node); + if (ret) + dev_err(&pdev->dev, "%s init error (%d)\n", node->name, + ret); + } + + for (i = 0; i < qp->num_bcms; i++) { + qcom_icc_bcm_init(qp->bcms[i], &pdev->dev); + } + + sort(qp->bcms, qp->num_bcms, sizeof(*qp->bcms), cmp_vcd, NULL); + + platform_set_drvdata(pdev, provider); + dev_info(&pdev->dev, "Registered SDM845 ICC\n"); + + return ret; +err: + icc_provider_del(provider); + return ret; +} + +static int qnoc_remove(struct 
platform_device *pdev) +{ + struct icc_provider *provider = platform_get_drvdata(pdev); + + icc_provider_del(provider); + + return 0; +} + +static const struct of_device_id qnoc_of_match[] = { + { .compatible = "qcom,rsc-hlos-sdm845", .data = &sdm845_rsc_hlos }, + { }, +}; +MODULE_DEVICE_TABLE(of, qnoc_of_match); + +static struct platform_driver qnoc_driver = { + .probe = qnoc_probe, + .remove = qnoc_remove, + .driver = { + .name = "qnoc-sdm845", + .of_match_table = qnoc_of_match, + }, +}; +module_platform_driver(qnoc_driver); + +MODULE_AUTHOR("David Dai <daidavid1@codeaurora.org>"); +MODULE_DESCRIPTION("Qualcomm sdm845 NoC driver"); +MODULE_LICENSE("GPL v2");
Introduce a Qualcomm SDM845-specific provider driver using the interconnect framework. Signed-off-by: David Dai <daidavid1@codeaurora.org> --- drivers/interconnect/qcom/Kconfig | 6 + drivers/interconnect/qcom/Makefile | 1 + drivers/interconnect/qcom/qcom-icc-ids.h | 142 ++++++ drivers/interconnect/qcom/sdm845.c | 815 +++++++++++++++++++++++++++++++ 4 files changed, 964 insertions(+) create mode 100644 drivers/interconnect/qcom/qcom-icc-ids.h create mode 100644 drivers/interconnect/qcom/sdm845.c