diff mbox series

[v3,1/5] dt-bindings: pci: Add Sophgo SG2042 PCIe host

Message ID 5a784afde48c44b5a8f376f02c5f30ccff8a3312.1736923025.git.unicorn_wang@outlook.com (mailing list archive)
State New
Headers show
Series Add PCIe support to Sophgo SG2042 SoC | expand

Checks

Context Check Description
conchuod/vmtest-for-next-PR fail PR summary
conchuod/patch-1-test-1 success .github/scripts/patches/tests/build_rv32_defconfig.sh took 105.49s
conchuod/patch-1-test-2 success .github/scripts/patches/tests/build_rv64_clang_allmodconfig.sh took 996.36s
conchuod/patch-1-test-3 success .github/scripts/patches/tests/build_rv64_gcc_allmodconfig.sh took 1168.81s
conchuod/patch-1-test-4 success .github/scripts/patches/tests/build_rv64_nommu_k210_defconfig.sh took 16.50s
conchuod/patch-1-test-5 success .github/scripts/patches/tests/build_rv64_nommu_virt_defconfig.sh took 17.77s
conchuod/patch-1-test-6 warning .github/scripts/patches/tests/checkpatch.sh took 0.56s
conchuod/patch-1-test-7 success .github/scripts/patches/tests/dtb_warn_rv64.sh took 37.92s
conchuod/patch-1-test-8 success .github/scripts/patches/tests/header_inline.sh took 0.01s
conchuod/patch-1-test-9 success .github/scripts/patches/tests/kdoc.sh took 0.54s
conchuod/patch-1-test-10 success .github/scripts/patches/tests/module_param.sh took 0.01s
conchuod/patch-1-test-11 success .github/scripts/patches/tests/verify_fixes.sh took 0.00s
conchuod/patch-1-test-12 success .github/scripts/patches/tests/verify_signedoff.sh took 0.02s

Commit Message

Chen Wang Jan. 15, 2025, 7:06 a.m. UTC
From: Chen Wang <unicorn_wang@outlook.com>

Add binding for Sophgo SG2042 PCIe host controller.

Signed-off-by: Chen Wang <unicorn_wang@outlook.com>
---
 .../bindings/pci/sophgo,sg2042-pcie-host.yaml | 147 ++++++++++++++++++
 1 file changed, 147 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/pci/sophgo,sg2042-pcie-host.yaml

Comments

Manivannan Sadhasivam Jan. 19, 2025, 11:44 a.m. UTC | #1
On Wed, Jan 15, 2025 at 03:06:37PM +0800, Chen Wang wrote:
> From: Chen Wang <unicorn_wang@outlook.com>
> 
> Add binding for Sophgo SG2042 PCIe host controller.
> 
> Signed-off-by: Chen Wang <unicorn_wang@outlook.com>
> ---
>  .../bindings/pci/sophgo,sg2042-pcie-host.yaml | 147 ++++++++++++++++++
>  1 file changed, 147 insertions(+)
>  create mode 100644 Documentation/devicetree/bindings/pci/sophgo,sg2042-pcie-host.yaml
> 
> diff --git a/Documentation/devicetree/bindings/pci/sophgo,sg2042-pcie-host.yaml b/Documentation/devicetree/bindings/pci/sophgo,sg2042-pcie-host.yaml
> new file mode 100644
> index 000000000000..f98e71822144
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/pci/sophgo,sg2042-pcie-host.yaml
> @@ -0,0 +1,147 @@
> +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
> +%YAML 1.2
> +---
> +$id: http://devicetree.org/schemas/pci/sophgo,sg2042-pcie-host.yaml#
> +$schema: http://devicetree.org/meta-schemas/core.yaml#
> +
> +title: Sophgo SG2042 PCIe Host (Cadence PCIe Wrapper)
> +
> +description:
> +  Sophgo SG2042 PCIe host controller is based on the Cadence PCIe core.
> +
> +maintainers:
> +  - Chen Wang <unicorn_wang@outlook.com>
> +
> +properties:
> +  compatible:
> +    const: sophgo,sg2042-pcie-host
> +
> +  reg:
> +    maxItems: 2
> +
> +  reg-names:
> +    items:
> +      - const: reg
> +      - const: cfg
> +
> +  vendor-id:
> +    const: 0x1f1c
> +
> +  device-id:
> +    const: 0x2042
> +
> +  msi:
> +    type: object
> +    $ref: /schemas/interrupt-controller/msi-controller.yaml#
> +    unevaluatedProperties: false
> +
> +    properties:
> +      compatible:
> +        items:
> +          - const: sophgo,sg2042-pcie-msi
> +
> +      interrupts:
> +        maxItems: 1
> +
> +      interrupt-names:
> +        const: msi
> +
> +  msi-parent: true
> +
> +  sophgo,link-id:
> +    $ref: /schemas/types.yaml#/definitions/uint32
> +    description: |
> +      SG2042 uses Cadence IP, every IP is composed of 2 cores (called link0
> +      & link1 as Cadence's term). Each core corresponds to a host bridge,
> +      and each host bridge has only one root port. Their configuration
> +      registers are completely independent. SG2042 integrates two Cadence IPs,
> +      so there can actually be up to four host bridges. "sophgo,link-id" is
> +      used to identify which core/link the PCIe host bridge node corresponds to.
> +
> +      The Cadence IP has two modes of operation, selected by a strap pin.
> +
> +      In the single-link mode, the Cadence PCIe core instance associated
> +      with Link0 is connected to all the lanes and the Cadence PCIe core
> +      instance associated with Link1 is inactive.
> +
> +      In the dual-link mode, the Cadence PCIe core instance associated
> +      with Link0 is connected to the lower half of the lanes and the
> +      Cadence PCIe core instance associated with Link1 is connected to
> +      the upper half of the lanes.
> +
> +      SG2042 contains 2 Cadence IPs and configures the Cores as below:
> +
> +                     +-- Core (Link0) <---> pcie_rc0  +-----------------+
> +                     |                                |                 |
> +      Cadence IP 1 --+                                | cdns_pcie0_ctrl |
> +                     |                                |                 |
> +                     +-- Core (Link1) <---> disabled  +-----------------+
> +
> +                     +-- Core (Link0) <---> pcie_rc1  +-----------------+
> +                     |                                |                 |
> +      Cadence IP 2 --+                                | cdns_pcie1_ctrl |
> +                     |                                |                 |
> +                     +-- Core (Link1) <---> pcie_rc2  +-----------------+
> +
> +      pcie_rcX is PCIe node ("sophgo,sg2042-pcie-host") defined in DTS.
> +
> +      Sophgo defines some new register files to add support for their MSI
> +      controller inside PCIe. These new register files are defined in DTS as
> +      syscon node ("sophgo,sg2042-pcie-ctrl"), i.e. "cdns_pcie0_ctrl" /
> +      "cdns_pcie1_ctrl". cdns_pcieX_ctrl contains some registers shared by
> +      pcie_rcX, even two RC (Link)s may share different bits of the same
> +      register. For example, cdns_pcie1_ctrl contains registers shared by
> +      link0 & link1 for Cadence IP 2.
> +
> +      "sophgo,link-id" is defined to distinguish the two RC's in one Cadence IP,
> +      so we can know what registers (bits) we should use.
> +
> +  sophgo,syscon-pcie-ctrl:
> +    $ref: /schemas/types.yaml#/definitions/phandle
> +    description:
> +      Phandle to the PCIe System Controller DT node. It's required to
> +      access some MSI operation registers shared by PCIe RCs.
> +
> +allOf:
> +  - $ref: cdns-pcie-host.yaml#
> +
> +required:
> +  - compatible
> +  - reg
> +  - reg-names
> +  - vendor-id
> +  - device-id
> +  - sophgo,link-id
> +  - sophgo,syscon-pcie-ctrl
> +
> +unevaluatedProperties: false
> +
> +examples:
> +  - |
> +    #include <dt-bindings/interrupt-controller/irq.h>
> +
> +    pcie@62000000 {
> +      compatible = "sophgo,sg2042-pcie-host";
> +      device_type = "pci";
> +      reg = <0x62000000  0x00800000>,
> +            <0x48000000  0x00001000>;

Use single space between address and size.

> +      reg-names = "reg", "cfg";
> +      #address-cells = <3>;
> +      #size-cells = <2>;
> +      ranges = <0x81000000 0 0x00000000 0xde000000 0 0x00010000>,
> +               <0x82000000 0 0xd0400000 0xd0400000 0 0x0d000000>;

For sure you don't need to set 'relocatable' flag for both regions.

> +      bus-range = <0x00 0xff>;
> +      vendor-id = <0x1f1c>;
> +      device-id = <0x2042>;

As Bjorn explained in v2, these properties need to be moved to PCI root port
node. Your argument of a single root port node for a host bridge doesn't add up as
we have found that describing the root port properties in host bridge only
creates issues.

Btw, we are migrating the existing single RP platforms too to root port node.

> +      cdns,no-bar-match-nbits = <48>;
> +      sophgo,link-id = <0>;
> +      sophgo,syscon-pcie-ctrl = <&cdns_pcie1_ctrl>;

Where is the num-lanes property?

> +      msi-parent = <&msi_pcie>;
> +      msi_pcie: msi {

'msi' is not a standard node name. 'interrupt-controller' is what is usually
used to describe the MSI node.

Btw, is the MSI controller a separate IP inside the host bridge? If not, there
would be no need to add a separate node. Most of the host bridge IPs implementing
MSI controller, do not use a separate node.

- Mani
Chen Wang Jan. 22, 2025, 12:52 p.m. UTC | #2
On 2025/1/19 19:44, Manivannan Sadhasivam wrote:
> On Wed, Jan 15, 2025 at 03:06:37PM +0800, Chen Wang wrote:
>> From: Chen Wang <unicorn_wang@outlook.com>
>>
>> Add binding for Sophgo SG2042 PCIe host controller.
>>
>> Signed-off-by: Chen Wang <unicorn_wang@outlook.com>
>> ---
>>   .../bindings/pci/sophgo,sg2042-pcie-host.yaml | 147 ++++++++++++++++++
>>   1 file changed, 147 insertions(+)
>>   create mode 100644 Documentation/devicetree/bindings/pci/sophgo,sg2042-pcie-host.yaml

[......]

>> +examples:
>> +  - |
>> +    #include <dt-bindings/interrupt-controller/irq.h>
>> +
>> +    pcie@62000000 {
>> +      compatible = "sophgo,sg2042-pcie-host";
>> +      device_type = "pci";
>> +      reg = <0x62000000  0x00800000>,
>> +            <0x48000000  0x00001000>;
> Use single space between address and size.
ok, thanks.
>> +      reg-names = "reg", "cfg";
>> +      #address-cells = <3>;
>> +      #size-cells = <2>;
>> +      ranges = <0x81000000 0 0x00000000 0xde000000 0 0x00010000>,
>> +               <0x82000000 0 0xd0400000 0xd0400000 0 0x0d000000>;
> For sure you don't need to set 'relocatable' flag for both regions.
ok, I will correct this in next version.
>> +      bus-range = <0x00 0xff>;
>> +      vendor-id = <0x1f1c>;
>> +      device-id = <0x2042>;
> As Bjorn explained in v2, these properties need to be moved to PCI root port
> node. Your argument of a single root port node for a host bridge doesn't add as
> we have found that describing the root port properties in host bridge only
> creates issues.

Got it. I will try to change this in next version.

> Btw, we are migrating the existing single RP platforms too to root port node.
>
>> +      cdns,no-bar-match-nbits = <48>;
>> +      sophgo,link-id = <0>;
>> +      sophgo,syscon-pcie-ctrl = <&cdns_pcie1_ctrl>;
> Where is the num-lanes property?
Is this num-lanes a must-have property? The lane number of each link on 
the SG2042 is hard-coded in the firmware, so it seems meaningless to 
configure it.
>> +      msi-parent = <&msi_pcie>;
>> +      msi_pcie: msi {
> 'msi' is not a standard node name. 'interrupt-controller' is what is usually used
> to describe the MSI node.
OK. I will correct this.
> Btw, is the MSI controller a separate IP inside the host bridge? If not, there
> would be no need to add a separate node. Most of the host bridge IPs implementing
> MSI controller, do not use a separate node.

Yes, it's a separate IP inside the host bridge.

> - Mani

Thanks,

Chen
Manivannan Sadhasivam Jan. 22, 2025, 5:21 p.m. UTC | #3
On Wed, Jan 22, 2025 at 08:52:39PM +0800, Chen Wang wrote:
> 
> On 2025/1/19 19:44, Manivannan Sadhasivam wrote:
> > On Wed, Jan 15, 2025 at 03:06:37PM +0800, Chen Wang wrote:
> > > From: Chen Wang <unicorn_wang@outlook.com>
> > > 
> > > Add binding for Sophgo SG2042 PCIe host controller.
> > > 
> > > Signed-off-by: Chen Wang <unicorn_wang@outlook.com>
> > > ---
> > >   .../bindings/pci/sophgo,sg2042-pcie-host.yaml | 147 ++++++++++++++++++
> > >   1 file changed, 147 insertions(+)
> > >   create mode 100644 Documentation/devicetree/bindings/pci/sophgo,sg2042-pcie-host.yaml
> 
> [......]
> 
> > > +examples:
> > > +  - |
> > > +    #include <dt-bindings/interrupt-controller/irq.h>
> > > +
> > > +    pcie@62000000 {
> > > +      compatible = "sophgo,sg2042-pcie-host";
> > > +      device_type = "pci";
> > > +      reg = <0x62000000  0x00800000>,
> > > +            <0x48000000  0x00001000>;
> > Use single space between address and size.
> ok, thanks.
> > > +      reg-names = "reg", "cfg";
> > > +      #address-cells = <3>;
> > > +      #size-cells = <2>;
> > > +      ranges = <0x81000000 0 0x00000000 0xde000000 0 0x00010000>,
> > > +               <0x82000000 0 0xd0400000 0xd0400000 0 0x0d000000>;
> > For sure you don't need to set 'relocatable' flag for both regions.
> ok, I will correct this in next version.
> > > +      bus-range = <0x00 0xff>;
> > > +      vendor-id = <0x1f1c>;
> > > +      device-id = <0x2042>;
> > As Bjorn explained in v2, these properties need to be moved to PCI root port
> > node. Your argument of a single root port node for a host bridge doesn't add up as
> > we have found that describing the root port properties in host bridge only
> > creates issues.
> 
> Got it. I will try to change this in next version.
> 
> > Btw, we are migrating the existing single RP platforms too to root port node.
> > 
> > > +      cdns,no-bar-match-nbits = <48>;
> > > +      sophgo,link-id = <0>;
> > > +      sophgo,syscon-pcie-ctrl = <&cdns_pcie1_ctrl>;
> > Where is the num-lanes property?
> Is this num-lanes a must-have property? The lane number of each link on the
> SG2042 is hard-coded in the firmware, so it seems meaningless to configure
> it.

It is not a requirement, but most of the controllers define it so that the
drivers can use it to set Max Link Width, Link speed etc.... You can skip it if
you want.

> > > +      msi-parent = <&msi_pcie>;
> > > +      msi_pcie: msi {
> > 'msi' is not a standard node name. 'interrupt-controller' is what is usually used
> > to describe the MSI node.
> OK. I will correct this.

There is also 'msi-controller' node name used commonly, but it is not
documented in the devicetree spec. Maybe you can use it instead and I'll add it
to the spec since MSI and interrupt controllers are two distinct controllers.

> > Btw, is the MSI controller a separate IP inside the host bridge? If not, there
> > would be no need to add a separate node. Most of the host bridge IPs implementing
> > MSI controller, do not use a separate node.
> 
> Yes, it's a separate IP inside the host bridge.
> 

Ok.
Bjorn Helgaas Jan. 22, 2025, 10:21 p.m. UTC | #4
On Wed, Jan 15, 2025 at 03:06:37PM +0800, Chen Wang wrote:
> From: Chen Wang <unicorn_wang@outlook.com>
> 
> Add binding for Sophgo SG2042 PCIe host controller.

> +  sophgo,link-id:
> +    $ref: /schemas/types.yaml#/definitions/uint32
> +    description: |
> +      SG2042 uses Cadence IP, every IP is composed of 2 cores (called link0
> +      & link1 as Cadence's term). Each core corresponds to a host bridge,
> +      and each host bridge has only one root port. Their configuration
> +      registers are completely independent. SG2042 integrates two Cadence IPs,
> +      so there can actually be up to four host bridges. "sophgo,link-id" is
> +      used to identify which core/link the PCIe host bridge node corresponds to.

IIUC, the registers of Cadence IP 1 and IP 2 are completely
independent, and if you describe both of them, you would have separate
"pcie@62000000" stanzas with separate 'reg' and 'ranges' properties.

From the driver, it does not look like the registers for Link0 and
Link1 are independent, since the driver claims the
"sophgo,sg2042-pcie-host", which includes two Cores, and it tests
pcie->link_id to select the correct register address and bit mask.

"sophgo,link-id" corresponds to Cadence documentation, but I think it
is somewhat misleading in the binding because a PCIe "Link" refers to
the downstream side of a Root Port.  If we use "link-id" to identify
either Core0 or Core1 of a Cadence IP, it sort of bakes in the
idea that there can never be more than one Root Port per Core.

Since each Core is the root of a separate PCI hierarchy, it seems like
maybe there should be a stanza for the Core so there's a place where
per-hierarchy things like "linux,pci-domain" properties could go,
e.g.,

  pcie@62000000 {		// IP 1, single-link mode
    compatible = "sophgo,sg2042-pcie-host";
    reg = <...>;
    ranges = <...>;

    core0 {
      sophgo,core-id = <0>;
      linux,pci-domain = <0>;

      port {
        num-lanes = <4>;	// all lanes
      };
    };
  };

  pcie@82000000 {		// IP 2, dual-link mode
    compatible = "sophgo,sg2042-pcie-host";
    reg = <...>;
    ranges = <...>;

    core0 {
      sophgo,core-id = <0>;
      linux,pci-domain = <1>;

      port {
        num-lanes = <2>;	// half of lanes
      };
    };

    core1 {
      sophgo,core-id = <1>;
      linux,pci-domain = <2>;

      port {
        num-lanes = <2>;	// half of lanes
      };
    };
  };

> +      The Cadence IP has two modes of operation, selected by a strap pin.
> +
> +      In the single-link mode, the Cadence PCIe core instance associated
> +      with Link0 is connected to all the lanes and the Cadence PCIe core
> +      instance associated with Link1 is inactive.
> +
> +      In the dual-link mode, the Cadence PCIe core instance associated
> +      with Link0 is connected to the lower half of the lanes and the
> +      Cadence PCIe core instance associated with Link1 is connected to
> +      the upper half of the lanes.
> +
> +      SG2042 contains 2 Cadence IPs and configures the Cores as below:
> +
> +                     +-- Core (Link0) <---> pcie_rc0  +-----------------+
> +                     |                                |                 |
> +      Cadence IP 1 --+                                | cdns_pcie0_ctrl |
> +                     |                                |                 |
> +                     +-- Core (Link1) <---> disabled  +-----------------+
> +
> +                     +-- Core (Link0) <---> pcie_rc1  +-----------------+
> +                     |                                |                 |
> +      Cadence IP 2 --+                                | cdns_pcie1_ctrl |
> +                     |                                |                 |
> +                     +-- Core (Link1) <---> pcie_rc2  +-----------------+
> +
> +      pcie_rcX is PCIe node ("sophgo,sg2042-pcie-host") defined in DTS.
> +
> +      Sophgo defines some new register files to add support for their MSI
> +      controller inside PCIe. These new register files are defined in DTS as
> +      syscon node ("sophgo,sg2042-pcie-ctrl"), i.e. "cdns_pcie0_ctrl" /
> +      "cdns_pcie1_ctrl". cdns_pcieX_ctrl contains some registers shared by
> +      pcie_rcX, even two RC (Link)s may share different bits of the same
> +      register. For example, cdns_pcie1_ctrl contains registers shared by
> +      link0 & link1 for Cadence IP 2.
> +
> +      "sophgo,link-id" is defined to distinguish the two RC's in one Cadence IP,
> +      so we can know what registers (bits) we should use.

> +examples:
> +  - |
> +    #include <dt-bindings/interrupt-controller/irq.h>
> +
> +    pcie@62000000 {
> +      compatible = "sophgo,sg2042-pcie-host";
> +      device_type = "pci";
> +      reg = <0x62000000  0x00800000>,
> +            <0x48000000  0x00001000>;
> +      reg-names = "reg", "cfg";
> +      #address-cells = <3>;
> +      #size-cells = <2>;
> +      ranges = <0x81000000 0 0x00000000 0xde000000 0 0x00010000>,
> +               <0x82000000 0 0xd0400000 0xd0400000 0 0x0d000000>;
> +      bus-range = <0x00 0xff>;
> +      vendor-id = <0x1f1c>;
> +      device-id = <0x2042>;
> +      cdns,no-bar-match-nbits = <48>;
> +      sophgo,link-id = <0>;
> +      sophgo,syscon-pcie-ctrl = <&cdns_pcie1_ctrl>;
> +      msi-parent = <&msi_pcie>;
> +      msi_pcie: msi {
> +        compatible = "sophgo,sg2042-pcie-msi";
> +        msi-controller;
> +        interrupt-parent = <&intc>;
> +        interrupts = <123 IRQ_TYPE_LEVEL_HIGH>;
> +        interrupt-names = "msi";
> +      };
> +    };

It would be helpful for me if the example showed how both link-id 0
and link-id 1 would be used (or whatever they end up being named).
I assume both have to be somewhere in the same pcie@62000000 device to
make this work.

Bjorn
Chen Wang Jan. 26, 2025, 12:29 a.m. UTC | #5
On 2025/1/23 1:21, Manivannan Sadhasivam wrote:
> On Wed, Jan 22, 2025 at 08:52:39PM +0800, Chen Wang wrote:
>> On 2025/1/19 19:44, Manivannan Sadhasivam wrote:
[......]
>>>> +      msi-parent = <&msi_pcie>;
>>>> +      msi_pcie: msi {
>>> 'msi' is not a standard node name. 'interrupt-controller' is what is usually used
>>> to describe the MSI node.
>> OK. I will correct this.
> There is also 'msi-controller' node name used commonly, but it is not
> documented in the devicetree spec. Maybe you can use it instead and I'll add it
> to the spec since MSI and interrupt controllers are two distinct controllers.

Yes, "msi-controller" is better, I will use this, thanks.

Chen

[......]
Chen Wang Jan. 26, 2025, 2:27 a.m. UTC | #6
hello~

On 2025/1/23 6:21, Bjorn Helgaas wrote:
> On Wed, Jan 15, 2025 at 03:06:37PM +0800, Chen Wang wrote:
>> From: Chen Wang <unicorn_wang@outlook.com>
>>
>> Add binding for Sophgo SG2042 PCIe host controller.
>> +  sophgo,link-id:
>> +    $ref: /schemas/types.yaml#/definitions/uint32
>> +    description: |
>> +      SG2042 uses Cadence IP, every IP is composed of 2 cores (called link0
>> +      & link1 as Cadence's term). Each core corresponds to a host bridge,
>> +      and each host bridge has only one root port. Their configuration
>> +      registers are completely independent. SG2042 integrates two Cadence IPs,
>> +      so there can actually be up to four host bridges. "sophgo,link-id" is
>> +      used to identify which core/link the PCIe host bridge node corresponds to.
> IIUC, the registers of Cadence IP 1 and IP 2 are completely
> independent, and if you describe both of them, you would have separate
> "pcie@62000000" stanzas with separate 'reg' and 'ranges' properties.

To be precise, for two cores of a cadence IP, each core has a separate 
set of configuration registers, that is, the configuration of each core 
is completely independent. This is also what I meant in the binding by 
"Each core corresponds to a host bridge, and each host bridge has only 
one root port. Their configuration registers are completely 
independent.". Maybe the "Their" here is a bit unclear. My original 
intention was to refer to the core. I can improve this description next 
time.

>  From the driver, it does not look like the registers for Link0 and
> Link1 are independent, since the driver claims the
> "sophgo,sg2042-pcie-host", which includes two Cores, and it tests
> pcie->link_id to select the correct register address and bit mask.
In the driver code, one "sophgo,sg2042-pcie-host" corresponds to one 
core, not two. So, you can see in patch 4 of this patchset [1], 3 pcie 
host-bridge nodes are defined, pcie_rc0 ~ pcie_rc2, each corresponding 
to one core.

[1]:https://lore.kernel.org/linux-riscv/4a1f23e5426bfb56cad9c07f90d4efaad5eab976.1736923025.git.unicorn_wang@outlook.com/


I also need to explain that link0 and link1 are actually completely 
independent in PCIE processing, but when sophgo implements the internal 
msi controller for PCIE, its design is not good enough, and the 
registers for processing msi are not made separately for link0 and 
link1, but mixed together, which is what I said 
cdns_pcie0_ctrl/cdns_pcie1_ctrl. In these two new register files added 
by sophgo (only involving MSI processing), take the second cadence IP as 
an example, some registers are used to control the msi controller of 
pcie_rc1 (corresponding to link0), and some registers are used to 
control the msi controller of pcie_rc2 (corresponding to link1). In a 
more complicated situation, some bits in a register are used to control 
pcie_rc1, and some bits are used to control pcie_rc2. This is why I have 
to add the link_id attribute to know whether the current PCIe host 
bridge corresponds to link0 or link1, so that when processing the msi 
controller related to this pcie host bridge, we can find the 
corresponding register or even the bit of a register in cdns_pcieX_ctrl.


> "sophgo,link-id" corresponds to Cadence documentation, but I think it
> is somewhat misleading in the binding because a PCIe "Link" refers to
> the downstream side of a Root Port.  If we use "link-id" to identify
> either Core0 or Core1 of a Cadence IP, it sort of bakes in the
> idea that there can never be more than one Root Port per Core.
The fact is that for the cadence IP used by sg2042, only one root port 
is supported per core.
>
> Since each Core is the root of a separate PCI hierarchy, it seems like
> maybe there should be a stanza for the Core so there's a place where
> per-hierarchy things like "linux,pci-domain" properties could go,
> e.g.,
>
>    pcie@62000000 {		// IP 1, single-link mode
>      compatible = "sophgo,sg2042-pcie-host";
>      reg = <...>;
>      ranges = <...>;
>
>      core0 {
>        sophgo,core-id = <0>;
>        linux,pci-domain = <0>;
>
>        port {
>          num-lanes = <4>;	// all lanes
>        };
>      };
>    };
>
>    pcie@82000000 {		// IP 2, dual-link mode
>      compatible = "sophgo,sg2042-pcie-host";
>      reg = <...>;
>      ranges = <...>;
>
>      core0 {
>        sophgo,core-id = <0>;
>        linux,pci-domain = <1>;
>
>        port {
>          num-lanes = <2>;	// half of lanes
>        };
>      };
>
>      core1 {
>        sophgo,core-id = <1>;
>        linux,pci-domain = <2>;
>
>        port {
>          num-lanes = <2>;	// half of lanes
>        };
>      };
>    };

Based on the above analysis, I think the introduction of a three-layer 
structure (pcie-core-port) looks a bit too complicated for Cadence IP. 
In fact, the source of the discussion at the beginning of this issue was 
whether some attributes should be placed under the host bridge or the 
root port. I suggest that adding the root port layer on the basis of the 
existing patch may be enough. What do you think?

e.g.,

pcie_rc0: pcie@7060000000 {
     compatible = "sophgo,sg2042-pcie-host";
     ...... // host bridge level properties
     sophgo,link-id = <0>;
     port {
         // port level properties
         vendor-id = <0x1f1c>;
         device-id = <0x2042>;
         num-lanes = <4>;
     }
};

pcie_rc1: pcie@7062000000 {
     compatible = "sophgo,sg2042-pcie-host";
     ...... // host bridge level properties
     sophgo,link-id = <0>;
     port {
         // port level properties
         vendor-id = <0x1f1c>;
         device-id = <0x2042>;
         num-lanes = <2>;
     };
};

pcie_rc2: pcie@7062800000 {
     compatible = "sophgo,sg2042-pcie-host";
     ...... // host bridge level properties
     sophgo,link-id = <0>;
     port {
         // port level properties
         vendor-id = <0x1f1c>;
         device-id = <0x2042>;
         num-lanes = <2>;
     }
};

[......]

>> +examples:
>> +  - |
>> +    #include <dt-bindings/interrupt-controller/irq.h>
>> +
>> +    pcie@62000000 {
>> +      compatible = "sophgo,sg2042-pcie-host";
>> +      device_type = "pci";
>> +      reg = <0x62000000  0x00800000>,
>> +            <0x48000000  0x00001000>;
>> +      reg-names = "reg", "cfg";
>> +      #address-cells = <3>;
>> +      #size-cells = <2>;
>> +      ranges = <0x81000000 0 0x00000000 0xde000000 0 0x00010000>,
>> +               <0x82000000 0 0xd0400000 0xd0400000 0 0x0d000000>;
>> +      bus-range = <0x00 0xff>;
>> +      vendor-id = <0x1f1c>;
>> +      device-id = <0x2042>;
>> +      cdns,no-bar-match-nbits = <48>;
>> +      sophgo,link-id = <0>;
>> +      sophgo,syscon-pcie-ctrl = <&cdns_pcie1_ctrl>;
>> +      msi-parent = <&msi_pcie>;
>> +      msi_pcie: msi {
>> +        compatible = "sophgo,sg2042-pcie-msi";
>> +        msi-controller;
>> +        interrupt-parent = <&intc>;
>> +        interrupts = <123 IRQ_TYPE_LEVEL_HIGH>;
>> +        interrupt-names = "msi";
>> +      };
>> +    };
> It would be helpful for me if the example showed how both link-id 0
> and link-id 1 would be used (or whatever they end up being named).
> I assume both have to be somewhere in the same pcie@62000000 device to
> make this work.
>
> Bjorn
Chen Wang Feb. 3, 2025, 2:35 a.m. UTC | #7
hi, Bjorn, what do you think of my input?

Regards,

Chen

On 2025/1/26 10:27, Chen Wang wrote:
> hello~
>
> On 2025/1/23 6:21, Bjorn Helgaas wrote:
>> On Wed, Jan 15, 2025 at 03:06:37PM +0800, Chen Wang wrote:
>>> From: Chen Wang <unicorn_wang@outlook.com>
>>>
>>> Add binding for Sophgo SG2042 PCIe host controller.
>>> +  sophgo,link-id:
>>> +    $ref: /schemas/types.yaml#/definitions/uint32
>>> +    description: |
>>> +      SG2042 uses Cadence IP, every IP is composed of 2 cores 
>>> (called link0
>>> +      & link1 as Cadence's term). Each core corresponds to a host 
>>> bridge,
>>> +      and each host bridge has only one root port. Their configuration
>>> +      registers are completely independent. SG2042 integrates two 
>>> Cadence IPs,
>>> +      so there can actually be up to four host bridges. 
>>> "sophgo,link-id" is
>>> +      used to identify which core/link the PCIe host bridge node 
>>> corresponds to.
>> IIUC, the registers of Cadence IP 1 and IP 2 are completely
>> independent, and if you describe both of them, you would have separate
>> "pcie@62000000" stanzas with separate 'reg' and 'ranges' properties.
>
> To be precise, for two cores of a cadence IP, each core has a separate 
> set of configuration registers, that is, the configuration of each 
> core is completely independent. This is also what I meant in the 
> binding by "Each core corresponds to a host bridge, and each host 
> bridge has only one root port. Their configuration registers are 
> completely independent.". Maybe the "Their" here is a bit unclear. My 
> original intention was to refer to the core. I can improve this 
> description next time.
>
>>  From the driver, it does not look like the registers for Link0 and
>> Link1 are independent, since the driver claims the
>> "sophgo,sg2042-pcie-host", which includes two Cores, and it tests
>> pcie->link_id to select the correct register address and bit mask.
> In the driver code, one "sophgo,sg2042-pcie-host" corresponds to one 
> core, not two. So, you can see in patch 4 of this patchset [1], 3 pcie 
> host-bridge nodes are defined, pcie_rc0 ~ pcie_rc2, each corresponding 
> to one core.
>
> [1]:https://lore.kernel.org/linux-riscv/4a1f23e5426bfb56cad9c07f90d4efaad5eab976.1736923025.git.unicorn_wang@outlook.com/ 
>
>
>
> I also need to explain that link0 and link1 are actually completely 
> independent in PCIE processing, but when sophgo implements the 
> internal msi controller for PCIE, its design is not good enough, and 
> the registers for processing msi are not made separately for link0 and 
> link1, but mixed together, which is what I said 
> cdns_pcie0_ctrl/cdns_pcie1_ctrl. In these two new register files added 
> by sophgo (only involving MSI processing), take the second cadence IP 
> as an example, some registers are used to control the msi controller 
> of pcie_rc1 (corresponding to link0), and some registers are used to 
> control the msi controller of pcie_rc2 (corresponding to link1). In a 
> more complicated situation, some bits in a register are used to 
> control pcie_rc1, and some bits are used to control pcie_rc2. This is 
> why I have to add the link_id attribute to know whether the current 
> PCIe host bridge corresponds to link0 or link1, so that when 
> processing the msi controller related to this pcie host bridge, we can 
> find the corresponding register or even the bit of a register in 
> cdns_pcieX_ctrl.
>
>
>> "sophgo,link-id" corresponds to Cadence documentation, but I think it
>> is somewhat misleading in the binding because a PCIe "Link" refers to
>> the downstream side of a Root Port.  If we use "link-id" to identify
>> either Core0 or Core1 of a Cadence IP, it sort of bakes in the
>> idea that there can never be more than one Root Port per Core.
> The fact is that for the cadence IP used by sg2042, only one root port 
> is supported per core.
>>
>> Since each Core is the root of a separate PCI hierarchy, it seems like
>> maybe there should be a stanza for the Core so there's a place where
>> per-hierarchy things like "linux,pci-domain" properties could go,
>> e.g.,
>>
>>    pcie@62000000 {        // IP 1, single-link mode
>>      compatible = "sophgo,sg2042-pcie-host";
>>      reg = <...>;
>>      ranges = <...>;
>>
>>      core0 {
>>        sophgo,core-id = <0>;
>>        linux,pci-domain = <0>;
>>
>>        port {
>>          num-lanes = <4>;    // all lanes
>>        };
>>      };
>>    };
>>
>>    pcie@82000000 {        // IP 2, dual-link mode
>>      compatible = "sophgo,sg2042-pcie-host";
>>      reg = <...>;
>>      ranges = <...>;
>>
>>      core0 {
>>        sophgo,core-id = <0>;
>>        linux,pci-domain = <1>;
>>
>>        port {
>>          num-lanes = <2>;    // half of lanes
>>        };
>>      };
>>
>>      core1 {
>>        sophgo,core-id = <1>;
>>        linux,pci-domain = <2>;
>>
>>        port {
>>          num-lanes = <2>;    // half of lanes
>>        };
>>      };
>>    };
>
> Based on the above analysis, I think the introduction of a three-layer 
> structure (pcie-core-port) looks a bit too complicated for Cadence 
> IP. In fact, the source of the discussion at the beginning of this 
> issue was whether some attributes should be placed under the host 
> bridge or the root port. I suggest that adding the root port layer on 
> the basis of the existing patch may be enough. What do you think?
>
> e.g.,
>
> pcie_rc0: pcie@7060000000 {
>     compatible = "sophgo,sg2042-pcie-host";
>     ...... // host bride level properties
>     sophgo,link-id = <0>;
>     port {
>         // port level properties
>         vendor-id = <0x1f1c>;
>         device-id = <0x2042>;
>         num-lanes = <4>;
>     }
> };
>
> pcie_rc1: pcie@7062000000 {
>     compatible = "sophgo,sg2042-pcie-host";
>     ...... // host bride level properties
>     sophgo,link-id = <0>;
>     port {
>         // port level properties
>         vendor-id = <0x1f1c>;
>         device-id = <0x2042>;
>         num-lanes = <2>;
>     };
> };
>
> pcie_rc2: pcie@7062800000 {
>     compatible = "sophgo,sg2042-pcie-host";
>     ...... // host bride level properties
>     sophgo,link-id = <0>;
>     port {
>         // port level properties
>         vendor-id = <0x1f1c>;
>         device-id = <0x2042>;
>         num-lanes = <2>;
>     }
> };
>
> [......]
>
>>> +examples:
>>> +  - |
>>> +    #include <dt-bindings/interrupt-controller/irq.h>
>>> +
>>> +    pcie@62000000 {
>>> +      compatible = "sophgo,sg2042-pcie-host";
>>> +      device_type = "pci";
>>> +      reg = <0x62000000  0x00800000>,
>>> +            <0x48000000  0x00001000>;
>>> +      reg-names = "reg", "cfg";
>>> +      #address-cells = <3>;
>>> +      #size-cells = <2>;
>>> +      ranges = <0x81000000 0 0x00000000 0xde000000 0 0x00010000>,
>>> +               <0x82000000 0 0xd0400000 0xd0400000 0 0x0d000000>;
>>> +      bus-range = <0x00 0xff>;
>>> +      vendor-id = <0x1f1c>;
>>> +      device-id = <0x2042>;
>>> +      cdns,no-bar-match-nbits = <48>;
>>> +      sophgo,link-id = <0>;
>>> +      sophgo,syscon-pcie-ctrl = <&cdns_pcie1_ctrl>;
>>> +      msi-parent = <&msi_pcie>;
>>> +      msi_pcie: msi {
>>> +        compatible = "sophgo,sg2042-pcie-msi";
>>> +        msi-controller;
>>> +        interrupt-parent = <&intc>;
>>> +        interrupts = <123 IRQ_TYPE_LEVEL_HIGH>;
>>> +        interrupt-names = "msi";
>>> +      };
>>> +    };
>> It would be helpful for me if the example showed how both link-id 0
>> and link-id 1 would be used (or whatever they end up being named).
>> I assume both have to be somewhere in the same pcie@62000000 device to
>> make this work.
>>
>> Bjorn
>
> _______________________________________________
> linux-riscv mailing list
> linux-riscv@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-riscv
Bjorn Helgaas Feb. 11, 2025, 11:34 p.m. UTC | #8
On Sun, Jan 26, 2025 at 10:27:27AM +0800, Chen Wang wrote:
> On 2025/1/23 6:21, Bjorn Helgaas wrote:
> > On Wed, Jan 15, 2025 at 03:06:37PM +0800, Chen Wang wrote:
> > > From: Chen Wang <unicorn_wang@outlook.com>
> > > 
> > > Add binding for Sophgo SG2042 PCIe host controller.
> > > +  sophgo,link-id:
> > > +    $ref: /schemas/types.yaml#/definitions/uint32
> > > +    description: |
> > > +      SG2042 uses Cadence IP, every IP is composed of 2 cores (called link0
> > > +      & link1 as Cadence's term). Each core corresponds to a host bridge,
> > > +      and each host bridge has only one root port. Their configuration
> > > +      registers are completely independent. SG2042 integrates two Cadence IPs,
> > > +      so there can actually be up to four host bridges. "sophgo,link-id" is
> > > +      used to identify which core/link the PCIe host bridge node corresponds to.
> > IIUC, the registers of Cadence IP 1 and IP 2 are completely
> > independent, and if you describe both of them, you would have separate
> > "pcie@62000000" stanzas with separate 'reg' and 'ranges' properties.
> 
> To be precise, for two cores of a cadence IP, each core has a separate set
> of configuration registers, that is, the configuration of each core is
> completely independent. This is also what I meant in the binding by "Each
> core corresponds to a host bridge, and each host bridge has only one root
> port. Their configuration registers are completely independent.". Maybe the
> "Their" here is a bit unclear. My original intention was to refer to the
> core. I can improve this description next time.
> 
> >  From the driver, it does not look like the registers for Link0 and
> > Link1 are independent, since the driver claims the
> > "sophgo,sg2042-pcie-host", which includes two Cores, and it tests
> > pcie->link_id to select the correct register address and bit mask.
> In the driver code, one "sophgo,sg2042-pcie-host" corresponds to one core,
> not two. So, you can see in patch 4 of this patchset [1], 3 pcie host-bridge
> nodes are defined, pcie_rc0 ~ pcie_rc2, each corresponding to one core.
> 
> [1]:https://lore.kernel.org/linux-riscv/4a1f23e5426bfb56cad9c07f90d4efaad5eab976.1736923025.git.unicorn_wang@outlook.com/
> 
> I also need to explain that link0 and link1 are actually completely
> independent in PCIE processing, but when sophgo implements the internal msi
> controller for PCIE, its design is not good enough, and the registers for
> processing msi are not made separately for link0 and link1, but mixed
> together, which is what I said cdns_pcie0_ctrl/cdns_pcie1_ctrl. In these two
> new register files added by sophgo (only involving MSI processing), take the
> second cadence IP as an example, some registers are used to control the msi
> controller of pcie_rc1 (corresponding to link0), and some registers are used
> to control the msi controller of pcie_rc2 (corresponding to link1). In a
> more complicated situation, some bits in a register are used to control
> pcie_rc1, and some bits are used to control pcie_rc2. This is why I have to
> add the link_id attribute to know whether the current PCIe host bridge
> corresponds to link0 or link1, so that when processing the msi controller
> related to this pcie host bridge, we can find the corresponding register or
> even the bit of a register in cdns_pcieX_ctrl.
> 
> > "sophgo,link-id" corresponds to Cadence documentation, but I think it
> > is somewhat misleading in the binding because a PCIe "Link" refers to
> > the downstream side of a Root Port.  If we use "link-id" to identify
> > either Core0 or Core1 of a Cadence IP, it sort of bakes in the
> > idea that there can never be more than one Root Port per Core.
>
> The fact is that for the cadence IP used by sg2042, only one root port is
> supported per core.

1) That's true today but may not be true forever.

2) Even if there's only one root port forever, "link" already means
something specific in PCIe, and this usage means something different,
so it's a little confusing.  Maybe a comment to say that this refers
to a "Core", not a PCIe link, is the best we can do.

> ...
> Based on the above analysis, I think the introduction of a three-layer
> structure (pcie-core-port) looks a bit too complicated for Cadence IP. In
> fact, the source of the discussion at the beginning of this issue was
> whether some attributes should be placed under the host bridge or the root
> port. I suggest that adding the root port layer on the basis of the existing
> patch may be enough. What do you think?
> 
> e.g.,
> 
> pcie_rc0: pcie@7060000000 {
>     compatible = "sophgo,sg2042-pcie-host";
>     ...... // host bride level properties
>     sophgo,link-id = <0>;
>     port {
>         // port level properties
>         vendor-id = <0x1f1c>;
>         device-id = <0x2042>;
>         num-lanes = <4>;
>     }
> };
> 
> pcie_rc1: pcie@7062000000 {
>     compatible = "sophgo,sg2042-pcie-host";
>     ...... // host bride level properties
>     sophgo,link-id = <0>;
>     port {
>         // port level properties
>         vendor-id = <0x1f1c>;
>         device-id = <0x2042>;
>         num-lanes = <2>;
>     };
> };
> 
> pcie_rc2: pcie@7062800000 {
>     compatible = "sophgo,sg2042-pcie-host";
>     ...... // host bride level properties
>     sophgo,link-id = <0>;
>     port {
>         // port level properties
>         vendor-id = <0x1f1c>;
>         device-id = <0x2042>;
>         num-lanes = <2>;
>     }
> };

Where does linux,pci-domain go?

Can you show how link-id 0 and link-id 1 would both be used?  I assume
they need to be connected somehow, since IIUC there's some register
shared between them?

Bjorn
Chen Wang Feb. 12, 2025, 1:50 a.m. UTC | #9
Hello~

On 2025/2/12 7:34, Bjorn Helgaas wrote:
> On Sun, Jan 26, 2025 at 10:27:27AM +0800, Chen Wang wrote:
>> On 2025/1/23 6:21, Bjorn Helgaas wrote:
>>> On Wed, Jan 15, 2025 at 03:06:37PM +0800, Chen Wang wrote:
>>>> From: Chen Wang <unicorn_wang@outlook.com>
>>>>
>>>> Add binding for Sophgo SG2042 PCIe host controller.
>>>> +  sophgo,link-id:
>>>> +    $ref: /schemas/types.yaml#/definitions/uint32
>>>> +    description: |
>>>> +      SG2042 uses Cadence IP, every IP is composed of 2 cores (called link0
>>>> +      & link1 as Cadence's term). Each core corresponds to a host bridge,
>>>> +      and each host bridge has only one root port. Their configuration
>>>> +      registers are completely independent. SG2042 integrates two Cadence IPs,
>>>> +      so there can actually be up to four host bridges. "sophgo,link-id" is
>>>> +      used to identify which core/link the PCIe host bridge node corresponds to.
>>> IIUC, the registers of Cadence IP 1 and IP 2 are completely
>>> independent, and if you describe both of them, you would have separate
>>> "pcie@62000000" stanzas with separate 'reg' and 'ranges' properties.
>> To be precise, for two cores of a cadence IP, each core has a separate set
>> of configuration registers, that is, the configuration of each core is
>> completely independent. This is also what I meant in the binding by "Each
>> core corresponds to a host bridge, and each host bridge has only one root
>> port. Their configuration registers are completely independent.". Maybe the
>> "Their" here is a bit unclear. My original intention was to refer to the
>> core. I can improve this description next time.
>>
>>>   From the driver, it does not look like the registers for Link0 and
>>> Link1 are independent, since the driver claims the
>>> "sophgo,sg2042-pcie-host", which includes two Cores, and it tests
>>> pcie->link_id to select the correct register address and bit mask.
>> In the driver code, one "sophgo,sg2042-pcie-host" corresponds to one core,
>> not two. So, you can see in patch 4 of this pathset [1], 3 pcie host-bridge
>> nodes are defined, pcie_rc0 ~ pcie_rc2, each corresponding to one core.
>>
>> [1]:https://lore.kernel.org/linux-riscv/4a1f23e5426bfb56cad9c07f90d4efaad5eab976.1736923025.git.unicorn_wang@outlook.com/
>>
>> I also need to explain that link0 and link1 are actually completely
>> independent in PCIE processing, but when sophgo implements the internal msi
>> controller for PCIE, its design is not good enough, and the registers for
>> processing msi are not made separately for link0 and link1, but mixed
>> together, which is what I said cdns_pcie0_ctrl/cdns_pcie1_ctrl. In these two
>> new register files added by sophgo (only involving MSI processing), take the
>> second cadence IP as an example, some registers are used to control the msi
>> controller of pcie_rc1 (corresponding to link0), and some registers are used
>> to control the msi controller of pcie_rc2 (corresponding to link1). In a
>> more complicated situation, some bits in a register are used to control
>> pcie_rc1, and some bits are used to control pcie_rc2. This is why I have to
>> add the link_id attribute to know whether the current PCIe host bridge
>> corresponds to link0 or link1, so that when processing the msi controller
>> related to this pcie host bridge, we can find the corresponding register or
>> even the bit of a register in cdns_pcieX_ctrl.
>>
>>> "sophgo,link-id" corresponds to Cadence documentation, but I think it
>>> is somewhat misleading in the binding because a PCIe "Link" refers to
>>> the downstream side of a Root Port.  If we use "link-id" to identify
>>> either Core0 or Core1 of a Cadence IP, it sort of bakes in the
>>> idea that there can never be more than one Root Port per Core.
>> The fact is that for the cadence IP used by sg2042, only one root port is
>> supported per core.
> 1) That's true today but may not be true forever.
>
> 2) Even if there's only one root port forever, "link" already means
> something specific in PCIe, and this usage means something different,
> so it's a little confusing.  Maybe a comment to say that this refers
> to a "Core", not a PCIe link, is the best we can do.
How about using "sophgo,core-id", as I said in the binding description, 
"every IP is composed of 2 cores (called link0 & link1 as Cadence's 
term).". This avoids the conflict with the concept "link " in the PCIe 
specification, what do you think?
>> ...
>> Based on the above analysis, I think the introduction of a three-layer
>> structure (pcie-core-port) looks a bit too complicated for candence IP. In
>> fact, the source of the discussion at the beginning of this issue was
>> whether some attributes should be placed under the host bridge or the root
>> port. I suggest that adding the root port layer on the basis of the existing
>> patch may be enough. What do you think?
>>
>> e.g.,
>>
>> pcie_rc0: pcie@7060000000 {
>>      compatible = "sophgo,sg2042-pcie-host";
>>      ...... // host bride level properties
>>      sophgo,link-id = <0>;
>>      port {
>>          // port level properties
>>          vendor-id = <0x1f1c>;
>>          device-id = <0x2042>;
>>          num-lanes = <4>;
>>      }
>> };
>>
>> pcie_rc1: pcie@7062000000 {
>>      compatible = "sophgo,sg2042-pcie-host";
>>      ...... // host bride level properties
>>      sophgo,link-id = <0>;
>>      port {
>>          // port level properties
>>          vendor-id = <0x1f1c>;
>>          device-id = <0x2042>;
>>          num-lanes = <2>;
>>      };
>> };
>>
>> pcie_rc2: pcie@7062800000 {
>>      compatible = "sophgo,sg2042-pcie-host";
>>      ...... // host bride level properties
>>      sophgo,link-id = <0>;
>>      port {
>>          // port level properties
>>          vendor-id = <0x1f1c>;
>>          device-id = <0x2042>;
>>          num-lanes = <2>;
>>      }
>> };
> Where does linux,pci-domain go?
>
> Can you show how link-id 0 and link-id 1 would both be used?  I assume
> they need to be connected somehow, since IIUC there's some register
> shared between them?
>
> Bjorn

Oh, sorry, I made a typo when I was giving the example. I wrote all the 
link-id values as 0. I rewrote it as follows. I changed 
"sophgo,link-id" to "sophgo,core-id", and added "linux,pci-domain".

e.g.,

pcie_rc0: pcie@7060000000 {

     compatible = "sophgo,sg2042-pcie-host";
     ...... // host bridge level properties
     linux,pci-domain = <0>;
     sophgo,core-id = <0>;
     port {
         // port level properties
         vendor-id = <0x1f1c>;
         device-id = <0x2042>;
         num-lanes = <4>;
     };
};

pcie_rc1: pcie@7062000000 {
     compatible = "sophgo,sg2042-pcie-host";
     ...... // host bridge level properties
     linux,pci-domain = <1>;
     sophgo,core-id = <0>;
     port {
         // port level properties
         vendor-id = <0x1f1c>;
         device-id = <0x2042>;
         num-lanes = <2>;
     };
};

pcie_rc2: pcie@7062800000 {
     compatible = "sophgo,sg2042-pcie-host";
     ...... // host bridge level properties
     linux,pci-domain = <2>;
     sophgo,core-id = <1>;
     port {
         // port level properties
         vendor-id = <0x1f1c>;
         device-id = <0x2042>;
         num-lanes = <2>;
     };

};

pcie_rc1 and pcie_rc2 share registers in cdns_pcie1_ctrl. By using 
different "sophgo,core-id" values, they can distinguish and access the 
registers they need in cdns_pcie1_ctrl.

Regards,

Chen
Bjorn Helgaas Feb. 12, 2025, 4:25 a.m. UTC | #10
On Wed, Feb 12, 2025 at 09:50:11AM +0800, Chen Wang wrote:
> On 2025/2/12 7:34, Bjorn Helgaas wrote:
> > On Sun, Jan 26, 2025 at 10:27:27AM +0800, Chen Wang wrote:
> > > On 2025/1/23 6:21, Bjorn Helgaas wrote:
> > > > On Wed, Jan 15, 2025 at 03:06:37PM +0800, Chen Wang wrote:
> > > > > From: Chen Wang <unicorn_wang@outlook.com>
> > > > > 
> > > > > Add binding for Sophgo SG2042 PCIe host controller.
> ...

> > > > "sophgo,link-id" corresponds to Cadence documentation, but I
> > > > think it is somewhat misleading in the binding because a PCIe
> > > > "Link" refers to the downstream side of a Root Port.  If we
> > > > use "link-id" to identify either Core0 or Core1 of a Cadence
> > > > IP, it sort of bakes in the idea that there can never be more
> > > > than one Root Port per Core.
> > >
> > > The fact is that for the cadence IP used by sg2042, only one
> > > root port is supported per core.
> >
> > 1) That's true today but may not be true forever.
> > 
> > 2) Even if there's only one root port forever, "link" already
> > means something specific in PCIe, and this usage means something
> > different, so it's a little confusing.  Maybe a comment to say
> > that this refers to a "Core", not a PCIe link, is the best we can
> > do.
>
> How about using "sophgo,core-id", as I said in the binding
> description, "every IP is composed of 2 cores (called link0 & link1
> as Cadence's term).".  This avoids the conflict with the concept
> "link " in the PCIe specification, what do you think?

I think that would be great.

> > > Based on the above analysis, I think the introduction of a
> > > three-layer structure (pcie-core-port) looks a bit too
> > > complicated for candence IP. In fact, the source of the
> > > discussion at the beginning of this issue was whether some
> > > attributes should be placed under the host bridge or the root
> > > port. I suggest that adding the root port layer on the basis of
> > > the existing patch may be enough. What do you think?
> > > 
> > > e.g.,
> > > 
> > > pcie_rc0: pcie@7060000000 {
> > >      compatible = "sophgo,sg2042-pcie-host";
> > >      ...... // host bride level properties
> > >      sophgo,link-id = <0>;
> > >      port {
> > >          // port level properties
> > >          vendor-id = <0x1f1c>;
> > >          device-id = <0x2042>;
> > >          num-lanes = <4>;
> > >      }
> > > };
> > > 
> > > pcie_rc1: pcie@7062000000 {
> > >      compatible = "sophgo,sg2042-pcie-host";
> > >      ...... // host bride level properties
> > >      sophgo,link-id = <0>;
> > >      port {
> > >          // port level properties
> > >          vendor-id = <0x1f1c>;
> > >          device-id = <0x2042>;
> > >          num-lanes = <2>;
> > >      };
> > > };
> > > 
> > > pcie_rc2: pcie@7062800000 {
> > >      compatible = "sophgo,sg2042-pcie-host";
> > >      ...... // host bride level properties
> > >      sophgo,link-id = <0>;
> > >      port {
> > >          // port level properties
> > >          vendor-id = <0x1f1c>;
> > >          device-id = <0x2042>;
> > >          num-lanes = <2>;
> > >      }
> > > };
> >
> > Where does linux,pci-domain go?
> > 
> > Can you show how link-id 0 and link-id 1 would both be used?  I
> > assume they need to be connected somehow, since IIUC there's some
> > register shared between them?
> 
> Oh, sorry, I made a typo when I was giving the example. I wrote all
> the link-id values as 0. I rewrote it as follows. I
> changed "sophgo,link-id" to "sophgo,core-id", and added
> "linux,pci-domain".
> 
> e.g.,
> 
> pcie_rc0: pcie@7060000000 {
> 
>     compatible = "sophgo,sg2042-pcie-host";
>     ...... // host bride level properties
>     linux,pci-domain = <0>;
>     sophgo,core-id = <0>;
>     port {
>         // port level properties
>         vendor-id = <0x1f1c>;
>         device-id = <0x2042>;
>         num-lanes = <4>;
>     }
> };
> 
> pcie_rc1: pcie@7062000000 {
>     compatible = "sophgo,sg2042-pcie-host";
>     ...... // host bride level properties
>     linux,pci-domain = <1>;
>     sophgo,core-id = <0>;
>     port {
>         // port level properties
>         vendor-id = <0x1f1c>;
>         device-id = <0x2042>;
>         num-lanes = <2>;
>     };
> };
> 
> pcie_rc2: pcie@7062800000 {
>     compatible = "sophgo,sg2042-pcie-host";
>     ...... // host bride level properties
>     linux,pci-domain = <2>;
>     sophgo,core-id = <1>;
>     port {
>         // port level properties
>         vendor-id = <0x1f1c>;
>         device-id = <0x2042>;
>         num-lanes = <2>;
>     }
> 
> };
> 
> pcie_rc1 and pcie_rc2 share registers in cdns_pcie1_ctrl. By using
> different "sophgo,core-id" values, they can distinguish and access
> the registers they need in cdns_pcie1_ctrl.

Where does cdns_pcie1_ctrl fit in this example?  Does that enclose
both pcie_rc1 and pcie_rc2?
Chen Wang Feb. 12, 2025, 5:54 a.m. UTC | #11
On 2025/2/12 12:25, Bjorn Helgaas wrote:
[......]
>> pcie_rc1 and pcie_rc2 share registers in cdns_pcie1_ctrl. By using
>> different "sophgo,core-id" values, they can distinguish and access
>> the registers they need in cdns_pcie1_ctrl.
> Where does cdns_pcie1_ctrl fit in this example?  Does that enclose
> both pcie_rc1 and pcie_rc2?

cdns_pcie1_ctrl is defined as a syscon node,  which contains registers 
shared by pcie_rc1 and pcie_rc2. In the binding yaml file, I drew a 
diagram to describe the relationship between them, copy here for your 
quick reference:

+                     +-- Core (Link0) <---> pcie_rc1  +-----------------+
+                     |                                |                 |
+      Cadence IP 2 --+                                | cdns_pcie1_ctrl |
+                     |                                |                 |
+                     +-- Core (Link1) <---> pcie_rc2  +-----------------+

The following is an example with cdns_pcie1_ctrl added. For simplicity, 
I deleted pcie_rc0.

pcie_rc1: pcie@7062000000 {
     compatible = "sophgo,sg2042-pcie-host";
     ...... // host bridge level properties
     linux,pci-domain = <1>;
     sophgo,core-id = <0>;
     sophgo,syscon-pcie-ctrl = <&cdns_pcie1_ctrl>;
     port {
         // port level properties
         vendor-id = <0x1f1c>;
         device-id = <0x2042>;
         num-lanes = <2>;
     };
};

pcie_rc2: pcie@7062800000 {
     compatible = "sophgo,sg2042-pcie-host";
     ...... // host bridge level properties
     linux,pci-domain = <2>;
     sophgo,core-id = <1>;
     sophgo,syscon-pcie-ctrl = <&cdns_pcie1_ctrl>;
     port {
         // port level properties
         vendor-id = <0x1f1c>;
         device-id = <0x2042>;
         num-lanes = <2>;
     };

};

cdns_pcie1_ctrl: syscon@7063800000 {
     compatible = "sophgo,sg2042-pcie-ctrl", "syscon";
     reg = <0x70 0x63800000 0x0 0x800000>;
};
diff mbox series

Patch

diff --git a/Documentation/devicetree/bindings/pci/sophgo,sg2042-pcie-host.yaml b/Documentation/devicetree/bindings/pci/sophgo,sg2042-pcie-host.yaml
new file mode 100644
index 000000000000..f98e71822144
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/sophgo,sg2042-pcie-host.yaml
@@ -0,0 +1,147 @@ 
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/sophgo,sg2042-pcie-host.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sophgo SG2042 PCIe Host (Cadence PCIe Wrapper)
+
+description:
+  Sophgo SG2042 PCIe host controller is based on the Cadence PCIe core.
+
+maintainers:
+  - Chen Wang <unicorn_wang@outlook.com>
+
+properties:
+  compatible:
+    const: sophgo,sg2042-pcie-host
+
+  reg:
+    maxItems: 2
+
+  reg-names:
+    items:
+      - const: reg
+      - const: cfg
+
+  vendor-id:
+    const: 0x1f1c
+
+  device-id:
+    const: 0x2042
+
+  msi:
+    type: object
+    $ref: /schemas/interrupt-controller/msi-controller.yaml#
+    unevaluatedProperties: false
+
+    properties:
+      compatible:
+        items:
+          - const: sophgo,sg2042-pcie-msi
+
+      interrupts:
+        maxItems: 1
+
+      interrupt-names:
+        const: msi
+
+  msi-parent: true
+
+  sophgo,link-id:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: |
+      SG2042 uses Cadence IP, every IP is composed of 2 cores (called link0
+      & link1 as Cadence's term). Each core corresponds to a host bridge,
+      and each host bridge has only one root port. The configuration
+      registers of each core are completely independent. SG2042 integrates two Cadence IPs,
+      so there can actually be up to four host bridges. "sophgo,link-id" is
+      used to identify which core/link the PCIe host bridge node corresponds to.
+
+      The Cadence IP has two modes of operation, selected by a strap pin.
+
+      In the single-link mode, the Cadence PCIe core instance associated
+      with Link0 is connected to all the lanes and the Cadence PCIe core
+      instance associated with Link1 is inactive.
+
+      In the dual-link mode, the Cadence PCIe core instance associated
+      with Link0 is connected to the lower half of the lanes and the
+      Cadence PCIe core instance associated with Link1 is connected to
+      the upper half of the lanes.
+
+      SG2042 contains 2 Cadence IPs and configures the Cores as below:
+
+                     +-- Core (Link0) <---> pcie_rc0  +-----------------+
+                     |                                |                 |
+      Cadence IP 1 --+                                | cdns_pcie0_ctrl |
+                     |                                |                 |
+                     +-- Core (Link1) <---> disabled  +-----------------+
+
+                     +-- Core (Link0) <---> pcie_rc1  +-----------------+
+                     |                                |                 |
+      Cadence IP 2 --+                                | cdns_pcie1_ctrl |
+                     |                                |                 |
+                     +-- Core (Link1) <---> pcie_rc2  +-----------------+
+
+      pcie_rcX is PCIe node ("sophgo,sg2042-pcie-host") defined in DTS.
+
+      Sophgo defines some new register files to add support for their MSI
+      controller inside PCIe. These new register files are defined in DTS as
+      syscon node ("sophgo,sg2042-pcie-ctrl"), i.e. "cdns_pcie0_ctrl" /
+      "cdns_pcie1_ctrl". cdns_pcieX_ctrl contains some registers shared by
+      pcie_rcX, even two RC (Link)s may share different bits of the same
+      register. For example, cdns_pcie1_ctrl contains registers shared by
+      link0 & link1 for Cadence IP 2.
+
+      "sophgo,link-id" is defined to distinguish the two RC's in one Cadence IP,
+      so we can know what registers (bits) we should use.
+
+  sophgo,syscon-pcie-ctrl:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description:
+      Phandle to the PCIe System Controller DT node. It's required to
+      access some MSI operation registers shared by PCIe RCs.
+
+allOf:
+  - $ref: cdns-pcie-host.yaml#
+
+required:
+  - compatible
+  - reg
+  - reg-names
+  - vendor-id
+  - device-id
+  - sophgo,link-id
+  - sophgo,syscon-pcie-ctrl
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    pcie@62000000 {
+      compatible = "sophgo,sg2042-pcie-host";
+      device_type = "pci";
+      reg = <0x62000000  0x00800000>,
+            <0x48000000  0x00001000>;
+      reg-names = "reg", "cfg";
+      #address-cells = <3>;
+      #size-cells = <2>;
+      ranges = <0x81000000 0 0x00000000 0xde000000 0 0x00010000>,
+               <0x82000000 0 0xd0400000 0xd0400000 0 0x0d000000>;
+      bus-range = <0x00 0xff>;
+      vendor-id = <0x1f1c>;
+      device-id = <0x2042>;
+      cdns,no-bar-match-nbits = <48>;
+      sophgo,link-id = <0>;
+      sophgo,syscon-pcie-ctrl = <&cdns_pcie1_ctrl>;
+      msi-parent = <&msi_pcie>;
+      msi_pcie: msi {
+        compatible = "sophgo,sg2042-pcie-msi";
+        msi-controller;
+        interrupt-parent = <&intc>;
+        interrupts = <123 IRQ_TYPE_LEVEL_HIGH>;
+        interrupt-names = "msi";
+      };
+    };