
[v6,19/43] hw/cxl/device: Implement MMIO HDM decoding (8.2.5.12)

Message ID 20220211120747.3074-20-Jonathan.Cameron@huawei.com (mailing list archive)
State New, archived
Series CXL 2.0 emulation support

Commit Message

Jonathan Cameron Feb. 11, 2022, 12:07 p.m. UTC
From: Ben Widawsky <ben.widawsky@intel.com>

A device's volatile and persistent memory are known as Host-managed Device
Memory (HDM) regions. The mechanism by which the device is programmed to claim
the addresses associated with those regions is dedicated logic known as the
HDM decoder. In order to allow the OS to properly program the HDMs, the HDM
decoders must be modeled.

There are two ways the HDM decoders can be implemented: the legacy mechanism
is PCIe DVSEC programming from CXL 1.1 (8.1.3.8), and the other is MMIO,
described in 8.2.5.12 of the spec. For now, 8.1.3.8 is not implemented.

Much of the CXL device logic is implemented in cxl-utils. The HDM decoder,
however, is implemented directly by the device itself. Whilst the
implementation currently does no validity checks on the decoder setup, future
work will add sanity checking specific to the type of CXL component.
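
As a rough sketch of what is being modelled, a guest would drive the commit
handshake along the following lines (illustrative pseudocode only: the
register offset and bit positions below are placeholders for this sketch, not
values taken from the spec or from this patch):

  /*
   * Sketch only: guest-side view of the commit handshake the emulated
   * decoder implements. Offsets and bit positions are placeholders.
   */
  #include <stdbool.h>
  #include <stdint.h>

  #define HDM_DECODER0_CTRL   0x20u      /* placeholder register offset */
  #define HDM_CTRL_COMMIT     (1u << 9)  /* placeholder bit position */
  #define HDM_CTRL_COMMITTED  (1u << 10) /* placeholder bit position */

  static inline uint32_t mmio_read32(volatile uint8_t *base, uint32_t off)
  {
      return *(volatile uint32_t *)(base + off);
  }

  static inline void mmio_write32(volatile uint8_t *base, uint32_t off,
                                  uint32_t val)
  {
      *(volatile uint32_t *)(base + off) = val;
  }

  /* After programming decoder 0's ranges, set COMMIT and check the result. */
  static bool hdm_decoder0_commit(volatile uint8_t *hdm_base)
  {
      uint32_t ctrl = mmio_read32(hdm_base, HDM_DECODER0_CTRL);

      mmio_write32(hdm_base, HDM_DECODER0_CTRL, ctrl | HDM_CTRL_COMMIT);

      /* The emulation commits synchronously: COMMITTED set, ERR cleared. */
      return mmio_read32(hdm_base, HDM_DECODER0_CTRL) & HDM_CTRL_COMMITTED;
  }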

Signed-off-by: Ben Widawsky <ben.widawsky@intel.com>
Co-developed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
---
 hw/mem/cxl_type3.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)

Comments

Alex Bennée March 1, 2022, 6:17 p.m. UTC | #1
Jonathan Cameron <Jonathan.Cameron@huawei.com> writes:

> From: Ben Widawsky <ben.widawsky@intel.com>
>
> A device's volatile and persistent memory are known Host Defined Memory
> (HDM) regions. The mechanism by which the device is programmed to claim
> the addresses associated with those regions is through dedicated logic
> known as the HDM decoder. In order to allow the OS to properly program
> the HDMs, the HDM decoders must be modeled.
>
> There are two ways the HDM decoders can be implemented, the legacy
> mechanism is through the PCIe DVSEC programming from CXL 1.1 (8.1.3.8),
> and MMIO is found in 8.2.5.12 of the spec. For now, 8.1.3.8 is not
> implemented.
>
> Much of CXL device logic is implemented in cxl-utils. The HDM decoder
> however is implemented directly by the device implementation.
> Whilst the implementation currently does no validity checks on the
> encoder set up, future work will add sanity checking specific to
> the type of cxl component.
>
> Signed-off-by: Ben Widawsky <ben.widawsky@intel.com>
> Co-developed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
> Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
> ---
>  hw/mem/cxl_type3.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 54 insertions(+)
>
> diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c
> index c4021d2434..da091157f2 100644
> --- a/hw/mem/cxl_type3.c
> +++ b/hw/mem/cxl_type3.c
> @@ -61,6 +61,56 @@ static void build_dvsecs(CXLType3Dev *ct3d)
>                                 REG_LOC_DVSEC_REVID, dvsec);
>  }
>  
> +static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
> +{
> +    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
> +    uint32_t *cache_mem = cregs->cache_mem_registers;
> +
> +    assert(which == 0);
> +
> +    /* TODO: Sanity checks that the decoder is possible */
> +    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMIT, 0);
> +    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, ERR, 0);
> +
> +    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
> +}
> +
> +static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
> +                           unsigned size)
> +{
> +    CXLComponentState *cxl_cstate = opaque;
> +    ComponentRegisters *cregs = &cxl_cstate->crb;
> +    CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
> +    uint32_t *cache_mem = cregs->cache_mem_registers;
> +    bool should_commit = false;
> +    int which_hdm = -1;
> +
> +    assert(size == 4);

Maybe add:

      g_assert(offset <= (CXL2_COMPONENT_CM_REGION_SIZE >> 2));


> +
> +    switch (offset) {
> +    case A_CXL_HDM_DECODER0_CTRL:
> +        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
> +        which_hdm = 0;
> +        break;
> +    default:
> +        break;
> +    }
> +
> +    stl_le_p((uint8_t *)cache_mem + offset, value);
> +    if (should_commit) {
> +        hdm_decoder_commit(ct3d, which_hdm);
> +    }
> +}
> +
> +static void ct3_finalize(Object *obj)
> +{
> +    CXLType3Dev *ct3d = CT3(obj);
> +    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
> +    ComponentRegisters *regs = &cxl_cstate->crb;
> +
> +    g_free((void *)regs->special_ops);

nit: you don't need the cast here.
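
i.e. something like this should be enough (untested sketch; special_ops isn't
const-qualified, given the direct assignment to ->write in ct3_realize):

      static void ct3_finalize(Object *obj)
      {
          CXLType3Dev *ct3d = CT3(obj);
          ComponentRegisters *regs = &ct3d->cxl_cstate.crb;

          /* No cast needed: special_ops is not a const pointer. */
          g_free(regs->special_ops);
      }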

> +}
> +
>  static void cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
>  {
>      MemoryRegion *mr;
> @@ -103,6 +153,9 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
>      ct3d->cxl_cstate.pdev = pci_dev;
>      build_dvsecs(ct3d);
>  
> +    regs->special_ops = g_new0(MemoryRegionOps, 1);
> +    regs->special_ops->write = ct3d_reg_write;
> +
>      cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
>                                        TYPE_CXL_TYPE3_DEV);
>  
> @@ -155,6 +208,7 @@ static const TypeInfo ct3d_info = {
>      .parent = TYPE_PCI_DEVICE,
>      .class_init = ct3_class_init,
>      .instance_size = sizeof(CXLType3Dev),
> +    .instance_finalize = ct3_finalize,
>      .interfaces = (InterfaceInfo[]) {
>          { INTERFACE_CXL_DEVICE },
>          { INTERFACE_PCIE_DEVICE },

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Jonathan Cameron March 3, 2022, 6:07 p.m. UTC | #2
On Tue, 01 Mar 2022 18:17:35 +0000
Alex Bennée <alex.bennee@linaro.org> wrote:

> Jonathan Cameron <Jonathan.Cameron@huawei.com> writes:
> 
> > From: Ben Widawsky <ben.widawsky@intel.com>
> >
> > A device's volatile and persistent memory are known Host Defined Memory
> > (HDM) regions. The mechanism by which the device is programmed to claim
> > the addresses associated with those regions is through dedicated logic
> > known as the HDM decoder. In order to allow the OS to properly program
> > the HDMs, the HDM decoders must be modeled.
> >
> > There are two ways the HDM decoders can be implemented, the legacy
> > mechanism is through the PCIe DVSEC programming from CXL 1.1 (8.1.3.8),
> > and MMIO is found in 8.2.5.12 of the spec. For now, 8.1.3.8 is not
> > implemented.
> >
> > Much of CXL device logic is implemented in cxl-utils. The HDM decoder
> > however is implemented directly by the device implementation.
> > Whilst the implementation currently does no validity checks on the
> > encoder set up, future work will add sanity checking specific to
> > the type of cxl component.
> >
> > Signed-off-by: Ben Widawsky <ben.widawsky@intel.com>
> > Co-developed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
> > Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
> > ---
> >  hw/mem/cxl_type3.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 54 insertions(+)
> >
> > diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c
> > index c4021d2434..da091157f2 100644
> > --- a/hw/mem/cxl_type3.c
> > +++ b/hw/mem/cxl_type3.c
> > @@ -61,6 +61,56 @@ static void build_dvsecs(CXLType3Dev *ct3d)
> >                                 REG_LOC_DVSEC_REVID, dvsec);
> >  }
> >  
> > +static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
> > +{
> > +    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
> > +    uint32_t *cache_mem = cregs->cache_mem_registers;
> > +
> > +    assert(which == 0);
> > +
> > +    /* TODO: Sanity checks that the decoder is possible */
> > +    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMIT, 0);
> > +    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, ERR, 0);
> > +
> > +    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
> > +}
> > +
> > +static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
> > +                           unsigned size)
> > +{
> > +    CXLComponentState *cxl_cstate = opaque;
> > +    ComponentRegisters *cregs = &cxl_cstate->crb;
> > +    CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
> > +    uint32_t *cache_mem = cregs->cache_mem_registers;
> > +    bool should_commit = false;
> > +    int which_hdm = -1;
> > +
> > +    assert(size == 4);  
> 
> Maybe add:
> 
>       g_assert(offset <= (CXL2_COMPONENT_CM_REGION_SIZE >> 2));

offset is in bytes here, so agreed, but without the >> 2.
Could move this over to use a 4-byte step if you think that's a good idea.
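
i.e. something like (sketch only, keeping the byte offset):

      /* Bound the byte offset against the full component register region. */
      g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE);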



For the other comments, changed as suggested.

Thanks,

Jonathan

Patch

diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c
index c4021d2434..da091157f2 100644
--- a/hw/mem/cxl_type3.c
+++ b/hw/mem/cxl_type3.c
@@ -61,6 +61,56 @@ static void build_dvsecs(CXLType3Dev *ct3d)
                                REG_LOC_DVSEC_REVID, dvsec);
 }
 
+static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
+{
+    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
+    uint32_t *cache_mem = cregs->cache_mem_registers;
+
+    assert(which == 0);
+
+    /* TODO: Sanity checks that the decoder is possible */
+    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMIT, 0);
+    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, ERR, 0);
+
+    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
+}
+
+static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
+                           unsigned size)
+{
+    CXLComponentState *cxl_cstate = opaque;
+    ComponentRegisters *cregs = &cxl_cstate->crb;
+    CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
+    uint32_t *cache_mem = cregs->cache_mem_registers;
+    bool should_commit = false;
+    int which_hdm = -1;
+
+    assert(size == 4);
+
+    switch (offset) {
+    case A_CXL_HDM_DECODER0_CTRL:
+        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
+        which_hdm = 0;
+        break;
+    default:
+        break;
+    }
+
+    stl_le_p((uint8_t *)cache_mem + offset, value);
+    if (should_commit) {
+        hdm_decoder_commit(ct3d, which_hdm);
+    }
+}
+
+static void ct3_finalize(Object *obj)
+{
+    CXLType3Dev *ct3d = CT3(obj);
+    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
+    ComponentRegisters *regs = &cxl_cstate->crb;
+
+    g_free((void *)regs->special_ops);
+}
+
 static void cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
 {
     MemoryRegion *mr;
@@ -103,6 +153,9 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
     ct3d->cxl_cstate.pdev = pci_dev;
     build_dvsecs(ct3d);
 
+    regs->special_ops = g_new0(MemoryRegionOps, 1);
+    regs->special_ops->write = ct3d_reg_write;
+
     cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
                                       TYPE_CXL_TYPE3_DEV);
 
@@ -155,6 +208,7 @@ static const TypeInfo ct3d_info = {
     .parent = TYPE_PCI_DEVICE,
     .class_init = ct3_class_init,
     .instance_size = sizeof(CXLType3Dev),
+    .instance_finalize = ct3_finalize,
     .interfaces = (InterfaceInfo[]) {
         { INTERFACE_CXL_DEVICE },
         { INTERFACE_PCIE_DEVICE },