
[v2,11/18] nvdimm: build ACPI nvdimm devices

Message ID 1439563931-12352-12-git-send-email-guangrong.xiao@linux.intel.com (mailing list archive)
State New, archived

Commit Message

Xiao Guangrong Aug. 14, 2015, 2:52 p.m. UTC
NVDIMM devices are defined in ACPI 6.0, section 9.20 "NVDIMM Devices".

A root device is created under \_SB and the individual NVDIMM devices are
placed under that root device. Each NVDIMM device has an _ADR object that
returns its handle, which is used to associate it with its MEMDEV table in
the NFIT.
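
For illustration, the handle convention this relies on can be sketched as
follows (the real nvdimm_index_to_handle() is introduced by an earlier patch
in this series and may differ; the body below is only an assumption):

    /*
     * Hypothetical sketch: handle 0 is reserved for the root device, so an
     * NVDIMM's handle is assumed to be its zero-based device index plus one.
     */
    static uint32_t nvdimm_index_to_handle(int index)
    {
        return index + 1;
    }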

We reserve handle 0 for the root device. In this patch we save the handle,
arg0, arg1 and arg2; arg3 is conditionally saved in a later patch.
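
The AML field declarations in the patch below size each named field from a
member of struct dsm_buffer, which is defined earlier in the series. A rough
sketch of the layout implied by this patch (only the member names and their
ordering are visible here; the sizes are placeholders, not taken from the
series):

    /* Sketch only: member sizes are assumptions. */
    struct dsm_buffer {
        uint32_t handle;     /* HDLE: 0 for the root device, else the NVDIMM handle */
        uint8_t  arg0[16];   /* ARG0: assumed to be the _DSM UUID */
        uint32_t arg1;       /* ARG1: assumed to be the revision */
        uint32_t arg2;       /* ARG2: assumed to be the function index */
        uint8_t  arg3[2048]; /* ARG3: package payload, saved in a later patch */
        uint32_t notify;     /* NOTI: MMIO doorbell; a write causes a VM exit */
    } QEMU_PACKED;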

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
---
 hw/i386/acpi-build.c       |   2 +
 hw/mem/nvdimm/acpi.c       | 130 ++++++++++++++++++++++++++++++++++++++++++++-
 include/hw/mem/pc-nvdimm.h |   2 +
 3 files changed, 132 insertions(+), 2 deletions(-)

Patch

diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index 092ed2f..a792135 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -1342,6 +1342,8 @@  build_ssdt(GArray *table_data, GArray *linker,
                 aml_append(sb_scope, scope);
             }
         }
+
+        pc_nvdimm_build_acpi_devices(sb_scope);
         aml_append(ssdt, sb_scope);
     }
 
diff --git a/hw/mem/nvdimm/acpi.c b/hw/mem/nvdimm/acpi.c
index e0f2ad3..909a8ef 100644
--- a/hw/mem/nvdimm/acpi.c
+++ b/hw/mem/nvdimm/acpi.c
@@ -135,10 +135,11 @@  struct nfit_dcr {
     uint8_t reserved2[6];
 } QEMU_PACKED;
 
-#define REVSISON_ID    1
-#define NFIT_FIC1      0x201
+#define REVSISON_ID             1
+#define NFIT_FIC1               0x201
 
 #define MAX_NVDIMM_NUMBER       10
+#define NOTIFY_VALUE            0x99
 
 static int get_nvdimm_device_number(GSList *list)
 {
@@ -281,12 +282,15 @@  static size_t dsm_size;
 static uint64_t dsm_read(void *opaque, hwaddr addr,
                          unsigned size)
 {
+    fprintf(stderr, "BUG: we never read DSM notification MMIO.\n");
+    assert(0);
     return 0;
 }
 
 static void dsm_write(void *opaque, hwaddr addr,
                       uint64_t val, unsigned size)
 {
+    assert(val == NOTIFY_VALUE);
 }
 
 static const MemoryRegionOps dsm_ops = {
@@ -361,3 +365,125 @@  void pc_nvdimm_build_nfit_table(GArray *table_offsets, GArray *table_data,
 exit:
     g_slist_free(list);
 }
+
+#define BUILD_STA_METHOD(_dev_, _method_)                                  \
+    do {                                                                   \
+        _method_ = aml_method("_STA", 0);                                  \
+        aml_append(_method_, aml_return(aml_int(0x0f)));                   \
+        aml_append(_dev_, _method_);                                       \
+    } while (0)
+
+#define SAVE_ARG012_HANDLE(_method_, _handle_)                             \
+    do {                                                                   \
+        aml_append(_method_, aml_store(_handle_, aml_name("HDLE")));       \
+        aml_append(_method_, aml_store(aml_arg(0), aml_name("ARG0")));     \
+        aml_append(_method_, aml_store(aml_arg(1), aml_name("ARG1")));     \
+        aml_append(_method_, aml_store(aml_arg(2), aml_name("ARG2")));     \
+    } while (0)
+
+#define NOTIFY_AND_RETURN(_method_)                                        \
+    do {                                                                   \
+        aml_append(_method_, aml_store(aml_int(NOTIFY_VALUE),              \
+                   aml_name("NOTI")));                                     \
+        aml_append(_method_, aml_return(aml_name("ODAT")));                \
+    } while (0)
+
+static void build_nvdimm_devices(Aml *root_dev, GSList *list)
+{
+    for (; list; list = list->next) {
+        PCNVDIMMDevice *nvdimm = list->data;
+        uint32_t handle = nvdimm_index_to_handle(nvdimm->device_index);
+        Aml *dev, *method;
+
+        dev = aml_device("NVD%d", nvdimm->device_index);
+        aml_append(dev, aml_name_decl("_ADR", aml_int(handle)));
+
+        BUILD_STA_METHOD(dev, method);
+
+        method = aml_method("_DSM", 4);
+        {
+            SAVE_ARG012_HANDLE(method, aml_int(handle));
+            NOTIFY_AND_RETURN(method);
+        }
+        aml_append(dev, method);
+
+        aml_append(root_dev, dev);
+    }
+}
+
+void pc_nvdimm_build_acpi_devices(Aml *sb_scope)
+{
+    Aml *dev, *method, *field;
+    struct dsm_buffer *dsm_buf;
+    GSList *list = get_nvdimm_built_list();
+    int nr = get_nvdimm_device_number(list);
+
+    if (nr <= 0 || nr > MAX_NVDIMM_NUMBER) {
+        g_slist_free(list);
+        return;
+    }
+
+    dev = aml_device("NVDR");
+    aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0012")));
+
+    /* map DSM buffer into ACPI namespace. */
+    aml_append(dev, aml_operation_region("DSMR", AML_SYSTEM_MEMORY,
+               dsm_addr, dsm_size));
+
+    /*
+     * DSM input:
+     * @HDLE: stores the device's handle; it is zero if the _DSM call
+     *        happens on the root device.
+     * @ARG0 ~ @ARG3: store the parameters of the _DSM call.
+     *
+     * They are RAM-mapped on the host, so accesses never cause a VM exit.
+     */
+    field = aml_field("DSMR", AML_DWORD_ACC, AML_PRESERVE);
+    aml_append(field, aml_named_field("HDLE",
+                   sizeof(dsm_buf->handle) * BITS_PER_BYTE));
+    aml_append(field, aml_named_field("ARG0",
+                   sizeof(dsm_buf->arg0) * BITS_PER_BYTE));
+    aml_append(field, aml_named_field("ARG1",
+                   sizeof(dsm_buf->arg1) * BITS_PER_BYTE));
+    aml_append(field, aml_named_field("ARG2",
+                   sizeof(dsm_buf->arg2) * BITS_PER_BYTE));
+    aml_append(field, aml_named_field("ARG3",
+                   sizeof(dsm_buf->arg3) * BITS_PER_BYTE));
+    /*
+     * DSM notification:
+     * @NOTI: writing a value to it notifies QEMU that the _DSM method is
+     *        being called and that the parameters can be found in dsm_buf.
+     *
+     * It is MMIO-mapped on the host, so the write causes a VM exit and
+     * QEMU gets control.
+     */
+    aml_append(field, aml_named_field("NOTI",
+                   sizeof(dsm_buf->notify) * BITS_PER_BYTE));
+    aml_append(dev, field);
+
+    /*
+     * DSM output:
+     * @ODAT: it reuses the first page of the dsm buffer; QEMU stores the
+     *        result there.
+     *
+     * Since the first page is shared by input and output, the input data
+     * is lost once a new result is stored into @ODAT.
+     */
+    field = aml_field("DSMR", AML_DWORD_ACC, AML_PRESERVE);
+    aml_append(field, aml_named_field("ODAT", PAGE_SIZE * BITS_PER_BYTE));
+    aml_append(dev, field);
+
+    BUILD_STA_METHOD(dev, method);
+
+    method = aml_method("_DSM", 4);
+    {
+        SAVE_ARG012_HANDLE(method, aml_int(0));
+        NOTIFY_AND_RETURN(method);
+    }
+    aml_append(dev, method);
+
+    build_nvdimm_devices(dev, list);
+
+    aml_append(sb_scope, dev);
+    g_slist_free(list);
+}
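
For context, a possible shape of the host side of this protocol once later
patches fill in the DSM dispatch (handle_dsm_request() and the use of the
opaque pointer as the shared buffer are illustrative assumptions, not part
of this series):

    /*
     * Hypothetical sketch: this patch only asserts that the guest wrote
     * NOTIFY_VALUE; a later patch is expected to read HDLE/ARG0..ARG3 from
     * the shared buffer, service the request and place the result in the
     * first page (ODAT) before the guest's _DSM method returns it.
     */
    static void dsm_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
    {
        struct dsm_buffer *dsm_buf = opaque;

        assert(val == NOTIFY_VALUE);
        handle_dsm_request(dsm_buf);    /* illustrative, not a real function */
    }
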
diff --git a/include/hw/mem/pc-nvdimm.h b/include/hw/mem/pc-nvdimm.h
index b2da8fa..b7faec3 100644
--- a/include/hw/mem/pc-nvdimm.h
+++ b/include/hw/mem/pc-nvdimm.h
@@ -14,6 +14,7 @@ 
 #define __PC_NVDIMM_H
 
 #include "hw/qdev.h"
+#include "hw/acpi/aml-build.h"
 
 typedef struct PCNVDIMMDevice {
     /* private */
@@ -38,4 +39,5 @@  typedef struct PCNVDIMMDevice {
 void pc_nvdimm_reserve_range(ram_addr_t offset);
 void pc_nvdimm_build_nfit_table(GArray *table_offsets, GArray *table_data,
                                 GArray *linker);
+void pc_nvdimm_build_acpi_devices(Aml *sb_scope);
 #endif