@@ -372,6 +372,24 @@ static void nvdimm_build_nfit(GSList *device_list, GArray *table_offsets,
g_array_free(structures, true);
}
+struct NvdimmDsmIn {
+ uint32_t handle;
+ uint32_t revision;
+ uint32_t function;
+ /* the remaining space in the page is used by arg3. */
+ union {
+ uint8_t arg3[0];
+ };
+} QEMU_PACKED;
+typedef struct NvdimmDsmIn NvdimmDsmIn;
+
+struct NvdimmDsmOut {
+ /* the size of the buffer filled by QEMU. */
+ uint32_t len;
+ uint8_t data[0];
+} QEMU_PACKED;
+typedef struct NvdimmDsmOut NvdimmDsmOut;
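+
+/*
+ * A sketch of how the two packed structs above overlay the single DSM
+ * page shared between the AML code and QEMU, assuming a 4KiB
+ * TARGET_PAGE_SIZE:
+ *
+ *   input (guest -> QEMU)                output (QEMU -> guest)
+ *   0x000  handle    (4 bytes)           0x000  len   (4 bytes)
+ *   0x004  revision  (4 bytes)           0x004  data  (up to 4092 bytes)
+ *   0x008  function  (4 bytes)
+ *   0x00c  arg3      (up to 4084 bytes)
+ *
+ * The same page carries both directions, so QEMU's reply overwrites the
+ * input fields.
+ */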
+
static uint64_t
nvdimm_dsm_read(void *opaque, hwaddr addr, unsigned size)
{
@@ -411,11 +429,18 @@ void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
static void nvdimm_build_common_dsm(Aml *dev)
{
- Aml *method, *ifctx, *function;
+ Aml *method, *ifctx, *function, *dsm_mem, *unpatched, *result_size;
uint8_t byte_list[1];
- method = aml_method(NVDIMM_COMMON_DSM, 4, AML_NOTSERIALIZED);
+ method = aml_method(NVDIMM_COMMON_DSM, 4, AML_SERIALIZED);
function = aml_arg(2);
+ dsm_mem = aml_name(NVDIMM_ACPI_MEM_ADDR);
+
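+ /*
+ * A sketch of the emulation flow built below:
+ * 1) store the device handle (0 for now), the caller's revision (Arg1)
+ *    and function (Arg2) into the DSM page via HDLE/REVS/FUNC;
+ * 2) write the page address to the NTFI IO port, which traps to QEMU
+ *    so it can process the request and fill in the result;
+ * 3) read the result size from RLEN and return the ODAT contents as a
+ *    buffer.
+ */
+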
+ /*
+ * Do not support any function if the DSM memory address has not been
+ * patched.
+ */
+ unpatched = aml_if(aml_equal(dsm_mem, aml_int(0x0)));
/*
* function 0 is called to inquire what functions are supported by
@@ -424,12 +449,38 @@ static void nvdimm_build_common_dsm(Aml *dev)
ifctx = aml_if(aml_equal(function, aml_int(0)));
byte_list[0] = 0 /* No function Supported */;
aml_append(ifctx, aml_return(aml_buffer(1, byte_list)));
- aml_append(method, ifctx);
+ aml_append(unpatched, ifctx);
/* No function is supported yet. */
byte_list[0] = 1 /* Not Supported */;
- aml_append(method, aml_return(aml_buffer(1, byte_list)));
+ aml_append(unpatched, aml_return(aml_buffer(1, byte_list)));
+ aml_append(method, unpatched);
+ /*
+ * The HDLE field indicates which device the DSM function is issued
+ * from. It is not used at this time since no function is supported
+ * yet; keep it 0 for all devices and set the appropriate value once
+ * real functions are implemented.
+ */
+ aml_append(method, aml_store(aml_int(0x0), aml_name("HDLE")));
+ aml_append(method, aml_store(aml_arg(1), aml_name("REVS")));
+ aml_append(method, aml_store(aml_arg(2), aml_name("FUNC")));
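+ /*
+ * Note: Arg3 is not copied into the ARG3 field here; no implemented
+ * function consumes it yet.
+ */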
+
+ /*
+ * Tell QEMU the real address of the DSM memory; QEMU then takes
+ * control and fills the result into that memory.
+ */
+ aml_append(method, aml_store(dsm_mem, aml_name("NTFI")));
+
+ result_size = aml_local(1);
+ aml_append(method, aml_store(aml_name("RLEN"), result_size));
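+ /* RLEN is in bytes; CreateField takes a bit length, so convert it. */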
+ aml_append(method, aml_store(aml_shiftleft(result_size, aml_int(3)),
+ result_size));
+ aml_append(method, aml_create_field(aml_name("ODAT"), aml_int(0),
+ result_size, "OBUF"));
+ aml_append(method, aml_concatenate(aml_buffer(0, NULL), aml_name("OBUF"),
+ aml_arg(6)));
+ aml_append(method, aml_return(aml_arg(6)));
aml_append(dev, method);
}
@@ -472,7 +523,7 @@ static void nvdimm_build_nvdimm_devices(GSList *device_list, Aml *root_dev)
static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
GArray *table_data, GArray *linker)
{
- Aml *ssdt, *sb_scope, *dev;
+ Aml *ssdt, *sb_scope, *dev, *field;
int mem_addr_offset, nvdimm_ssdt;
acpi_add_table(table_offsets, table_data);
@@ -497,6 +548,65 @@ static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
*/
aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0012")));
+ /* map DSM memory and IO into ACPI namespace. */
+ aml_append(dev, aml_operation_region("NPIO", AML_SYSTEM_IO,
+ aml_int(NVDIMM_ACPI_IO_BASE), NVDIMM_ACPI_IO_LEN));
+ aml_append(dev, aml_operation_region("NRAM", AML_SYSTEM_MEMORY,
+ aml_name(NVDIMM_ACPI_MEM_ADDR), TARGET_PAGE_SIZE));
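+ /*
+ * The NRAM base is the value of the NVDIMM_ACPI_MEM_ADDR named object,
+ * which is presumably patched at table-load time with the guest address
+ * reserved for the DSM page; until then it reads as 0, which is what
+ * the "unpatched" check in the common _DSM method tests for.
+ */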
+
+ /*
+ * DSM notifier:
+ * NTFI: writing the address of the DSM memory to it notifies QEMU to
+ *       emulate the DSM request.
+ *
+ * It is an IO port, so accessing it causes a VM-exit and control is
+ * transferred to QEMU.
+ */
+ field = aml_field("NPIO", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
+ aml_append(field, aml_named_field("NTFI",
+ sizeof(uint32_t) * BITS_PER_BYTE));
+ aml_append(dev, field);
+
+ /*
+ * DSM input:
+ * HDLE: store the device's handle; it is zero if the _DSM call happens
+ *       on the NVDIMM Root Device.
+ * REVS: store Arg1 of the _DSM call.
+ * FUNC: store Arg2 of the _DSM call.
+ * ARG3: store Arg3 of the _DSM call.
+ *
+ * These fields are backed by host RAM, so accessing them never causes
+ * a VM-exit.
+ */
+ field = aml_field("NRAM", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
+ aml_append(field, aml_named_field("HDLE",
+ sizeof(typeof_field(NvdimmDsmIn, handle)) * BITS_PER_BYTE));
+ aml_append(field, aml_named_field("REVS",
+ sizeof(typeof_field(NvdimmDsmIn, revision)) * BITS_PER_BYTE));
+ aml_append(field, aml_named_field("FUNC",
+ sizeof(typeof_field(NvdimmDsmIn, function)) * BITS_PER_BYTE));
+ aml_append(field, aml_named_field("ARG3",
+ (TARGET_PAGE_SIZE - offsetof(NvdimmDsmIn, arg3)) *
+ BITS_PER_BYTE));
+ aml_append(dev, field);
+
+ /*
+ * DSM output:
+ * RLEN: the size of the buffer filled by QEMU.
+ * ODAT: the buffer QEMU uses to store the result.
+ *
+ * Since the page is reused by both input and output, the input data is
+ * lost once a new result is stored into ODAT, so fetch all the input
+ * data before writing the result.
+ */
+ field = aml_field("NRAM", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
+ aml_append(field, aml_named_field("RLEN",
+ sizeof(typeof_field(NvdimmDsmOut, len)) * BITS_PER_BYTE));
+ aml_append(field, aml_named_field("ODAT",
+ (TARGET_PAGE_SIZE - offsetof(NvdimmDsmOut, data)) *
+ BITS_PER_BYTE));
+ aml_append(dev, field);
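+
+ /*
+ * Roughly, the generated ASL for the regions and fields above looks
+ * like this sketch (sizes assume a 4KiB TARGET_PAGE_SIZE):
+ *
+ *   OperationRegion (NPIO, SystemIO, NVDIMM_ACPI_IO_BASE, NVDIMM_ACPI_IO_LEN)
+ *   OperationRegion (NRAM, SystemMemory, <patched DSM page address>, 0x1000)
+ *   Field (NPIO, DWordAcc, NoLock, Preserve) { NTFI, 32 }
+ *   Field (NRAM, DWordAcc, NoLock, Preserve) {
+ *       HDLE, 32, REVS, 32, FUNC, 32, ARG3, 32672
+ *   }
+ *   Field (NRAM, DWordAcc, NoLock, Preserve) { RLEN, 32, ODAT, 32736 }
+ */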
+
nvdimm_build_common_dsm(dev);
nvdimm_build_device_dsm(dev);