From patchwork Fri Oct 30 05:56:19 2015 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Xiao Guangrong X-Patchwork-Id: 7524051 Return-Path: X-Original-To: patchwork-kvm@patchwork.kernel.org Delivered-To: patchwork-parsemail@patchwork2.web.kernel.org Received: from mail.kernel.org (mail.kernel.org [198.145.29.136]) by patchwork2.web.kernel.org (Postfix) with ESMTP id 2C7DDBF90C for ; Fri, 30 Oct 2015 06:02:59 +0000 (UTC) Received: from mail.kernel.org (localhost [127.0.0.1]) by mail.kernel.org (Postfix) with ESMTP id 5EC252081C for ; Fri, 30 Oct 2015 06:02:57 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id 2AD6120835 for ; Fri, 30 Oct 2015 06:02:56 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1758628AbbJ3GCw (ORCPT ); Fri, 30 Oct 2015 02:02:52 -0400 Received: from mga11.intel.com ([192.55.52.93]:38004 "EHLO mga11.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1758604AbbJ3GCt (ORCPT ); Fri, 30 Oct 2015 02:02:49 -0400 Received: from fmsmga003.fm.intel.com ([10.253.24.29]) by fmsmga102.fm.intel.com with ESMTP; 29 Oct 2015 23:02:48 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.20,217,1444719600"; d="scan'208";a="590814275" Received: from xiaoreal1.sh.intel.com (HELO xiaoreal1.sh.intel.com.sh.intel.com) ([10.239.48.79]) by FMSMGA003.fm.intel.com with ESMTP; 29 Oct 2015 23:02:46 -0700 From: Xiao Guangrong To: pbonzini@redhat.com, imammedo@redhat.com Cc: gleb@kernel.org, mtosatti@redhat.com, stefanha@redhat.com, mst@redhat.com, rth@twiddle.net, ehabkost@redhat.com, dan.j.williams@intel.com, kvm@vger.kernel.org, qemu-devel@nongnu.org, Xiao Guangrong Subject: [PATCH v6 25/33] nvdimm acpi: build ACPI nvdimm devices Date: Fri, 30 Oct 2015 13:56:19 +0800 Message-Id: <1446184587-142784-26-git-send-email-guangrong.xiao@linux.intel.com> X-Mailer: git-send-email 1.8.3.1 
In-Reply-To: <1446184587-142784-1-git-send-email-guangrong.xiao@linux.intel.com> References: <1446184587-142784-1-git-send-email-guangrong.xiao@linux.intel.com> Sender: kvm-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: kvm@vger.kernel.org X-Spam-Status: No, score=-7.9 required=5.0 tests=BAYES_00, RCVD_IN_DNSWL_HI, RP_MATCHES_RCVD, UNPARSEABLE_RELAY autolearn=unavailable version=3.3.1 X-Spam-Checker-Version: SpamAssassin 3.3.1 (2010-03-16) on mail.kernel.org X-Virus-Scanned: ClamAV using ClamSMTP NVDIMM devices are defined in ACPI 6.0 9.20 NVDIMM Devices There is a root device under \_SB and specified NVDIMM devices are under the root device. Each NVDIMM device has _ADR which returns its handle used to associate MEMDEV structure in NFIT. We reserve handle 0 for root device. In this patch, we save handle, arg1 and arg2 to dsm memory. Arg3 is conditionally saved in a later patch Signed-off-by: Xiao Guangrong --- hw/acpi/nvdimm.c | 184 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 184 insertions(+) diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c index dd84e5f..53ed675 100644 --- a/hw/acpi/nvdimm.c +++ b/hw/acpi/nvdimm.c @@ -368,6 +368,15 @@ static void nvdimm_build_nfit(GSList *device_list, GArray *table_offsets, g_array_free(structures, true); } +struct NvdimmDsmIn { + uint32_t handle; + uint32_t revision; + uint32_t function; + /* the remaining size in the page is used by arg3. 
*/ + uint8_t arg3[0]; +} QEMU_PACKED; +typedef struct NvdimmDsmIn NvdimmDsmIn; + static uint64_t nvdimm_dsm_read(void *opaque, hwaddr addr, unsigned size) { @@ -377,6 +386,7 @@ nvdimm_dsm_read(void *opaque, hwaddr addr, unsigned size) static void nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { + fprintf(stderr, "BUG: we never write DSM notification IO Port.\n"); } static const MemoryRegionOps nvdimm_dsm_ops = { @@ -402,6 +412,179 @@ void nvdimm_init_acpi_state(MemoryRegion *memory, MemoryRegion *io, memory_region_add_subregion(io, NVDIMM_ACPI_IO_BASE, &state->io_mr); } +#define BUILD_STA_METHOD(_dev_, _method_) \ + do { \ + _method_ = aml_method("_STA", 0); \ + aml_append(_method_, aml_return(aml_int(0x0f))); \ + aml_append(_dev_, _method_); \ + } while (0) + +#define BUILD_DSM_METHOD(_dev_, _method_, _handle_, _uuid_) \ + do { \ + Aml *ifctx, *uuid; \ + _method_ = aml_method("_DSM", 4); \ + /* check UUID if it is we expect, return the errorcode if not.*/ \ + uuid = aml_touuid(_uuid_); \ + ifctx = aml_if(aml_lnot(aml_equal(aml_arg(0), uuid))); \ + aml_append(ifctx, aml_return(aml_int(1 /* Not Supported */))); \ + aml_append(method, ifctx); \ + aml_append(method, aml_return(aml_call4("NCAL", aml_int(_handle_), \ + aml_arg(1), aml_arg(2), aml_arg(3)))); \ + aml_append(_dev_, _method_); \ + } while (0) + +#define BUILD_FIELD_UNIT_SIZE(_field_, _byte_, _name_) \ + aml_append(_field_, aml_named_field(_name_, (_byte_) * BITS_PER_BYTE)) + +#define BUILD_FIELD_UNIT_STRUCT(_field_, _s_, _f_, _name_) \ + BUILD_FIELD_UNIT_SIZE(_field_, sizeof(typeof_field(_s_, _f_)), _name_) + +static void build_nvdimm_devices(GSList *device_list, Aml *root_dev) +{ + for (; device_list; device_list = device_list->next) { + NVDIMMDevice *nvdimm = device_list->data; + int slot = object_property_get_int(OBJECT(nvdimm), DIMM_SLOT_PROP, + NULL); + uint32_t handle = nvdimm_slot_to_handle(slot); + Aml *dev, *method; + + dev = aml_device("NV%02X", slot); + aml_append(dev, 
aml_name_decl("_ADR", aml_int(handle))); + + BUILD_STA_METHOD(dev, method); + + /* + * Chapter 4: _DSM Interface for NVDIMM Device (non-root) - Example + * in DSM Spec Rev1. + */ + BUILD_DSM_METHOD(dev, method, + handle /* NVDIMM Device Handle */, + "4309AC30-0D11-11E4-9191-0800200C9A66" + /* UUID for NVDIMM Devices. */); + + aml_append(root_dev, dev); + } +} + +static void nvdimm_build_acpi_devices(GSList *device_list, Aml *sb_scope) +{ + Aml *dev, *method, *field; + uint64_t page_size = TARGET_PAGE_SIZE; + + dev = aml_device("NVDR"); + aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0012"))); + + /* map DSM memory and IO into ACPI namespace. */ + aml_append(dev, aml_operation_region("NPIO", AML_SYSTEM_IO, + NVDIMM_ACPI_IO_BASE, NVDIMM_ACPI_IO_LEN)); + aml_append(dev, aml_operation_region("NRAM", AML_SYSTEM_MEMORY, + NVDIMM_ACPI_MEM_BASE, page_size)); + + /* + * DSM notifier: + * @NOTI: Read it will notify QEMU that _DSM method is being + * called and the parameters can be found in NvdimmDsmIn. + * The value read from it is the buffer size of DSM output + * filled by QEMU. + */ + field = aml_field("NPIO", AML_DWORD_ACC, AML_PRESERVE); + BUILD_FIELD_UNIT_SIZE(field, sizeof(uint32_t), "NOTI"); + aml_append(dev, field); + + /* + * DSM input: + * @HDLE: store device's handle, it's zero if the _DSM call happens + * on NVDIMM Root Device. + * @REVS: store the Arg1 of _DSM call. + * @FUNC: store the Arg2 of _DSM call. + * @ARG3: store the Arg3 of _DSM call. + * + * They are RAM mapping on host so that these accesses never cause + * VM-EXIT. 
+ */ + field = aml_field("NRAM", AML_DWORD_ACC, AML_PRESERVE); + BUILD_FIELD_UNIT_STRUCT(field, NvdimmDsmIn, handle, "HDLE"); + BUILD_FIELD_UNIT_STRUCT(field, NvdimmDsmIn, revision, "REVS"); + BUILD_FIELD_UNIT_STRUCT(field, NvdimmDsmIn, function, "FUNC"); + BUILD_FIELD_UNIT_SIZE(field, page_size - offsetof(NvdimmDsmIn, arg3), + "ARG3"); + aml_append(dev, field); + + /* + * DSM output: + * @ODAT: the buffer QEMU uses to store the result, the actual size + * filled by QEMU is the value read from NOTI. + * + * Since the page is reused by both input and output, the input data + * will be lost after storing new result into @ODAT. + */ + field = aml_field("NRAM", AML_DWORD_ACC, AML_PRESERVE); + BUILD_FIELD_UNIT_SIZE(field, page_size, "ODAT"); + aml_append(dev, field); + + method = aml_method_serialized("NCAL", 4); + { + Aml *buffer_size = aml_local(0); + + aml_append(method, aml_store(aml_arg(0), aml_name("HDLE"))); + aml_append(method, aml_store(aml_arg(1), aml_name("REVS"))); + aml_append(method, aml_store(aml_arg(2), aml_name("FUNC"))); + + /* + * transfer control to QEMU and the buffer size filled by + * QEMU is returned. + */ + aml_append(method, aml_store(aml_name("NOTI"), buffer_size)); + + aml_append(method, aml_store(aml_shiftleft(buffer_size, + aml_int(3)), buffer_size)); + + aml_append(method, aml_create_field(aml_name("ODAT"), aml_int(0), + buffer_size , "OBUF")); + aml_append(method, aml_concatenate(aml_buffer(0, NULL), + aml_name("OBUF"), aml_local(1))); + aml_append(method, aml_return(aml_local(1))); + } + aml_append(dev, method); + + BUILD_STA_METHOD(dev, method); + + /* + * Chapter 3: _DSM Interface for NVDIMM Root Device - Example in DSM + * Spec Rev1. + */ + BUILD_DSM_METHOD(dev, method, + 0 /* 0 is reserved for NVDIMM Root Device*/, + "2F10E7A4-9E91-11E4-89D3-123B93F75CBA" + /* UUID for NVDIMM Root Devices. 
*/); + + build_nvdimm_devices(device_list, dev); + + aml_append(sb_scope, dev); +} + +static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets, + GArray *table_data, GArray *linker) +{ + Aml *ssdt, *sb_scope; + + acpi_add_table(table_offsets, table_data); + + ssdt = init_aml_allocator(); + acpi_data_push(ssdt->buf, sizeof(AcpiTableHeader)); + + sb_scope = aml_scope("\\_SB"); + nvdimm_build_acpi_devices(device_list, sb_scope); + + aml_append(ssdt, sb_scope); + /* copy AML table into ACPI tables blob and patch header there */ + g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len); + build_header(linker, table_data, + (void *)(table_data->data + table_data->len - ssdt->buf->len), + "SSDT", ssdt->buf->len, 1); + free_aml_allocator(); +} + void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data, GArray *linker) { @@ -414,5 +597,6 @@ void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data, } nvdimm_build_nfit(device_list, table_offsets, table_data, linker); + nvdimm_build_ssdt(device_list, table_offsets, table_data, linker); g_slist_free(device_list); }