From: Xiao Guangrong <guangrong.xiao@linux.intel.com>
To: pbonzini@redhat.com, imammedo@redhat.com
Cc: gleb@kernel.org, mtosatti@redhat.com, stefanha@redhat.com,
    mst@redhat.com, rth@twiddle.net, ehabkost@redhat.com,
    kvm@vger.kernel.org, qemu-devel@nongnu.org,
    Xiao Guangrong <guangrong.xiao@linux.intel.com>
Subject: [PATCH 11/16] nvdimm: build ACPI nvdimm devices
Date: Wed, 1 Jul 2015 22:50:27 +0800
Message-Id: <1435762232-15543-12-git-send-email-guangrong.xiao@linux.intel.com>
In-Reply-To: <1435762232-15543-1-git-send-email-guangrong.xiao@linux.intel.com>
References: <1435762232-15543-1-git-send-email-guangrong.xiao@linux.intel.com>

NVDIMM devices are defined in ACPI 6.0, section 9.20 "NVDIMM Devices".

A root device is created under \_SB, and the individual NVDIMM devices are
placed under that root device. Each NVDIMM device has an _ADR object that
returns its handle, which is used to associate the device with its MEMDEV
table in the NFIT. Handle 0 is reserved for the root device.

In this patch, we save the handle, arg0, arg1 and arg2.
Arg3 is conditionally saved in a later patch.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
---
 hw/i386/acpi-build.c       |   2 +
 hw/mem/pc-nvdimm.c         | 126 +++++++++++++++++++++++++++++++++++++++++++++
 include/hw/mem/pc-nvdimm.h |   6 +++
 3 files changed, 134 insertions(+)

diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index 80c21be..85c7226 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -1342,6 +1342,8 @@ build_ssdt(GArray *table_data, GArray *linker,
             aml_append(sb_scope, scope);
         }
     }
+
+    pc_nvdimm_build_acpi_devices(sb_scope);
 
     aml_append(ssdt, sb_scope);
 }
diff --git a/hw/mem/pc-nvdimm.c b/hw/mem/pc-nvdimm.c
index 4c290cb..0e2a9d5 100644
--- a/hw/mem/pc-nvdimm.c
+++ b/hw/mem/pc-nvdimm.c
@@ -32,6 +32,7 @@
 
 #define PAGE_SIZE               (1UL << 12)
 
+#define NOTIFY_VALUE            (0x99)
 #define MAX_NVDIMM_NUMBER       (10)
 #define MIN_CONFIG_DATA_SIZE    (128 << 10)
 
@@ -348,12 +349,15 @@ struct dsm_buffer {
 static uint64_t dsm_read(void *opaque, hwaddr addr,
                          unsigned size)
 {
+    fprintf(stderr, "BUG: we never read DSM notification MMIO.\n");
+    assert(0);
     return 0;
 }
 
 static void dsm_write(void *opaque, hwaddr addr,
                       uint64_t val, unsigned size)
 {
+    assert(val == NOTIFY_VALUE);
 }
 
 static const MemoryRegionOps dsm_ops = {
@@ -429,6 +433,128 @@ exit:
     g_slist_free(list);
 }
 
+#define BUILD_STA_METHOD(_dev_, _method_)                                 \
+    do {                                                                  \
+        _method_ = aml_method("_STA", 0);                                 \
+        aml_append(_method_, aml_return(aml_int(0x0f)));                  \
+        aml_append(_dev_, _method_);                                      \
+    } while (0)
+
+#define SAVE_ARG012_HANDLE(_method_, _handle_)                            \
+    do {                                                                  \
+        aml_append(_method_, aml_store(_handle_, aml_name("HDLE")));      \
+        aml_append(_method_, aml_store(aml_arg(0), aml_name("ARG0")));    \
+        aml_append(_method_, aml_store(aml_arg(1), aml_name("ARG1")));    \
+        aml_append(_method_, aml_store(aml_arg(2), aml_name("ARG2")));    \
+    } while (0)
+
+#define NOTIFY_AND_RETURN(_method_)                                       \
+    do {                                                                  \
+        aml_append(_method_, aml_store(aml_int(NOTIFY_VALUE),             \
+                                       aml_name("NOTI")));                \
+        aml_append(_method_, aml_return(aml_name("ODAT")));               \
+    } while (0)
+
+static void build_nvdimm_devices(Aml *root_dev, GSList *list)
+{
+    for (; list; list = list->next) {
+        PCNVDIMMDevice *nvdimm = list->data;
+        uint32_t handle = nvdimm_index_to_handle(nvdimm->device_index);
+        Aml *dev, *method;
+
+        dev = aml_device("NVD%d", nvdimm->device_index);
+        aml_append(dev, aml_name_decl("_ADR", aml_int(handle)));
+
+        BUILD_STA_METHOD(dev, method);
+
+        method = aml_method("_DSM", 4);
+        {
+            SAVE_ARG012_HANDLE(method, aml_int(handle));
+            NOTIFY_AND_RETURN(method);
+        }
+        aml_append(dev, method);
+
+        aml_append(root_dev, dev);
+    }
+}
+
+void pc_nvdimm_build_acpi_devices(Aml *sb_scope)
+{
+    Aml *dev, *method, *field;
+    struct dsm_buffer *dsm_buf;
+    GSList *list = get_nvdimm_built_list();
+    int nr = get_nvdimm_device_number(list);
+
+    if (nr <= 0 || nr > MAX_NVDIMM_NUMBER) {
+        g_slist_free(list);
+        return;
+    }
+
+    dev = aml_device("NVDR");
+    aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0012")));
+
+    /* map the DSM buffer into the ACPI namespace. */
+    aml_append(dev, aml_operation_region("DSMR", AML_SYSTEM_MEMORY,
+               nvdimms_info.dsm_addr, nvdimms_info.dsm_size));
+    /*
+     * DSM input:
+     * @HDLE: stores the device's handle; it is zero if the _DSM call
+     *        happens on the root device.
+     * @ARG0 ~ @ARG3: store the parameters of the _DSM call.
+     *
+     * They are RAM-mapped on the host, so these accesses never cause a VM-exit.
+     */
+    field = aml_field("DSMR", AML_DWORD_ACC, AML_PRESERVE);
+    aml_append(field, aml_named_field("HDLE",
+               sizeof(dsm_buf->handle) * BITS_PER_BYTE));
+    aml_append(field, aml_named_field("ARG0",
+               sizeof(dsm_buf->arg0) * BITS_PER_BYTE));
+    aml_append(field, aml_named_field("ARG1",
+               sizeof(dsm_buf->arg1) * BITS_PER_BYTE));
+    aml_append(field, aml_named_field("ARG2",
+               sizeof(dsm_buf->arg2) * BITS_PER_BYTE));
+    aml_append(field, aml_named_field("ARG3",
+               sizeof(dsm_buf->arg3) * BITS_PER_BYTE));
+
+    /*
+     * DSM input:
+     * @NOTI: writing a value to it notifies QEMU that the _DSM method is
+     *        being called and that the parameters can be found in dsm_buf.
+     *
+     * It is MMIO-mapped on the host, so the write causes a VM-exit and
+     * QEMU gets control.
+     */
+    aml_append(field, aml_named_field("NOTI",
+               sizeof(dsm_buf->notify) * BITS_PER_BYTE));
+    aml_append(dev, field);
+
+    /*
+     * DSM output:
+     * @ODAT: it reuses the first page of the DSM buffer; QEMU stores the
+     *        result there.
+     *
+     * Since the first page is shared by input and output, the input data
+     * is lost once a new result is stored into @ODAT.
+     */
+    field = aml_field("DSMR", AML_DWORD_ACC, AML_PRESERVE);
+    aml_append(field, aml_named_field("ODAT", PAGE_SIZE * BITS_PER_BYTE));
+    aml_append(dev, field);
+
+    BUILD_STA_METHOD(dev, method);
+
+    method = aml_method("_DSM", 4);
+    {
+        SAVE_ARG012_HANDLE(method, aml_int(0));
+        NOTIFY_AND_RETURN(method);
+    }
+    aml_append(dev, method);
+
+    build_nvdimm_devices(dev, list);
+
+    aml_append(sb_scope, dev);
+    g_slist_free(list);
+}
+
 static char *get_file(Object *obj, Error **errp)
 {
     PCNVDIMMDevice *nvdimm = PC_NVDIMM(obj);
diff --git a/include/hw/mem/pc-nvdimm.h b/include/hw/mem/pc-nvdimm.h
index 74d989b..eb916e5 100644
--- a/include/hw/mem/pc-nvdimm.h
+++ b/include/hw/mem/pc-nvdimm.h
@@ -14,6 +14,7 @@
 #define __PC_NVDIMM_H
 
 #include "hw/qdev.h"
+#include "hw/acpi/aml-build.h"
 
 #ifdef CONFIG_LINUX
 typedef struct PCNVDIMMDevice {
@@ -36,6 +37,7 @@ typedef struct PCNVDIMMDevice {
 void pc_nvdimm_reserve_range(ram_addr_t offset);
 void pc_nvdimm_build_nfit_table(GArray *table_offsets, GArray *table_data,
                                 GArray *linker);
+void pc_nvdimm_build_acpi_devices(Aml *sb_scope);
 
 #else /* !CONFIG_LINUX */
 static inline void pc_nvdimm_reserve_range(ram_addr_t offset)
 {
@@ -46,5 +48,9 @@ pc_nvdimm_build_nfit_table(GArray *table_offsets, GArray *table_data,
                            GArray *linker)
 {
 }
+
+static inline void pc_nvdimm_build_acpi_devices(Aml *sb_scope)
+{
+}
 #endif
 #endif
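
For reference, the namespace that pc_nvdimm_build_acpi_devices() constructs
decompiles to roughly the ASL sketched below. This is only an illustration of
the aml_*() calls in the patch, not the exact generator output: the operation
region address and size come from nvdimms_info, the field widths follow
sizeof() of the dsm_buffer members defined earlier in the series (assumed
32-bit here), and the per-DIMM handle is whatever nvdimm_index_to_handle()
returns (a nonzero placeholder is used, since handle 0 is reserved for the
root device).

    Scope (\_SB)
    {
        Device (NVDR)                       // NVDIMM root device (ACPI 6.0, 9.20)
        {
            Name (_HID, "ACPI0012")

            // DSM buffer mapped from guest physical memory; address/size
            // are placeholders for nvdimms_info.dsm_addr/dsm_size.
            OperationRegion (DSMR, SystemMemory, 0xDEAD0000, 0x00010000)

            // Input fields are RAM-backed; NOTI is MMIO-backed and traps to QEMU.
            Field (DSMR, DWordAcc, NoLock, Preserve)
            {
                HDLE,   32,                 // widths assumed: sizeof(member) * 8
                ARG0,   32,
                ARG1,   32,
                ARG2,   32,
                ARG3,   32,
                NOTI,   32
            }

            // Output overlays the first page of the same region.
            Field (DSMR, DWordAcc, NoLock, Preserve)
            {
                ODAT,   32768               // PAGE_SIZE * BITS_PER_BYTE
            }

            Method (_STA, 0, NotSerialized)
            {
                Return (0x0F)
            }

            Method (_DSM, 4, NotSerialized) // root device uses handle 0
            {
                Store (Zero, HDLE)
                Store (Arg0, ARG0)
                Store (Arg1, ARG1)
                Store (Arg2, ARG2)
                Store (0x99, NOTI)          // NOTIFY_VALUE: traps to dsm_write()
                Return (ODAT)
            }

            Device (NVD0)                   // one NVD%d device per NVDIMM
            {
                Name (_ADR, 0x01)           // placeholder: nvdimm_index_to_handle()
                Method (_STA, 0, NotSerialized)
                {
                    Return (0x0F)
                }
                Method (_DSM, 4, NotSerialized)
                {
                    Store (0x01, HDLE)      // this device's handle
                    Store (Arg0, ARG0)
                    Store (Arg1, ARG1)
                    Store (Arg2, ARG2)
                    Store (0x99, NOTI)
                    Return (ODAT)
                }
            }
        }
    }

The point of this layout is that storing the handle and Arg0-Arg2 only touches
RAM-backed fields, so the single store to NOTI is the only operation that
exits to QEMU, which then finds the parameters already in the shared buffer
and places its result in ODAT.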