From patchwork Sun Apr 23 16:20:08 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: Jonathan Cameron X-Patchwork-Id: 13221402 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from lists.gnu.org (lists.gnu.org [209.51.188.17]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.lore.kernel.org (Postfix) with ESMTPS id D03CAC77B73 for ; Sun, 23 Apr 2023 16:21:18 +0000 (UTC) Received: from localhost ([::1] helo=lists1p.gnu.org) by lists.gnu.org with esmtp (Exim 4.90_1) (envelope-from ) id 1pqcSE-0000e1-Hx; Sun, 23 Apr 2023 12:20:46 -0400 Received: from eggs.gnu.org ([2001:470:142:3::10]) by lists.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_256_GCM_SHA384:256) (Exim 4.90_1) (envelope-from ) id 1pqcSC-0000ct-N2 for qemu-devel@nongnu.org; Sun, 23 Apr 2023 12:20:44 -0400 Received: from frasgout.his.huawei.com ([185.176.79.56]) by eggs.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_256_GCM_SHA384:256) (Exim 4.90_1) (envelope-from ) id 1pqcSA-0004nZ-T7 for qemu-devel@nongnu.org; Sun, 23 Apr 2023 12:20:44 -0400 Received: from lhrpeml500005.china.huawei.com (unknown [172.18.147.226]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4Q4D1x0CGgz67pY8; Mon, 24 Apr 2023 00:17:41 +0800 (CST) Received: from SecurePC-101-06.china.huawei.com (10.122.247.231) by lhrpeml500005.china.huawei.com (7.191.163.240) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2507.23; Sun, 23 Apr 2023 17:20:39 +0100 To: , Michael Tsirkin , Fan Ni CC: , , Ira Weiny , Alison Schofield , Michael Roth , =?utf-8?q?Philippe_Mathieu-Daud=C3=A9?= , Dave Jiang , Markus Armbruster , =?utf-8?q?Daniel_P_=2E_Berrang=C3=A9?= , Eric Blake , Mike Maslenkin , =?utf-8?q?Marc-Andr=C3=A9_Lureau?= , Thomas Huth Subject: [PATCH v5 1/6] hw/cxl: rename mailbox return code type from ret_code to CXLRetCode Date: Sun, 23 Apr 2023 17:20:08 +0100 Message-ID: <20230423162013.4535-2-Jonathan.Cameron@huawei.com> X-Mailer: git-send-email 2.37.2 In-Reply-To: <20230423162013.4535-1-Jonathan.Cameron@huawei.com> References: <20230423162013.4535-1-Jonathan.Cameron@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.122.247.231] X-ClientProxiedBy: lhrpeml500004.china.huawei.com (7.191.163.9) To lhrpeml500005.china.huawei.com (7.191.163.240) X-CFilter-Loop: Reflected Received-SPF: pass client-ip=185.176.79.56; envelope-from=jonathan.cameron@huawei.com; helo=frasgout.his.huawei.com X-Spam_score_int: -41 X-Spam_score: -4.2 X-Spam_bar: ---- X-Spam_report: (-4.2 / 5.0 requ) BAYES_00=-1.9, RCVD_IN_DNSWL_MED=-2.3, RCVD_IN_MSPIKE_H2=-0.001, SPF_HELO_NONE=0.001, SPF_PASS=-0.001, T_SCC_BODY_TEXT_LINE=-0.01 autolearn=ham autolearn_force=no X-Spam_action: no action X-BeenThere: qemu-devel@nongnu.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Reply-to: Jonathan Cameron X-Patchwork-Original-From: Jonathan Cameron via From: Jonathan Cameron Errors-To: qemu-devel-bounces+qemu-devel=archiver.kernel.org@nongnu.org Sender: qemu-devel-bounces+qemu-devel=archiver.kernel.org@nongnu.org Given the increasing usage of this mailbox return code type, now is a good time to switch to QEMU style naming. Reviewed-by: Ira Weiny Reviewed-by: Fan Ni Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Jonathan Cameron --- v5: Picked up Philippe's tag. 
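For readers new to this file, a minimal sketch of what a mailbox command handler looks like once the rename is applied. The command itself is hypothetical and only illustrative; the CXLRetCode and opcode_handler shapes, struct cxl_cmd, CXLDeviceState and CXL_MBOX_SUCCESS all come from hw/cxl/cxl-mailbox-utils.c as modified below.

static CXLRetCode cmd_example_noop(struct cxl_cmd *cmd,
                                   CXLDeviceState *cxl_dstate,
                                   uint16_t *len)
{
    /* Nothing to return: report success through the QEMU-style named type */
    *len = 0;
    return CXL_MBOX_SUCCESS;
}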
--- hw/cxl/cxl-mailbox-utils.c | 64 +++++++++++++++++++------------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c index ed663cc04a..7b2aef0d67 100644 --- a/hw/cxl/cxl-mailbox-utils.c +++ b/hw/cxl/cxl-mailbox-utils.c @@ -23,7 +23,7 @@ * FOO = 0x7f, * #define BAR 0 * 2. Implement the handler - * static ret_code cmd_foo_bar(struct cxl_cmd *cmd, + * static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd, * CXLDeviceState *cxl_dstate, uint16_t *len) * 3. Add the command to the cxl_cmd_set[][] * [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y }, @@ -90,10 +90,10 @@ typedef enum { CXL_MBOX_UNSUPPORTED_MAILBOX = 0x15, CXL_MBOX_INVALID_PAYLOAD_LENGTH = 0x16, CXL_MBOX_MAX = 0x17 -} ret_code; +} CXLRetCode; struct cxl_cmd; -typedef ret_code (*opcode_handler)(struct cxl_cmd *cmd, +typedef CXLRetCode (*opcode_handler)(struct cxl_cmd *cmd, CXLDeviceState *cxl_dstate, uint16_t *len); struct cxl_cmd { const char *name; @@ -105,16 +105,16 @@ struct cxl_cmd { #define DEFINE_MAILBOX_HANDLER_ZEROED(name, size) \ uint16_t __zero##name = size; \ - static ret_code cmd_##name(struct cxl_cmd *cmd, \ - CXLDeviceState *cxl_dstate, uint16_t *len) \ + static CXLRetCode cmd_##name(struct cxl_cmd *cmd, \ + CXLDeviceState *cxl_dstate, uint16_t *len) \ { \ *len = __zero##name; \ memset(cmd->payload, 0, *len); \ return CXL_MBOX_SUCCESS; \ } #define DEFINE_MAILBOX_HANDLER_NOP(name) \ - static ret_code cmd_##name(struct cxl_cmd *cmd, \ - CXLDeviceState *cxl_dstate, uint16_t *len) \ + static CXLRetCode cmd_##name(struct cxl_cmd *cmd, \ + CXLDeviceState *cxl_dstate, uint16_t *len) \ { \ return CXL_MBOX_SUCCESS; \ } @@ -125,9 +125,9 @@ DEFINE_MAILBOX_HANDLER_ZEROED(events_get_interrupt_policy, 4); DEFINE_MAILBOX_HANDLER_NOP(events_set_interrupt_policy); /* 8.2.9.2.1 */ -static ret_code cmd_firmware_update_get_info(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_firmware_update_get_info(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct { uint8_t slots_supported; @@ -159,9 +159,9 @@ static ret_code cmd_firmware_update_get_info(struct cxl_cmd *cmd, } /* 8.2.9.3.1 */ -static ret_code cmd_timestamp_get(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_timestamp_get(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { uint64_t time, delta; uint64_t final_time = 0; @@ -181,7 +181,7 @@ static ret_code cmd_timestamp_get(struct cxl_cmd *cmd, } /* 8.2.9.3.2 */ -static ret_code cmd_timestamp_set(struct cxl_cmd *cmd, +static CXLRetCode cmd_timestamp_set(struct cxl_cmd *cmd, CXLDeviceState *cxl_dstate, uint16_t *len) { @@ -201,9 +201,9 @@ static const QemuUUID cel_uuid = { }; /* 8.2.9.4.1 */ -static ret_code cmd_logs_get_supported(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_logs_get_supported(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct { uint16_t entries; @@ -224,9 +224,9 @@ static ret_code cmd_logs_get_supported(struct cxl_cmd *cmd, } /* 8.2.9.4.2 */ -static ret_code cmd_logs_get_log(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_logs_get_log(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct { QemuUUID uuid; @@ -265,9 +265,9 @@ static ret_code cmd_logs_get_log(struct cxl_cmd *cmd, } /* 8.2.9.5.1.1 */ -static ret_code cmd_identify_memory_device(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - 
uint16_t *len) +static CXLRetCode cmd_identify_memory_device(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct { char fw_revision[0x10]; @@ -309,9 +309,9 @@ static ret_code cmd_identify_memory_device(struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } -static ret_code cmd_ccls_get_partition_info(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_ccls_get_partition_info(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct { uint64_t active_vmem; @@ -339,9 +339,9 @@ static ret_code cmd_ccls_get_partition_info(struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } -static ret_code cmd_ccls_get_lsa(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_ccls_get_lsa(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct { uint32_t offset; @@ -364,9 +364,9 @@ static ret_code cmd_ccls_get_lsa(struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } -static ret_code cmd_ccls_set_lsa(struct cxl_cmd *cmd, - CXLDeviceState *cxl_dstate, - uint16_t *len) +static CXLRetCode cmd_ccls_set_lsa(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) { struct set_lsa_pl { uint32_t offset; From patchwork Sun Apr 23 16:20:09 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jonathan Cameron X-Patchwork-Id: 13221404 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from lists.gnu.org (lists.gnu.org [209.51.188.17]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.lore.kernel.org (Postfix) with ESMTPS id B8C0CC77B73 for ; Sun, 23 Apr 2023 16:21:35 +0000 (UTC) Received: from localhost ([::1] helo=lists1p.gnu.org) by lists.gnu.org with esmtp (Exim 4.90_1) (envelope-from ) id 1pqcSl-0000q5-5V; Sun, 23 Apr 2023 12:21:19 -0400 Received: from eggs.gnu.org ([2001:470:142:3::10]) by lists.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_256_GCM_SHA384:256) (Exim 4.90_1) (envelope-from ) id 1pqcSk-0000oG-9i for qemu-devel@nongnu.org; Sun, 23 Apr 2023 12:21:18 -0400 Received: from frasgout.his.huawei.com ([185.176.79.56]) by eggs.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_256_GCM_SHA384:256) (Exim 4.90_1) (envelope-from ) id 1pqcSf-0004pr-C5 for qemu-devel@nongnu.org; Sun, 23 Apr 2023 12:21:18 -0400 Received: from lhrpeml500005.china.huawei.com (unknown [172.18.147.207]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4Q4D4X3MwQz67H66; Mon, 24 Apr 2023 00:19:56 +0800 (CST) Received: from SecurePC-101-06.china.huawei.com (10.122.247.231) by lhrpeml500005.china.huawei.com (7.191.163.240) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2507.23; Sun, 23 Apr 2023 17:21:10 +0100 To: , Michael Tsirkin , Fan Ni CC: , , Ira Weiny , Alison Schofield , Michael Roth , =?utf-8?q?Philippe_Mathieu-Daud=C3=A9?= , Dave Jiang , Markus Armbruster , =?utf-8?q?Daniel_P_=2E_Berrang=C3=A9?= , Eric Blake , Mike Maslenkin , =?utf-8?q?Marc-Andr=C3=A9_Lureau?= , Thomas Huth Subject: [PATCH v5 2/6] hw/cxl: Introduce cxl_device_get_timestamp() utility function Date: Sun, 23 Apr 2023 17:20:09 +0100 Message-ID: <20230423162013.4535-3-Jonathan.Cameron@huawei.com> X-Mailer: git-send-email 2.37.2 In-Reply-To: <20230423162013.4535-1-Jonathan.Cameron@huawei.com> References: <20230423162013.4535-1-Jonathan.Cameron@huawei.com> MIME-Version: 1.0 X-Originating-IP: 
[10.122.247.231] X-ClientProxiedBy: lhrpeml500005.china.huawei.com (7.191.163.240) To lhrpeml500005.china.huawei.com (7.191.163.240) X-CFilter-Loop: Reflected Received-SPF: pass client-ip=185.176.79.56; envelope-from=jonathan.cameron@huawei.com; helo=frasgout.his.huawei.com X-Spam_score_int: -41 X-Spam_score: -4.2 X-Spam_bar: ---- X-Spam_report: (-4.2 / 5.0 requ) BAYES_00=-1.9, RCVD_IN_DNSWL_MED=-2.3, RCVD_IN_MSPIKE_H2=-0.001, SPF_PASS=-0.001, T_SCC_BODY_TEXT_LINE=-0.01, T_SPF_HELO_TEMPERROR=0.01 autolearn=ham autolearn_force=no X-Spam_action: no action X-BeenThere: qemu-devel@nongnu.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Reply-to: Jonathan Cameron X-Patchwork-Original-From: Jonathan Cameron via From: Jonathan Cameron Errors-To: qemu-devel-bounces+qemu-devel=archiver.kernel.org@nongnu.org Sender: qemu-devel-bounces+qemu-devel=archiver.kernel.org@nongnu.org From: Ira Weiny There are new users of this functionality coming shortly so factor it out from the GET_TIMESTAMP mailbox command handling. Signed-off-by: Ira Weiny Reviewed-by: Fan Ni Signed-off-by: Jonathan Cameron --- v5: No change. --- hw/cxl/cxl-device-utils.c | 15 +++++++++++++++ hw/cxl/cxl-mailbox-utils.c | 11 +---------- include/hw/cxl/cxl_device.h | 2 ++ 3 files changed, 18 insertions(+), 10 deletions(-) diff --git a/hw/cxl/cxl-device-utils.c b/hw/cxl/cxl-device-utils.c index 4c5e88aaf5..86e1cea8ce 100644 --- a/hw/cxl/cxl-device-utils.c +++ b/hw/cxl/cxl-device-utils.c @@ -269,3 +269,18 @@ void cxl_device_register_init_common(CXLDeviceState *cxl_dstate) cxl_initialize_mailbox(cxl_dstate); } + +uint64_t cxl_device_get_timestamp(CXLDeviceState *cxl_dstate) +{ + uint64_t time, delta; + uint64_t final_time = 0; + + if (cxl_dstate->timestamp.set) { + /* Find the delta from the last time the host set the time. */ + time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + delta = time - cxl_dstate->timestamp.last_set; + final_time = cxl_dstate->timestamp.host_set + delta; + } + + return final_time; +} diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c index 7b2aef0d67..702e16ca20 100644 --- a/hw/cxl/cxl-mailbox-utils.c +++ b/hw/cxl/cxl-mailbox-utils.c @@ -163,17 +163,8 @@ static CXLRetCode cmd_timestamp_get(struct cxl_cmd *cmd, CXLDeviceState *cxl_dstate, uint16_t *len) { - uint64_t time, delta; - uint64_t final_time = 0; - - if (cxl_dstate->timestamp.set) { - /* First find the delta from the last time the host set the time. 
*/ - time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - delta = time - cxl_dstate->timestamp.last_set; - final_time = cxl_dstate->timestamp.host_set + delta; - } + uint64_t final_time = cxl_device_get_timestamp(cxl_dstate); - /* Then adjust the actual time */ stq_le_p(cmd->payload, final_time); *len = 8; diff --git a/include/hw/cxl/cxl_device.h b/include/hw/cxl/cxl_device.h index edb9791bab..02befda0f6 100644 --- a/include/hw/cxl/cxl_device.h +++ b/include/hw/cxl/cxl_device.h @@ -287,4 +287,6 @@ MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data, MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data, unsigned size, MemTxAttrs attrs); +uint64_t cxl_device_get_timestamp(CXLDeviceState *cxlds); + #endif From patchwork Sun Apr 23 16:20:10 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jonathan Cameron X-Patchwork-Id: 13221405 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from lists.gnu.org (lists.gnu.org [209.51.188.17]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.lore.kernel.org (Postfix) with ESMTPS id 57664C77B60 for ; Sun, 23 Apr 2023 16:21:52 +0000 (UTC) Received: from localhost ([::1] helo=lists1p.gnu.org) by lists.gnu.org with esmtp (Exim 4.90_1) (envelope-from ) id 1pqcTC-0002C6-AT; Sun, 23 Apr 2023 12:21:46 -0400 Received: from eggs.gnu.org ([2001:470:142:3::10]) by lists.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_256_GCM_SHA384:256) (Exim 4.90_1) (envelope-from ) id 1pqcTA-00024H-LJ for qemu-devel@nongnu.org; Sun, 23 Apr 2023 12:21:44 -0400 Received: from frasgout.his.huawei.com ([185.176.79.56]) by eggs.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_256_GCM_SHA384:256) (Exim 4.90_1) (envelope-from ) id 1pqcT9-0004rd-0R for qemu-devel@nongnu.org; Sun, 23 Apr 2023 12:21:44 -0400 Received: from lhrpeml500005.china.huawei.com (unknown [172.18.147.200]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4Q4D361nJWz67QQ9; Mon, 24 Apr 2023 00:18:42 +0800 (CST) Received: from SecurePC-101-06.china.huawei.com (10.122.247.231) by lhrpeml500005.china.huawei.com (7.191.163.240) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2507.23; Sun, 23 Apr 2023 17:21:40 +0100 To: , Michael Tsirkin , Fan Ni CC: , , Ira Weiny , Alison Schofield , Michael Roth , =?utf-8?q?Philippe_Mathieu-Daud=C3=A9?= , Dave Jiang , Markus Armbruster , =?utf-8?q?Daniel_P_=2E_Berrang=C3=A9?= , Eric Blake , Mike Maslenkin , =?utf-8?q?Marc-Andr=C3=A9_Lureau?= , Thomas Huth Subject: [PATCH v5 3/6] bswap: Add the ability to store to an unaligned 24 bit field Date: Sun, 23 Apr 2023 17:20:10 +0100 Message-ID: <20230423162013.4535-4-Jonathan.Cameron@huawei.com> X-Mailer: git-send-email 2.37.2 In-Reply-To: <20230423162013.4535-1-Jonathan.Cameron@huawei.com> References: <20230423162013.4535-1-Jonathan.Cameron@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.122.247.231] X-ClientProxiedBy: lhrpeml500004.china.huawei.com (7.191.163.9) To lhrpeml500005.china.huawei.com (7.191.163.240) X-CFilter-Loop: Reflected Received-SPF: pass client-ip=185.176.79.56; envelope-from=jonathan.cameron@huawei.com; helo=frasgout.his.huawei.com X-Spam_score_int: -41 X-Spam_score: -4.2 X-Spam_bar: ---- X-Spam_report: (-4.2 / 5.0 requ) BAYES_00=-1.9, RCVD_IN_DNSWL_MED=-2.3, RCVD_IN_MSPIKE_H2=-0.001, SPF_HELO_NONE=0.001, SPF_PASS=-0.001, 
T_SCC_BODY_TEXT_LINE=-0.01 autolearn=ham autolearn_force=no X-Spam_action: no action X-BeenThere: qemu-devel@nongnu.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Reply-to: Jonathan Cameron X-Patchwork-Original-From: Jonathan Cameron via From: Jonathan Cameron Errors-To: qemu-devel-bounces+qemu-devel=archiver.kernel.org@nongnu.org Sender: qemu-devel-bounces+qemu-devel=archiver.kernel.org@nongnu.org From: Ira Weiny CXL has 24 bit unaligned fields which need to be stored to. CXL is specified as little endian. Define st24_le_p() and the supporting functions to store such a field from a 32 bit host native value. The use of b, w, l, q as the size specifier is limiting. So "24" was used for the size part of the function name. Reviewed-by: Fan Ni Signed-off-by: Ira Weiny Signed-off-by: Jonathan Cameron --- v5: - Added assertion that upper bits of the input parameter aren't set. - Mask value in bswap24s() - update docs --- docs/devel/loads-stores.rst | 1 + include/qemu/bswap.h | 25 +++++++++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/docs/devel/loads-stores.rst b/docs/devel/loads-stores.rst index ad5dfe133e..57b4396f7a 100644 --- a/docs/devel/loads-stores.rst +++ b/docs/devel/loads-stores.rst @@ -36,6 +36,7 @@ store: ``st{size}_{endian}_p(ptr, val)`` ``size`` - ``b`` : 8 bits - ``w`` : 16 bits + - ``24`` : 24 bits - ``l`` : 32 bits - ``q`` : 64 bits diff --git a/include/qemu/bswap.h b/include/qemu/bswap.h index 15a78c0db5..91ed9c7e2c 100644 --- a/include/qemu/bswap.h +++ b/include/qemu/bswap.h @@ -8,11 +8,25 @@ #undef bswap64 #define bswap64(_x) __builtin_bswap64(_x) +static inline uint32_t bswap24(uint32_t x) +{ + assert((x & 0xff000000U) == 0); + + return (((x & 0x000000ffU) << 16) | + ((x & 0x0000ff00U) << 0) | + ((x & 0x00ff0000U) >> 16)); +} + static inline void bswap16s(uint16_t *s) { *s = __builtin_bswap16(*s); } +static inline void bswap24s(uint32_t *s) +{ + *s = bswap24(*s & 0x00ffffffU); +} + static inline void bswap32s(uint32_t *s) { *s = __builtin_bswap32(*s); @@ -176,6 +190,7 @@ CPU_CONVERT(le, 64, uint64_t) * size is: * b: 8 bits * w: 16 bits + * 24: 24 bits * l: 32 bits * q: 64 bits * @@ -248,6 +263,11 @@ static inline void stw_he_p(void *ptr, uint16_t v) __builtin_memcpy(ptr, &v, sizeof(v)); } +static inline void st24_he_p(void *ptr, uint32_t v) +{ + __builtin_memcpy(ptr, &v, 3); +} + static inline int ldl_he_p(const void *ptr) { int32_t r; @@ -297,6 +317,11 @@ static inline void stw_le_p(void *ptr, uint16_t v) stw_he_p(ptr, le_bswap(v, 16)); } +static inline void st24_le_p(void *ptr, uint32_t v) +{ + st24_he_p(ptr, le_bswap(v, 24)); +} + static inline void stl_le_p(void *ptr, uint32_t v) { stl_he_p(ptr, le_bswap(v, 32)); From patchwork Sun Apr 23 16:20:11 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jonathan Cameron X-Patchwork-Id: 13221406 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from lists.gnu.org (lists.gnu.org [209.51.188.17]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.lore.kernel.org (Postfix) with ESMTPS id 27309C6FD18 for ; Sun, 23 Apr 2023 16:22:30 +0000 (UTC) Received: from localhost ([::1] helo=lists1p.gnu.org) by lists.gnu.org with esmtp (Exim 4.90_1) (envelope-from ) id 1pqcTi-00033i-Uf; Sun, 23 Apr 2023 12:22:18 -0400 Received: from 
eggs.gnu.org ([2001:470:142:3::10]) by lists.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_256_GCM_SHA384:256) (Exim 4.90_1) (envelope-from ) id 1pqcTh-0002wJ-8q for qemu-devel@nongnu.org; Sun, 23 Apr 2023 12:22:17 -0400 Received: from frasgout.his.huawei.com ([185.176.79.56]) by eggs.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_256_GCM_SHA384:256) (Exim 4.90_1) (envelope-from ) id 1pqcTe-0004w6-KS for qemu-devel@nongnu.org; Sun, 23 Apr 2023 12:22:16 -0400 Received: from lhrpeml500005.china.huawei.com (unknown [172.18.147.207]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4Q4D1V10yTz67G5Z; Mon, 24 Apr 2023 00:17:18 +0800 (CST) Received: from SecurePC-101-06.china.huawei.com (10.122.247.231) by lhrpeml500005.china.huawei.com (7.191.163.240) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2507.23; Sun, 23 Apr 2023 17:22:11 +0100 To: , Michael Tsirkin , Fan Ni CC: , , Ira Weiny , Alison Schofield , Michael Roth , =?utf-8?q?Philippe_Mathieu-Daud=C3=A9?= , Dave Jiang , Markus Armbruster , =?utf-8?q?Daniel_P_=2E_Berrang=C3=A9?= , Eric Blake , Mike Maslenkin , =?utf-8?q?Marc-Andr=C3=A9_Lureau?= , Thomas Huth Subject: [PATCH v5 4/6] hw/cxl: QMP based poison injection support Date: Sun, 23 Apr 2023 17:20:11 +0100 Message-ID: <20230423162013.4535-5-Jonathan.Cameron@huawei.com> X-Mailer: git-send-email 2.37.2 In-Reply-To: <20230423162013.4535-1-Jonathan.Cameron@huawei.com> References: <20230423162013.4535-1-Jonathan.Cameron@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.122.247.231] X-ClientProxiedBy: lhrpeml500005.china.huawei.com (7.191.163.240) To lhrpeml500005.china.huawei.com (7.191.163.240) X-CFilter-Loop: Reflected Received-SPF: pass client-ip=185.176.79.56; envelope-from=jonathan.cameron@huawei.com; helo=frasgout.his.huawei.com X-Spam_score_int: -41 X-Spam_score: -4.2 X-Spam_bar: ---- X-Spam_report: (-4.2 / 5.0 requ) BAYES_00=-1.9, RCVD_IN_DNSWL_MED=-2.3, RCVD_IN_MSPIKE_H2=-0.001, SPF_HELO_NONE=0.001, SPF_PASS=-0.001, T_SCC_BODY_TEXT_LINE=-0.01 autolearn=ham autolearn_force=no X-Spam_action: no action X-BeenThere: qemu-devel@nongnu.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Reply-to: Jonathan Cameron X-Patchwork-Original-From: Jonathan Cameron via From: Jonathan Cameron Errors-To: qemu-devel-bounces+qemu-devel=archiver.kernel.org@nongnu.org Sender: qemu-devel-bounces+qemu-devel=archiver.kernel.org@nongnu.org Inject poison using qmp command cxl-inject-poison to add an entry to the poison list. For now, the poison is not returned CXL.mem reads, but only via the mailbox command Get Poison List. So a normal memory read to an address that is on the poison list will not yet result in a synchronous exception (and similar for partial cacheline writes). That is left for a future patch. See CXL rev 3.0, sec 8.2.9.8.4.1 Get Poison list (Opcode 4300h) Kernel patches to use this interface here: https://lore.kernel.org/linux-cxl/cover.1665606782.git.alison.schofield@intel.com/ To inject poison using qmp (telnet to the qmp port) { "execute": "qmp_capabilities" } { "execute": "cxl-inject-poison", "arguments": { "path": "/machine/peripheral/cxl-pmem0", "start": 2048, "length": 256 } } Adjusted to select a device on your machine. Note that the poison list supported is kept short enough to avoid the complexity of state machine that is needed to handle the MORE flag. 
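For reference, and not part of this patch: with standard QMP framing a successful injection is acknowledged with an empty return object, while a request that violates the 64 byte alignment checks added here is rejected with a GenericError carrying the error_setg() message, for example

{ "return": {} }
{ "error": { "class": "GenericError", "desc": "Poison start address must be 64 byte aligned" } }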
Reviewed-by: Fan Ni Reviewed-by: Ira Weiny Signed-off-by: Jonathan Cameron Acked-by: Markus Armbruster --- v5: - Picked up Fan Ni's tag - Use ROUND_DOWN() as suggestion by Philippe - Update qapi docs to 8.1 - Added comment to clarify the lack of synchronous poison exceptions that has been left for a future patch set. --- hw/cxl/cxl-mailbox-utils.c | 90 +++++++++++++++++++++++++++++++++++++ hw/mem/cxl_type3.c | 56 +++++++++++++++++++++++ hw/mem/cxl_type3_stubs.c | 6 +++ include/hw/cxl/cxl.h | 1 + include/hw/cxl/cxl_device.h | 20 +++++++++ qapi/cxl.json | 18 ++++++++ 6 files changed, 191 insertions(+) diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c index 702e16ca20..1f74b26ea2 100644 --- a/hw/cxl/cxl-mailbox-utils.c +++ b/hw/cxl/cxl-mailbox-utils.c @@ -62,6 +62,8 @@ enum { #define GET_PARTITION_INFO 0x0 #define GET_LSA 0x2 #define SET_LSA 0x3 + MEDIA_AND_POISON = 0x43, + #define GET_POISON_LIST 0x0 }; /* 8.2.8.4.5.1 Command Return Codes */ @@ -295,6 +297,10 @@ static CXLRetCode cmd_identify_memory_device(struct cxl_cmd *cmd, stq_le_p(&id->persistent_capacity, cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER); stq_le_p(&id->volatile_capacity, cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER); stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d)); + /* 256 poison records */ + st24_le_p(id->poison_list_max_mer, 256); + /* No limit - so limited by main poison record limit */ + stw_le_p(&id->inject_poison_limit, 0); *len = sizeof(*id); return CXL_MBOX_SUCCESS; @@ -384,6 +390,88 @@ static CXLRetCode cmd_ccls_set_lsa(struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } +/* + * This is very inefficient, but good enough for now! + * Also the payload will always fit, so no need to handle the MORE flag and + * make this stateful. We may want to allow longer poison lists to aid + * testing that kernel functionality. 
+ */ +static CXLRetCode cmd_media_get_poison_list(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len) +{ + struct get_poison_list_pl { + uint64_t pa; + uint64_t length; + } QEMU_PACKED; + + struct get_poison_list_out_pl { + uint8_t flags; + uint8_t rsvd1; + uint64_t overflow_timestamp; + uint16_t count; + uint8_t rsvd2[0x14]; + struct { + uint64_t addr; + uint32_t length; + uint32_t resv; + } QEMU_PACKED records[]; + } QEMU_PACKED; + + struct get_poison_list_pl *in = (void *)cmd->payload; + struct get_poison_list_out_pl *out = (void *)cmd->payload; + CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate); + uint16_t record_count = 0, i = 0; + uint64_t query_start, query_length; + CXLPoisonList *poison_list = &ct3d->poison_list; + CXLPoison *ent; + uint16_t out_pl_len; + + query_start = ldq_le_p(&in->pa); + /* 64 byte alignemnt required */ + if (query_start & 0x3f) { + return CXL_MBOX_INVALID_INPUT; + } + query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE; + + QLIST_FOREACH(ent, poison_list, node) { + /* Check for no overlap */ + if (ent->start >= query_start + query_length || + ent->start + ent->length <= query_start) { + continue; + } + record_count++; + } + out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); + assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE); + + memset(out, 0, out_pl_len); + QLIST_FOREACH(ent, poison_list, node) { + uint64_t start, stop; + + /* Check for no overlap */ + if (ent->start >= query_start + query_length || + ent->start + ent->length <= query_start) { + continue; + } + + /* Deal with overlap */ + start = MAX(ROUND_DOWN(ent->start, 64ull), query_start); + stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length, + query_start + query_length); + stq_le_p(&out->records[i].addr, start | (ent->type & 0x7)); + stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE); + i++; + } + if (ct3d->poison_list_overflowed) { + out->flags = (1 << 1); + stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts); + } + stw_le_p(&out->count, record_count); + *len = out_pl_len; + return CXL_MBOX_SUCCESS; +} + #define IMMEDIATE_CONFIG_CHANGE (1 << 1) #define IMMEDIATE_DATA_CHANGE (1 << 2) #define IMMEDIATE_POLICY_CHANGE (1 << 3) @@ -411,6 +499,8 @@ static struct cxl_cmd cxl_cmd_set[256][256] = { [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 }, [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa, ~0, IMMEDIATE_CONFIG_CHANGE | IMMEDIATE_DATA_CHANGE }, + [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST", + cmd_media_get_poison_list, 16, 0 }, }; void cxl_process_mailbox(CXLDeviceState *cxl_dstate) diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c index 2adacbd01b..ab600735eb 100644 --- a/hw/mem/cxl_type3.c +++ b/hw/mem/cxl_type3.c @@ -947,6 +947,62 @@ static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size, */ } +void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d) +{ + ct3d->poison_list_overflowed = true; + ct3d->poison_list_overflow_ts = + cxl_device_get_timestamp(&ct3d->cxl_dstate); +} + +void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length, + Error **errp) +{ + Object *obj = object_resolve_path(path, NULL); + CXLType3Dev *ct3d; + CXLPoison *p; + + if (length % 64) { + error_setg(errp, "Poison injection must be in multiples of 64 bytes"); + return; + } + if (start % 64) { + error_setg(errp, "Poison start address must be 64 byte aligned"); + return; + } + if (!obj) { + error_setg(errp, "Unable to resolve path"); + return; + } + 
if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) { + error_setg(errp, "Path does not point to a CXL type 3 device"); + return; + } + + ct3d = CXL_TYPE3(obj); + + QLIST_FOREACH(p, &ct3d->poison_list, node) { + if (((start >= p->start) && (start < p->start + p->length)) || + ((start + length > p->start) && + (start + length <= p->start + p->length))) { + error_setg(errp, "Overlap with existing poisoned region not supported"); + return; + } + } + + if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) { + cxl_set_poison_list_overflowed(ct3d); + return; + } + + p = g_new0(CXLPoison, 1); + p->length = length; + p->start = start; + p->type = CXL_POISON_TYPE_INTERNAL; /* Different from injected via the mbox */ + + QLIST_INSERT_HEAD(&ct3d->poison_list, p, node); + ct3d->poison_list_cnt++; +} + /* For uncorrectable errors include support for multiple header recording */ void qmp_cxl_inject_uncorrectable_errors(const char *path, CXLUncorErrorRecordList *errors, diff --git a/hw/mem/cxl_type3_stubs.c b/hw/mem/cxl_type3_stubs.c index d574c58f9a..fd1166a610 100644 --- a/hw/mem/cxl_type3_stubs.c +++ b/hw/mem/cxl_type3_stubs.c @@ -3,6 +3,12 @@ #include "qapi/error.h" #include "qapi/qapi-commands-cxl.h" +void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length, + Error **errp) +{ + error_setg(errp, "CXL Type 3 support is not compiled in"); +} + void qmp_cxl_inject_uncorrectable_errors(const char *path, CXLUncorErrorRecordList *errors, Error **errp) diff --git a/include/hw/cxl/cxl.h b/include/hw/cxl/cxl.h index c453983e83..56c9e7676e 100644 --- a/include/hw/cxl/cxl.h +++ b/include/hw/cxl/cxl.h @@ -18,6 +18,7 @@ #include "cxl_component.h" #include "cxl_device.h" +#define CXL_CACHE_LINE_SIZE 64 #define CXL_COMPONENT_REG_BAR_IDX 0 #define CXL_DEVICE_REG_BAR_IDX 2 diff --git a/include/hw/cxl/cxl_device.h b/include/hw/cxl/cxl_device.h index 02befda0f6..32c234ea91 100644 --- a/include/hw/cxl/cxl_device.h +++ b/include/hw/cxl/cxl_device.h @@ -242,6 +242,18 @@ typedef struct CXLError { typedef QTAILQ_HEAD(, CXLError) CXLErrorList; +typedef struct CXLPoison { + uint64_t start, length; + uint8_t type; +#define CXL_POISON_TYPE_EXTERNAL 0x1 +#define CXL_POISON_TYPE_INTERNAL 0x2 +#define CXL_POISON_TYPE_INJECTED 0x3 + QLIST_ENTRY(CXLPoison) node; +} CXLPoison; + +typedef QLIST_HEAD(, CXLPoison) CXLPoisonList; +#define CXL_POISON_LIST_LIMIT 256 + struct CXLType3Dev { /* Private */ PCIDevice parent_obj; @@ -264,6 +276,12 @@ struct CXLType3Dev { /* Error injection */ CXLErrorList error_list; + + /* Poison Injection - cache */ + CXLPoisonList poison_list; + unsigned int poison_list_cnt; + bool poison_list_overflowed; + uint64_t poison_list_overflow_ts; }; #define TYPE_CXL_TYPE3 "cxl-type3" @@ -289,4 +307,6 @@ MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data, uint64_t cxl_device_get_timestamp(CXLDeviceState *cxlds); +void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d); + #endif diff --git a/qapi/cxl.json b/qapi/cxl.json index 4be7d46041..ca3af3f0b2 100644 --- a/qapi/cxl.json +++ b/qapi/cxl.json @@ -5,6 +5,24 @@ # = CXL devices ## +## +# @cxl-inject-poison: +# +# Poison records indicate that a CXL memory device knows that a particular +# memory region may be corrupted. This may be because of locally detected +# errors (e.g. ECC failure) or poisoned writes received from other components +# in the system. This injection mechanism enables testing of the OS handling +# of poison records which may be queried via the CXL mailbox. 
+# +# @path: CXL type 3 device canonical QOM path +# @start: Start address - must be 64 byte aligned. +# @length: Length of poison to inject - must be a multiple of 64 bytes. +# +# Since: 8.1 +## +{ 'command': 'cxl-inject-poison', + 'data': { 'path': 'str', 'start': 'uint64', 'length': 'uint64' }} + ## # @CxlUncorErrorType: # From patchwork Sun Apr 23 16:20:12 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jonathan Cameron X-Patchwork-Id: 13221407 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from lists.gnu.org (lists.gnu.org [209.51.188.17]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.lore.kernel.org (Postfix) with ESMTPS id 9D10EC6FD18 for ; Sun, 23 Apr 2023 16:22:49 +0000 (UTC) Received: from localhost ([::1] helo=lists1p.gnu.org) by lists.gnu.org with esmtp (Exim 4.90_1) (envelope-from ) id 1pqcUB-0003s9-0c; Sun, 23 Apr 2023 12:22:47 -0400 Received: from eggs.gnu.org ([2001:470:142:3::10]) by lists.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_256_GCM_SHA384:256) (Exim 4.90_1) (envelope-from ) id 1pqcU9-0003rd-RC for qemu-devel@nongnu.org; Sun, 23 Apr 2023 12:22:45 -0400 Received: from frasgout.his.huawei.com ([185.176.79.56]) by eggs.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_256_GCM_SHA384:256) (Exim 4.90_1) (envelope-from ) id 1pqcU8-0004xW-6n for qemu-devel@nongnu.org; Sun, 23 Apr 2023 12:22:45 -0400 Received: from lhrpeml500005.china.huawei.com (unknown [172.18.147.200]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4Q4D6J1svFz67G90; Mon, 24 Apr 2023 00:21:28 +0800 (CST) Received: from SecurePC-101-06.china.huawei.com (10.122.247.231) by lhrpeml500005.china.huawei.com (7.191.163.240) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2507.23; Sun, 23 Apr 2023 17:22:41 +0100 To: , Michael Tsirkin , Fan Ni CC: , , Ira Weiny , Alison Schofield , Michael Roth , =?utf-8?q?Philippe_Mathieu-Daud=C3=A9?= , Dave Jiang , Markus Armbruster , =?utf-8?q?Daniel_P_=2E_Berrang=C3=A9?= , Eric Blake , Mike Maslenkin , =?utf-8?q?Marc-Andr=C3=A9_Lureau?= , Thomas Huth Subject: [PATCH v5 5/6] hw/cxl: Add poison injection via the mailbox. 
Date: Sun, 23 Apr 2023 17:20:12 +0100 Message-ID: <20230423162013.4535-6-Jonathan.Cameron@huawei.com> X-Mailer: git-send-email 2.37.2 In-Reply-To: <20230423162013.4535-1-Jonathan.Cameron@huawei.com> References: <20230423162013.4535-1-Jonathan.Cameron@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.122.247.231] X-ClientProxiedBy: lhrpeml500004.china.huawei.com (7.191.163.9) To lhrpeml500005.china.huawei.com (7.191.163.240) X-CFilter-Loop: Reflected Received-SPF: pass client-ip=185.176.79.56; envelope-from=jonathan.cameron@huawei.com; helo=frasgout.his.huawei.com X-Spam_score_int: -41 X-Spam_score: -4.2 X-Spam_bar: ---- X-Spam_report: (-4.2 / 5.0 requ) BAYES_00=-1.9, RCVD_IN_DNSWL_MED=-2.3, RCVD_IN_MSPIKE_H2=-0.001, SPF_HELO_NONE=0.001, SPF_PASS=-0.001, T_SCC_BODY_TEXT_LINE=-0.01 autolearn=ham autolearn_force=no X-Spam_action: no action X-BeenThere: qemu-devel@nongnu.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Reply-to: Jonathan Cameron X-Patchwork-Original-From: Jonathan Cameron via From: Jonathan Cameron Errors-To: qemu-devel-bounces+qemu-devel=archiver.kernel.org@nongnu.org Sender: qemu-devel-bounces+qemu-devel=archiver.kernel.org@nongnu.org Very simple implementation to allow testing of corresponding kernel code. Note that for now we track each 64 byte section independently. Whilst a valid implementation choice, it may make sense to fuse entries so as to prove out more complex corners of the kernel code. Reviewed-by: Ira Weiny Reviewed-by: Fan Ni Signed-off-by: Jonathan Cameron --- v5: - Use CXL_CACHE_LINE_SIZE for the minimum poison granularity. - Rename len parameter to len_unused to indicate that it is intentionally not used in this function. (comment was made on patch 6 but applies here as well). Note I have not addressed Philippe's question on CXLDeviceState not being QDev based as that needs further investigation and would require an additional patch set to make the change. 
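As a rough sketch of the entry fusing mentioned above (not implemented in this patch; the helper name is hypothetical, while CXLPoison, CXLPoisonList, QLIST_FOREACH and CXL_CACHE_LINE_SIZE come from the earlier patches in this series), merging a newly injected cache line into an adjacent entry of the same type could look like:

static bool cxl_try_fuse_poison(CXLPoisonList *poison_list, uint64_t dpa,
                                uint8_t type)
{
    CXLPoison *ent;

    QLIST_FOREACH(ent, poison_list, node) {
        if (ent->type != type) {
            continue;
        }
        if (dpa == ent->start + ent->length) {
            /* New line immediately follows an existing entry: extend it up */
            ent->length += CXL_CACHE_LINE_SIZE;
            return true;
        }
        if (dpa + CXL_CACHE_LINE_SIZE == ent->start) {
            /* New line immediately precedes an existing entry: extend it down */
            ent->start = dpa;
            ent->length += CXL_CACHE_LINE_SIZE;
            return true;
        }
    }
    return false;
}

A complete version would also need to bridge two entries that become adjacent after such an extension.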
--- hw/cxl/cxl-mailbox-utils.c | 42 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c index 1f74b26ea2..6c476ad7f4 100644 --- a/hw/cxl/cxl-mailbox-utils.c +++ b/hw/cxl/cxl-mailbox-utils.c @@ -64,6 +64,7 @@ enum { #define SET_LSA 0x3 MEDIA_AND_POISON = 0x43, #define GET_POISON_LIST 0x0 + #define INJECT_POISON 0x1 }; /* 8.2.8.4.5.1 Command Return Codes */ @@ -472,6 +473,45 @@ static CXLRetCode cmd_media_get_poison_list(struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } +static CXLRetCode cmd_media_inject_poison(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len_unused) +{ + CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate); + CXLPoisonList *poison_list = &ct3d->poison_list; + CXLPoison *ent; + struct inject_poison_pl { + uint64_t dpa; + }; + struct inject_poison_pl *in = (void *)cmd->payload; + uint64_t dpa = ldq_le_p(&in->dpa); + CXLPoison *p; + + QLIST_FOREACH(ent, poison_list, node) { + if (dpa >= ent->start && + dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) { + return CXL_MBOX_SUCCESS; + } + } + + if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) { + return CXL_MBOX_INJECT_POISON_LIMIT; + } + p = g_new0(CXLPoison, 1); + + p->length = CXL_CACHE_LINE_SIZE; + p->start = dpa; + p->type = CXL_POISON_TYPE_INJECTED; + + /* + * Possible todo: Merge with existing entry if next to it and if same type + */ + QLIST_INSERT_HEAD(poison_list, p, node); + ct3d->poison_list_cnt++; + + return CXL_MBOX_SUCCESS; +} + #define IMMEDIATE_CONFIG_CHANGE (1 << 1) #define IMMEDIATE_DATA_CHANGE (1 << 2) #define IMMEDIATE_POLICY_CHANGE (1 << 3) @@ -501,6 +541,8 @@ static struct cxl_cmd cxl_cmd_set[256][256] = { ~0, IMMEDIATE_CONFIG_CHANGE | IMMEDIATE_DATA_CHANGE }, [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST", cmd_media_get_poison_list, 16, 0 }, + [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON", + cmd_media_inject_poison, 8, 0 }, }; void cxl_process_mailbox(CXLDeviceState *cxl_dstate) From patchwork Sun Apr 23 16:20:13 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jonathan Cameron X-Patchwork-Id: 13221408 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from lists.gnu.org (lists.gnu.org [209.51.188.17]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.lore.kernel.org (Postfix) with ESMTPS id 0FEE6C6FD18 for ; Sun, 23 Apr 2023 16:23:29 +0000 (UTC) Received: from localhost ([::1] helo=lists1p.gnu.org) by lists.gnu.org with esmtp (Exim 4.90_1) (envelope-from ) id 1pqcUg-0004Ui-Cb; Sun, 23 Apr 2023 12:23:18 -0400 Received: from eggs.gnu.org ([2001:470:142:3::10]) by lists.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_256_GCM_SHA384:256) (Exim 4.90_1) (envelope-from ) id 1pqcUe-0004U2-LE for qemu-devel@nongnu.org; Sun, 23 Apr 2023 12:23:16 -0400 Received: from frasgout.his.huawei.com ([185.176.79.56]) by eggs.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_256_GCM_SHA384:256) (Exim 4.90_1) (envelope-from ) id 1pqcUc-00050J-Q2 for qemu-devel@nongnu.org; Sun, 23 Apr 2023 12:23:16 -0400 Received: from lhrpeml500005.china.huawei.com (unknown [172.18.147.226]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4Q4D4t0QPyz67ljc; Mon, 24 Apr 2023 00:20:14 +0800 (CST) Received: from SecurePC-101-06.china.huawei.com (10.122.247.231) by 
lhrpeml500005.china.huawei.com (7.191.163.240) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2507.23; Sun, 23 Apr 2023 17:23:12 +0100 To: , Michael Tsirkin , Fan Ni CC: , , Ira Weiny , Alison Schofield , Michael Roth , =?utf-8?q?Philippe_Mathieu-Daud=C3=A9?= , Dave Jiang , Markus Armbruster , =?utf-8?q?Daniel_P_=2E_Berrang=C3=A9?= , Eric Blake , Mike Maslenkin , =?utf-8?q?Marc-Andr=C3=A9_Lureau?= , Thomas Huth Subject: [PATCH v5 6/6] hw/cxl: Add clear poison mailbox command support. Date: Sun, 23 Apr 2023 17:20:13 +0100 Message-ID: <20230423162013.4535-7-Jonathan.Cameron@huawei.com> X-Mailer: git-send-email 2.37.2 In-Reply-To: <20230423162013.4535-1-Jonathan.Cameron@huawei.com> References: <20230423162013.4535-1-Jonathan.Cameron@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.122.247.231] X-ClientProxiedBy: lhrpeml500004.china.huawei.com (7.191.163.9) To lhrpeml500005.china.huawei.com (7.191.163.240) X-CFilter-Loop: Reflected Received-SPF: pass client-ip=185.176.79.56; envelope-from=jonathan.cameron@huawei.com; helo=frasgout.his.huawei.com X-Spam_score_int: -41 X-Spam_score: -4.2 X-Spam_bar: ---- X-Spam_report: (-4.2 / 5.0 requ) BAYES_00=-1.9, RCVD_IN_DNSWL_MED=-2.3, RCVD_IN_MSPIKE_H2=-0.001, SPF_HELO_NONE=0.001, SPF_PASS=-0.001, T_SCC_BODY_TEXT_LINE=-0.01 autolearn=ham autolearn_force=no X-Spam_action: no action X-BeenThere: qemu-devel@nongnu.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Reply-to: Jonathan Cameron X-Patchwork-Original-From: Jonathan Cameron via From: Jonathan Cameron Errors-To: qemu-devel-bounces+qemu-devel=archiver.kernel.org@nongnu.org Sender: qemu-devel-bounces+qemu-devel=archiver.kernel.org@nongnu.org Current implementation is very simple so many of the corner cases do not exist (e.g. fragmenting larger poison list entries) Reviewed-by: Fan Ni Signed-off-by: Jonathan Cameron --- v5: - Much simpler identification of the entry to modify (Ira) - Use CXL_CACHE_LINE_SIZE instead of 64. (Philippe) - Use memory_region_size() instead of accessing directly. (Michael) - Rename unused len parameter len_unused to make it clear that (Fan) for a fixed length input payload, this parameter has already been checked so the function need not do anything with it. 
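As a worked example of the splitting behaviour implemented below (numbers purely illustrative): if the poison list holds a single entry starting at DPA 0x40 with length 0x100 (covering 0x40-0x13f) and the host clears the cache line at DPA 0x80, that entry is removed and replaced by two fragments: one starting at 0x40 with length 0x40 for the region below the cleared line, and one starting at 0xC0 with length 0x80 for the region above it, leaving the 64 bytes at 0x80 no longer on the list.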
--- hw/cxl/cxl-mailbox-utils.c | 82 +++++++++++++++++++++++++++++++++++++ hw/mem/cxl_type3.c | 37 +++++++++++++++++ include/hw/cxl/cxl_device.h | 1 + 3 files changed, 120 insertions(+) diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c index 6c476ad7f4..e3401b6be8 100644 --- a/hw/cxl/cxl-mailbox-utils.c +++ b/hw/cxl/cxl-mailbox-utils.c @@ -65,6 +65,7 @@ enum { MEDIA_AND_POISON = 0x43, #define GET_POISON_LIST 0x0 #define INJECT_POISON 0x1 + #define CLEAR_POISON 0x2 }; /* 8.2.8.4.5.1 Command Return Codes */ @@ -512,6 +513,85 @@ static CXLRetCode cmd_media_inject_poison(struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } +static CXLRetCode cmd_media_clear_poison(struct cxl_cmd *cmd, + CXLDeviceState *cxl_dstate, + uint16_t *len_unused) +{ + CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate); + CXLPoisonList *poison_list = &ct3d->poison_list; + CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); + struct clear_poison_pl { + uint64_t dpa; + uint8_t data[64]; + }; + CXLPoison *ent; + uint64_t dpa; + + struct clear_poison_pl *in = (void *)cmd->payload; + + dpa = ldq_le_p(&in->dpa); + if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->mem_size) { + return CXL_MBOX_INVALID_PA; + } + + /* Clearing a region with no poison is not an error so always do so */ + if (cvc->set_cacheline) { + if (!cvc->set_cacheline(ct3d, dpa, in->data)) { + return CXL_MBOX_INTERNAL_ERROR; + } + } + + QLIST_FOREACH(ent, poison_list, node) { + /* + * Test for contained in entry. Simpler than general case + * as clearing 64 bytes and entries 64 byte aligned + */ + if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) { + break; + } + } + if (!ent) { + return CXL_MBOX_SUCCESS; + } + + QLIST_REMOVE(ent, node); + ct3d->poison_list_cnt--; + + if (dpa > ent->start) { + CXLPoison *frag; + /* Cannot overflow as replacing existing entry */ + + frag = g_new0(CXLPoison, 1); + + frag->start = ent->start; + frag->length = dpa - ent->start; + frag->type = ent->type; + + QLIST_INSERT_HEAD(poison_list, frag, node); + ct3d->poison_list_cnt++; + } + + if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) { + CXLPoison *frag; + + if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) { + cxl_set_poison_list_overflowed(ct3d); + } else { + frag = g_new0(CXLPoison, 1); + + frag->start = dpa + CXL_CACHE_LINE_SIZE; + frag->length = ent->start + ent->length - frag->start; + frag->type = ent->type; + QLIST_INSERT_HEAD(poison_list, frag, node); + ct3d->poison_list_cnt++; + } + } + /* Any fragments have been added, free original entry */ + g_free(ent); + + return CXL_MBOX_SUCCESS; +} + #define IMMEDIATE_CONFIG_CHANGE (1 << 1) #define IMMEDIATE_DATA_CHANGE (1 << 2) #define IMMEDIATE_POLICY_CHANGE (1 << 3) @@ -543,6 +623,8 @@ static struct cxl_cmd cxl_cmd_set[256][256] = { cmd_media_get_poison_list, 16, 0 }, [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON", cmd_media_inject_poison, 8, 0 }, + [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON", + cmd_media_clear_poison, 72, 0 }, }; void cxl_process_mailbox(CXLDeviceState *cxl_dstate) diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c index ab600735eb..a247f506b7 100644 --- a/hw/mem/cxl_type3.c +++ b/hw/mem/cxl_type3.c @@ -947,6 +947,42 @@ static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size, */ } +static bool set_cacheline(CXLType3Dev *ct3d, uint64_t dpa_offset, uint8_t *data) +{ + MemoryRegion *vmr = NULL, *pmr = NULL; + AddressSpace *as; + + if (ct3d->hostvmem) { + vmr = 
host_memory_backend_get_memory(ct3d->hostvmem); + } + if (ct3d->hostpmem) { + pmr = host_memory_backend_get_memory(ct3d->hostpmem); + } + + if (!vmr && !pmr) { + return false; + } + + if (dpa_offset + 64 > int128_get64(ct3d->cxl_dstate.mem_size)) { + return false; + } + + if (vmr) { + if (dpa_offset < memory_region_size(vmr)) { + as = &ct3d->hostvmem_as; + } else { + as = &ct3d->hostpmem_as; + dpa_offset -= memory_region_size(vmr); + } + } else { + as = &ct3d->hostpmem_as; + } + + address_space_write(as, dpa_offset, MEMTXATTRS_UNSPECIFIED, &data, + CXL_CACHE_LINE_SIZE); + return true; +} + void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d) { ct3d->poison_list_overflowed = true; @@ -1168,6 +1204,7 @@ static void ct3_class_init(ObjectClass *oc, void *data) cvc->get_lsa_size = get_lsa_size; cvc->get_lsa = get_lsa; cvc->set_lsa = set_lsa; + cvc->set_cacheline = set_cacheline; } static const TypeInfo ct3d_info = { diff --git a/include/hw/cxl/cxl_device.h b/include/hw/cxl/cxl_device.h index 32c234ea91..73328a52cf 100644 --- a/include/hw/cxl/cxl_device.h +++ b/include/hw/cxl/cxl_device.h @@ -298,6 +298,7 @@ struct CXLType3Class { uint64_t offset); void (*set_lsa)(CXLType3Dev *ct3d, const void *buf, uint64_t size, uint64_t offset); + bool (*set_cacheline)(CXLType3Dev *ct3d, uint64_t dpa_offset, uint8_t *data); }; MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,