From patchwork Thu Sep 1 14:37:39 2022
X-Patchwork-Submitter: "Tian, Kevin"
X-Patchwork-Id: 12961920
From: Kevin Tian
To: Zhenyu Wang, Zhi Wang, Jani Nikula, Joonas Lahtinen, Rodrigo Vivi,
	Tvrtko Ursulin, David Airlie, Daniel Vetter, Eric Farman,
	Matthew Rosato, Halil Pasic, Vineeth Vijayan, Peter Oberparleiter,
	Heiko Carstens, Vasily Gorbik, Alexander Gordeev,
	Christian Borntraeger, Sven Schnelle, Tony Krowiak, Jason Herne,
	Harald Freudenberger, Diana Craciun, Alex Williamson, Cornelia Huck,
	Longfang Liu, Shameer Kolothum, Jason Gunthorpe, Yishai Hadas,
	Kevin Tian, Eric Auger, Kirti Wankhede, Leon Romanovsky,
	Abhishek Sahu, intel-gvt-dev@lists.freedesktop.org,
	intel-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org,
	linux-kernel@vger.kernel.org, linux-s390@vger.kernel.org,
	kvm@vger.kernel.org
Cc: Yi Liu
Subject: [PATCH v2 07/15] vfio/mbochs: Use the new device life cycle helpers
Date: Thu, 1 Sep 2022 22:37:39 +0800
Message-Id: <20220901143747.32858-8-kevin.tian@intel.com>
X-Mailer: git-send-email 2.21.3
In-Reply-To: <20220901143747.32858-1-kevin.tian@intel.com>
References: <20220901143747.32858-1-kevin.tian@intel.com>

From: Yi Liu

and manage avail_mbytes inside @init/@release.
Signed-off-by: Yi Liu
Signed-off-by: Kevin Tian
Reviewed-by: Jason Gunthorpe
---
 samples/vfio-mdev/mbochs.c | 73 ++++++++++++++++++++++++--------------
 1 file changed, 46 insertions(+), 27 deletions(-)

diff --git a/samples/vfio-mdev/mbochs.c b/samples/vfio-mdev/mbochs.c
index 344c2901a82b..df95f25fbc0e 100644
--- a/samples/vfio-mdev/mbochs.c
+++ b/samples/vfio-mdev/mbochs.c
@@ -505,13 +505,14 @@ static int mbochs_reset(struct mdev_state *mdev_state)
 	return 0;
 }
 
-static int mbochs_probe(struct mdev_device *mdev)
+static int mbochs_init_dev(struct vfio_device *vdev)
 {
-	int avail_mbytes = atomic_read(&mbochs_avail_mbytes);
+	struct mdev_state *mdev_state =
+		container_of(vdev, struct mdev_state, vdev);
+	struct mdev_device *mdev = to_mdev_device(vdev->dev);
 	const struct mbochs_type *type =
 		&mbochs_types[mdev_get_type_group_id(mdev)];
-	struct device *dev = mdev_dev(mdev);
-	struct mdev_state *mdev_state;
+	int avail_mbytes = atomic_read(&mbochs_avail_mbytes);
 	int ret = -ENOMEM;
 
 	do {
@@ -520,14 +521,9 @@ static int mbochs_probe(struct mdev_device *mdev)
 	} while (!atomic_try_cmpxchg(&mbochs_avail_mbytes, &avail_mbytes,
 				     avail_mbytes - type->mbytes));
 
-	mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
-	if (mdev_state == NULL)
-		goto err_avail;
-	vfio_init_group_dev(&mdev_state->vdev, &mdev->dev, &mbochs_dev_ops);
-
 	mdev_state->vconfig = kzalloc(MBOCHS_CONFIG_SPACE_SIZE, GFP_KERNEL);
-	if (mdev_state->vconfig == NULL)
-		goto err_mem;
+	if (!mdev_state->vconfig)
+		goto err_avail;
 
 	mdev_state->memsize = type->mbytes * 1024 * 1024;
 	mdev_state->pagecount = mdev_state->memsize >> PAGE_SHIFT;
@@ -535,10 +531,7 @@ static int mbochs_probe(struct mdev_device *mdev)
 					    sizeof(struct page *),
 					    GFP_KERNEL);
 	if (!mdev_state->pages)
-		goto err_mem;
-
-	dev_info(dev, "%s: %s, %d MB, %ld pages\n", __func__,
-		 type->name, type->mbytes, mdev_state->pagecount);
+		goto err_vconfig;
 
 	mutex_init(&mdev_state->ops_lock);
 	mdev_state->mdev = mdev;
@@ -553,19 +546,47 @@ static int mbochs_probe(struct mdev_device *mdev)
 	mbochs_create_config_space(mdev_state);
 	mbochs_reset(mdev_state);
 
+	dev_info(vdev->dev, "%s: %s, %d MB, %ld pages\n", __func__,
+		 type->name, type->mbytes, mdev_state->pagecount);
+	return 0;
+
+err_vconfig:
+	kfree(mdev_state->vconfig);
+err_avail:
+	atomic_add(type->mbytes, &mbochs_avail_mbytes);
+	return ret;
+}
+
+static int mbochs_probe(struct mdev_device *mdev)
+{
+	struct mdev_state *mdev_state;
+	int ret = -ENOMEM;
+
+	mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
+				       &mbochs_dev_ops);
+	if (IS_ERR(mdev_state))
+		return PTR_ERR(mdev_state);
+
 	ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
 	if (ret)
-		goto err_mem;
+		goto err_put_vdev;
 	dev_set_drvdata(&mdev->dev, mdev_state);
 	return 0;
-err_mem:
-	vfio_uninit_group_dev(&mdev_state->vdev);
+
+err_put_vdev:
+	vfio_put_device(&mdev_state->vdev);
+	return ret;
+}
+
+static void mbochs_release_dev(struct vfio_device *vdev)
+{
+	struct mdev_state *mdev_state =
+		container_of(vdev, struct mdev_state, vdev);
+
 	kfree(mdev_state->pages);
 	kfree(mdev_state->vconfig);
-	kfree(mdev_state);
-err_avail:
-	atomic_add(type->mbytes, &mbochs_avail_mbytes);
-	return ret;
+	vfio_free_device(vdev);
+	atomic_add(mdev_state->type->mbytes, &mbochs_avail_mbytes);
 }
 
 static void mbochs_remove(struct mdev_device *mdev)
@@ -573,11 +594,7 @@ static void mbochs_remove(struct mdev_device *mdev)
 	struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);
 
 	vfio_unregister_group_dev(&mdev_state->vdev);
-	vfio_uninit_group_dev(&mdev_state->vdev);
-	atomic_add(mdev_state->type->mbytes, &mbochs_avail_mbytes);
-	kfree(mdev_state->pages);
-	kfree(mdev_state->vconfig);
-	kfree(mdev_state);
+	vfio_put_device(&mdev_state->vdev);
 }
 
 static ssize_t mbochs_read(struct vfio_device *vdev, char __user *buf,
@@ -1397,6 +1414,8 @@ static struct attribute_group *mdev_type_groups[] = {
 
 static const struct vfio_device_ops mbochs_dev_ops = {
 	.close_device = mbochs_close_device,
+	.init = mbochs_init_dev,
+	.release = mbochs_release_dev,
 	.read = mbochs_read,
 	.write = mbochs_write,
 	.ioctl = mbochs_ioctl,
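For readers not following the whole series, below is a minimal, illustration-only sketch of the life-cycle pattern this patch converts mbochs to; the foo_* names and the kzalloc'd priv buffer are hypothetical placeholders, not mbochs code. The idea: @init runs inside vfio_alloc_device() and sets up the per-device state that can fail, @release undoes it when the last reference is dropped, and probe()/remove() shrink to alloc+register / unregister+put. The helpers shown (vfio_alloc_device(), vfio_put_device(), vfio_free_device(), vfio_register_emulated_iommu_dev(), vfio_unregister_group_dev()) are the same ones used in the diff above.

/*
 * Illustrative sketch only (not part of the patch).  foo_* names are
 * made up; the real driver keeps its own state in struct mdev_state.
 */
#include <linux/device.h>
#include <linux/mdev.h>
#include <linux/slab.h>
#include <linux/vfio.h>

struct foo_state {
	struct vfio_device vdev;	/* must be the first member: vfio_alloc_device() requires offset 0 */
	void *priv;			/* per-device resource set up in @init (hypothetical) */
};

static int foo_init_dev(struct vfio_device *vdev)
{
	struct foo_state *state = container_of(vdev, struct foo_state, vdev);

	/* work that the old probe() did before registration */
	state->priv = kzalloc(PAGE_SIZE, GFP_KERNEL);
	return state->priv ? 0 : -ENOMEM;
}

static void foo_release_dev(struct vfio_device *vdev)
{
	struct foo_state *state = container_of(vdev, struct foo_state, vdev);

	/* mirror of @init; runs once the last reference is gone */
	kfree(state->priv);
	vfio_free_device(vdev);
}

static const struct vfio_device_ops foo_dev_ops = {
	.init		= foo_init_dev,
	.release	= foo_release_dev,
};

static int foo_probe(struct mdev_device *mdev)
{
	struct foo_state *state;
	int ret;

	/* allocates foo_state, initializes the embedded vdev, calls @init */
	state = vfio_alloc_device(foo_state, vdev, &mdev->dev, &foo_dev_ops);
	if (IS_ERR(state))
		return PTR_ERR(state);

	ret = vfio_register_emulated_iommu_dev(&state->vdev);
	if (ret) {
		vfio_put_device(&state->vdev);	/* drops the ref, @release cleans up */
		return ret;
	}
	dev_set_drvdata(&mdev->dev, state);
	return 0;
}

static void foo_remove(struct mdev_device *mdev)
{
	struct foo_state *state = dev_get_drvdata(&mdev->dev);

	vfio_unregister_group_dev(&state->vdev);
	vfio_put_device(&state->vdev);		/* @release runs when the refcount hits zero */
}

This is why the patch can drop the open-coded kzalloc()/kfree() of mdev_state and the vfio_init_group_dev()/vfio_uninit_group_dev() pair: the vfio core now owns the allocation and the final free, and the driver only supplies the @init/@release callbacks.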