From patchwork Wed Sep 21 10:43:53 2022
X-Patchwork-Submitter: "Tian, Kevin"
X-Patchwork-Id: 12983137
From: Kevin Tian
To: Zhenyu Wang, Zhi Wang, Jani Nikula, Joonas Lahtinen, Rodrigo Vivi,
    Tvrtko Ursulin, David Airlie, Daniel Vetter, Eric Farman,
    Matthew Rosato, Halil Pasic, Vineeth Vijayan, Peter Oberparleiter,
    Heiko Carstens, Vasily Gorbik, Alexander Gordeev,
    Christian Borntraeger, Sven Schnelle, Tony Krowiak, Jason Herne,
    Harald Freudenberger, Diana Craciun, Alex Williamson, Cornelia Huck,
    Longfang Liu, Shameer Kolothum, Jason Gunthorpe, Yishai Hadas,
    Kevin Tian, Eric Auger, Kirti Wankhede, Leon Romanovsky,
    Abhishek Sahu, Christoph Hellwig, intel-gvt-dev@lists.freedesktop.org,
    intel-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org,
    linux-kernel@vger.kernel.org, linux-s390@vger.kernel.org,
    kvm@vger.kernel.org
Cc: Yi Liu
Subject: [PATCH v4 07/15] vfio/mbochs: Use the new device life cycle helpers
Date: Wed, 21 Sep 2022 18:43:53 +0800
Message-Id: <20220921104401.38898-8-kevin.tian@intel.com>
In-Reply-To: <20220921104401.38898-1-kevin.tian@intel.com>
References: <20220921104401.38898-1-kevin.tian@intel.com>

From: Yi Liu

and manage avail_mbytes inside @init/@release.
Signed-off-by: Yi Liu
Signed-off-by: Kevin Tian
Reviewed-by: Jason Gunthorpe
---
 samples/vfio-mdev/mbochs.c | 73 ++++++++++++++++++++++++--------------
 1 file changed, 46 insertions(+), 27 deletions(-)

diff --git a/samples/vfio-mdev/mbochs.c b/samples/vfio-mdev/mbochs.c
index 344c2901a82b..6901947e27d2 100644
--- a/samples/vfio-mdev/mbochs.c
+++ b/samples/vfio-mdev/mbochs.c
@@ -505,13 +505,14 @@ static int mbochs_reset(struct mdev_state *mdev_state)
 	return 0;
 }
 
-static int mbochs_probe(struct mdev_device *mdev)
+static int mbochs_init_dev(struct vfio_device *vdev)
 {
-	int avail_mbytes = atomic_read(&mbochs_avail_mbytes);
+	struct mdev_state *mdev_state =
+		container_of(vdev, struct mdev_state, vdev);
+	struct mdev_device *mdev = to_mdev_device(vdev->dev);
 	const struct mbochs_type *type =
 		&mbochs_types[mdev_get_type_group_id(mdev)];
-	struct device *dev = mdev_dev(mdev);
-	struct mdev_state *mdev_state;
+	int avail_mbytes = atomic_read(&mbochs_avail_mbytes);
 	int ret = -ENOMEM;
 
 	do {
@@ -520,14 +521,9 @@ static int mbochs_probe(struct mdev_device *mdev)
 	} while (!atomic_try_cmpxchg(&mbochs_avail_mbytes, &avail_mbytes,
 				     avail_mbytes - type->mbytes));
 
-	mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
-	if (mdev_state == NULL)
-		goto err_avail;
-	vfio_init_group_dev(&mdev_state->vdev, &mdev->dev, &mbochs_dev_ops);
-
 	mdev_state->vconfig = kzalloc(MBOCHS_CONFIG_SPACE_SIZE, GFP_KERNEL);
-	if (mdev_state->vconfig == NULL)
-		goto err_mem;
+	if (!mdev_state->vconfig)
+		goto err_avail;
 
 	mdev_state->memsize = type->mbytes * 1024 * 1024;
 	mdev_state->pagecount = mdev_state->memsize >> PAGE_SHIFT;
@@ -535,10 +531,7 @@ static int mbochs_probe(struct mdev_device *mdev)
 				    sizeof(struct page *),
 				    GFP_KERNEL);
 	if (!mdev_state->pages)
-		goto err_mem;
-
-	dev_info(dev, "%s: %s, %d MB, %ld pages\n", __func__,
-		 type->name, type->mbytes, mdev_state->pagecount);
+		goto err_vconfig;
 
 	mutex_init(&mdev_state->ops_lock);
 	mdev_state->mdev = mdev;
@@ -553,19 +546,47 @@ static int mbochs_probe(struct mdev_device *mdev)
 	mbochs_create_config_space(mdev_state);
 	mbochs_reset(mdev_state);
 
+	dev_info(vdev->dev, "%s: %s, %d MB, %ld pages\n", __func__,
+		 type->name, type->mbytes, mdev_state->pagecount);
+	return 0;
+
+err_vconfig:
+	kfree(mdev_state->vconfig);
+err_avail:
+	atomic_add(type->mbytes, &mbochs_avail_mbytes);
+	return ret;
+}
+
+static int mbochs_probe(struct mdev_device *mdev)
+{
+	struct mdev_state *mdev_state;
+	int ret = -ENOMEM;
+
+	mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
+				       &mbochs_dev_ops);
+	if (IS_ERR(mdev_state))
+		return PTR_ERR(mdev_state);
+
 	ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
 	if (ret)
-		goto err_mem;
+		goto err_put_vdev;
 	dev_set_drvdata(&mdev->dev, mdev_state);
 	return 0;
-err_mem:
-	vfio_uninit_group_dev(&mdev_state->vdev);
+
+err_put_vdev:
+	vfio_put_device(&mdev_state->vdev);
+	return ret;
+}
+
+static void mbochs_release_dev(struct vfio_device *vdev)
+{
+	struct mdev_state *mdev_state =
+		container_of(vdev, struct mdev_state, vdev);
+
+	atomic_add(mdev_state->type->mbytes, &mbochs_avail_mbytes);
 	kfree(mdev_state->pages);
 	kfree(mdev_state->vconfig);
-	kfree(mdev_state);
-err_avail:
-	atomic_add(type->mbytes, &mbochs_avail_mbytes);
-	return ret;
+	vfio_free_device(vdev);
 }
 
 static void mbochs_remove(struct mdev_device *mdev)
@@ -573,11 +594,7 @@ static void mbochs_remove(struct mdev_device *mdev)
 	struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);
 
 	vfio_unregister_group_dev(&mdev_state->vdev);
-	vfio_uninit_group_dev(&mdev_state->vdev);
-	atomic_add(mdev_state->type->mbytes, &mbochs_avail_mbytes);
-	kfree(mdev_state->pages);
-	kfree(mdev_state->vconfig);
-	kfree(mdev_state);
+	vfio_put_device(&mdev_state->vdev);
 }
 
 static ssize_t mbochs_read(struct vfio_device *vdev, char __user *buf,
@@ -1397,6 +1414,8 @@ static struct attribute_group *mdev_type_groups[] = {
 
 static const struct vfio_device_ops mbochs_dev_ops = {
 	.close_device = mbochs_close_device,
+	.init = mbochs_init_dev,
+	.release = mbochs_release_dev,
 	.read = mbochs_read,
 	.write = mbochs_write,
 	.ioctl = mbochs_ioctl,
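
For readers new to these helpers, here is a minimal, hypothetical sketch (not part of this patch; the my_* names and the scratch field are invented for illustration) of the two-stage life cycle the mbochs conversion above follows: vfio_alloc_device() combines allocation and @init in probe, the final vfio_put_device() invokes @release, and in this kernel generation @release must end with vfio_free_device().

#include <linux/mdev.h>
#include <linux/slab.h>
#include <linux/vfio.h>

struct my_state {
	struct vfio_device vdev;	/* kept as the first member, as in the samples */
	void *scratch;
};

static int my_init_dev(struct vfio_device *vdev)
{
	struct my_state *state = container_of(vdev, struct my_state, vdev);

	/* claim per-device resources here; undo them in @release */
	state->scratch = kzalloc(PAGE_SIZE, GFP_KERNEL);
	return state->scratch ? 0 : -ENOMEM;
}

static void my_release_dev(struct vfio_device *vdev)
{
	struct my_state *state = container_of(vdev, struct my_state, vdev);

	kfree(state->scratch);
	vfio_free_device(vdev);		/* final free of the vfio_device */
}

static const struct vfio_device_ops my_dev_ops = {
	.init = my_init_dev,
	.release = my_release_dev,
	/* .read/.write/.ioctl/.mmap/... as before */
};

static int my_probe(struct mdev_device *mdev)
{
	struct my_state *state;
	int ret;

	/* allocates struct my_state and runs @init before returning */
	state = vfio_alloc_device(my_state, vdev, &mdev->dev, &my_dev_ops);
	if (IS_ERR(state))
		return PTR_ERR(state);

	ret = vfio_register_emulated_iommu_dev(&state->vdev);
	if (ret) {
		vfio_put_device(&state->vdev);	/* puts the last reference; @release runs */
		return ret;
	}
	dev_set_drvdata(&mdev->dev, state);
	return 0;
}

static void my_remove(struct mdev_device *mdev)
{
	struct my_state *state = dev_get_drvdata(&mdev->dev);

	vfio_unregister_group_dev(&state->vdev);
	vfio_put_device(&state->vdev);		/* @release + vfio_free_device() */
}

The gain over the old open-coded kzalloc()/vfio_init_group_dev() pattern is visible in the diff above: the error unwind in probe collapses to a single vfio_put_device(), and remove no longer duplicates the resource teardown that now lives in @release.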