From: Asias He
To: Nicholas Bellinger
Cc: "Michael S. Tsirkin", Rusty Russell, kvm@vger.kernel.org,
    virtualization@lists.linux-foundation.org
Subject: [PATCH] tcm_vhost: Use llist for cmd completion list
Date: Sun, 6 Jan 2013 14:36:13 +0800
Message-Id: <1357454173-5973-1-git-send-email-asias@redhat.com>

This drops the cmd completion list spin lock and makes the cmd completion
queue lock-less.
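For reference, the lock-less pattern this moves to looks roughly like the
sketch below. It only illustrates the <linux/llist.h> API, it is not code from
this patch; the demo_cmd/demo_dev/demo_complete/demo_drain names are made up
for the example.

/* Illustrative sketch of the llist producer/consumer pattern; the demo_*
 * names are invented for this example and do not exist in tcm_vhost. */
#include <linux/llist.h>
#include <linux/slab.h>

struct demo_cmd {
        int status;
        struct llist_node node;                 /* replaces list_head + spinlock */
};

struct demo_dev {
        struct llist_head completion_list;      /* multi-producer, single-consumer */
};

/* Producer side: safe to call concurrently from several contexts. */
static void demo_complete(struct demo_dev *dev, struct demo_cmd *cmd)
{
        /* llist_add() is a lock-less cmpxchg push onto the head of the list. */
        llist_add(&cmd->node, &dev->completion_list);
        /* ...then kick the single consumer, e.g. via vhost_work_queue(). */
}

/* Consumer side: detach the whole list at once, then walk it. */
static void demo_drain(struct demo_dev *dev)
{
        struct llist_node *llnode = llist_del_all(&dev->completion_list);
        struct demo_cmd *cmd;

        while (llnode) {
                cmd = llist_entry(llnode, struct demo_cmd, node);
                llnode = llist_next(llnode);
                /* fill in the response for cmd, then release it */
                kfree(cmd);
        }
}

Note that llist_del_all() returns the most recently added node first, so the
consumer walks entries in reverse order of insertion.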
Signed-off-by: Asias He
Reviewed-by: Paolo Bonzini
Acked-by: Nicholas Bellinger
---
 drivers/vhost/tcm_vhost.c | 46 +++++++++++++---------------------------------
 drivers/vhost/tcm_vhost.h |  2 +-
 2 files changed, 14 insertions(+), 34 deletions(-)

diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index b20df5c..3720604 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -47,6 +47,7 @@
 #include <linux/vhost.h>
 #include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */
 #include <linux/virtio_scsi.h>
+#include <linux/llist.h>
 
 #include "vhost.c"
 #include "vhost.h"
@@ -64,8 +65,7 @@ struct vhost_scsi {
 	struct vhost_virtqueue vqs[3];
 
 	struct vhost_work vs_completion_work; /* cmd completion work item */
-	struct list_head vs_completion_list; /* cmd completion queue */
-	spinlock_t vs_completion_lock; /* protects s_completion_list */
+	struct llist_head vs_completion_list; /* cmd completion queue */
 };
 
 /* Local pointer to allocated TCM configfs fabric module */
@@ -301,9 +301,7 @@ static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
 {
 	struct vhost_scsi *vs = tv_cmd->tvc_vhost;
 
-	spin_lock_bh(&vs->vs_completion_lock);
-	list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
-	spin_unlock_bh(&vs->vs_completion_lock);
+	llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
 
 	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
 }
@@ -347,27 +345,6 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
 	kfree(tv_cmd);
 }
 
-/* Dequeue a command from the completion list */
-static struct tcm_vhost_cmd *vhost_scsi_get_cmd_from_completion(
-	struct vhost_scsi *vs)
-{
-	struct tcm_vhost_cmd *tv_cmd = NULL;
-
-	spin_lock_bh(&vs->vs_completion_lock);
-	if (list_empty(&vs->vs_completion_list)) {
-		spin_unlock_bh(&vs->vs_completion_lock);
-		return NULL;
-	}
-
-	list_for_each_entry(tv_cmd, &vs->vs_completion_list,
-			    tvc_completion_list) {
-		list_del(&tv_cmd->tvc_completion_list);
-		break;
-	}
-	spin_unlock_bh(&vs->vs_completion_lock);
-	return tv_cmd;
-}
-
 /* Fill in status and signal that we are done processing this command
  *
  * This is scheduled in the vhost work queue so we are called with the owner
@@ -377,12 +354,18 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 {
 	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
 					vs_completion_work);
+	struct virtio_scsi_cmd_resp v_rsp;
 	struct tcm_vhost_cmd *tv_cmd;
+	struct llist_node *llnode;
+	struct se_cmd *se_cmd;
+	int ret;
 
-	while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs))) {
-		struct virtio_scsi_cmd_resp v_rsp;
-		struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
-		int ret;
+	llnode = llist_del_all(&vs->vs_completion_list);
+	while (llnode) {
+		tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
+				tvc_completion_list);
+		llnode = llist_next(llnode);
+		se_cmd = &tv_cmd->tvc_se_cmd;
 
 		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
 			tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);
@@ -426,7 +409,6 @@ static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
 		pr_err("Unable to allocate struct tcm_vhost_cmd\n");
 		return ERR_PTR(-ENOMEM);
 	}
-	INIT_LIST_HEAD(&tv_cmd->tvc_completion_list);
 	tv_cmd->tvc_tag = v_req->tag;
 	tv_cmd->tvc_task_attr = v_req->task_attr;
 	tv_cmd->tvc_exp_data_len = exp_data_len;
@@ -859,8 +841,6 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 		return -ENOMEM;
 
 	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
-	INIT_LIST_HEAD(&s->vs_completion_list);
-	spin_lock_init(&s->vs_completion_lock);
 
 	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
 	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
index 7e87c63..47ee80b 100644
--- a/drivers/vhost/tcm_vhost.h
+++ b/drivers/vhost/tcm_vhost.h
@@ -34,7 +34,7 @@ struct tcm_vhost_cmd {
 	/* Sense buffer that will be mapped into outgoing status */
 	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
 	/* Completed commands list, serviced from vhost worker thread */
-	struct list_head tvc_completion_list;
+	struct llist_node tvc_completion_list;
 };
 
 struct tcm_vhost_nexus {