From patchwork Mon Mar 16 12:37:02 2015 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Hannes Reinecke X-Patchwork-Id: 6018261 X-Patchwork-Delegate: christophe.varoqui@free.fr Return-Path: X-Original-To: patchwork-dm-devel@patchwork.kernel.org Delivered-To: patchwork-parsemail@patchwork2.web.kernel.org Received: from mail.kernel.org (mail.kernel.org [198.145.29.136]) by patchwork2.web.kernel.org (Postfix) with ESMTP id C9811BF910 for ; Mon, 16 Mar 2015 12:44:32 +0000 (UTC) Received: from mail.kernel.org (localhost [127.0.0.1]) by mail.kernel.org (Postfix) with ESMTP id CE5F2204D1 for ; Mon, 16 Mar 2015 12:44:31 +0000 (UTC) Received: from mx5-phx2.redhat.com (mx5-phx2.redhat.com [209.132.183.37]) (using TLSv1.2 with cipher DHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id A2F23204D8 for ; Mon, 16 Mar 2015 12:44:30 +0000 (UTC) Received: from lists01.pubmisc.prod.ext.phx2.redhat.com (lists01.pubmisc.prod.ext.phx2.redhat.com [10.5.19.33]) by mx5-phx2.redhat.com (8.14.4/8.14.4) with ESMTP id t2GCccja055288; Mon, 16 Mar 2015 08:38:38 -0400 Received: from int-mx10.intmail.prod.int.phx2.redhat.com (int-mx10.intmail.prod.int.phx2.redhat.com [10.5.11.23]) by lists01.pubmisc.prod.ext.phx2.redhat.com (8.13.8/8.13.8) with ESMTP id t2GCbMAj028999 for ; Mon, 16 Mar 2015 08:37:22 -0400 Received: from mx1.redhat.com (ext-mx15.extmail.prod.ext.phx2.redhat.com [10.5.110.20]) by int-mx10.intmail.prod.int.phx2.redhat.com (8.14.4/8.14.4) with ESMTP id t2GCbMYD020957; Mon, 16 Mar 2015 08:37:22 -0400 Received: from mx2.suse.de (cantor2.suse.de [195.135.220.15]) by mx1.redhat.com (8.14.4/8.14.4) with ESMTP id t2GCbJ9c011668 (version=TLSv1/SSLv3 cipher=DHE-RSA-CAMELLIA256-SHA bits=256 verify=FAIL); Mon, 16 Mar 2015 08:37:20 -0400 X-Virus-Scanned: by amavisd-new at test-mx.suse.de Received: from relay1.suse.de (charybdis-ext.suse.de [195.135.220.254]) by 
mx2.suse.de (Postfix) with ESMTP id 83E68ADFF; Mon, 16 Mar 2015 12:37:11 +0000 (UTC) From: Hannes Reinecke To: Christophe Varoqui Date: Mon, 16 Mar 2015 13:37:02 +0100 Message-Id: <1426509425-15978-76-git-send-email-hare@suse.de> In-Reply-To: <1426509425-15978-1-git-send-email-hare@suse.de> References: <1426509425-15978-1-git-send-email-hare@suse.de> X-RedHat-Spam-Score: -7.309 (BAYES_00, DCC_REPUT_00_12, RCVD_IN_DNSWL_HI, T_RP_MATCHES_RCVD, URIBL_BLOCKED) 195.135.220.15 cantor2.suse.de 195.135.220.15 cantor2.suse.de X-Scanned-By: MIMEDefang 2.68 on 10.5.11.23 X-Scanned-By: MIMEDefang 2.68 on 10.5.110.20 X-loop: dm-devel@redhat.com Cc: dm-devel@redhat.com Subject: [dm-devel] [PATCH 75/78] Push down vector lock during uevent processing X-BeenThere: dm-devel@redhat.com X-Mailman-Version: 2.1.12 Precedence: junk Reply-To: device-mapper development List-Id: device-mapper development List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , MIME-Version: 1.0 Sender: dm-devel-bounces@redhat.com Errors-To: dm-devel-bounces@redhat.com X-Spam-Status: No, score=-4.2 required=5.0 tests=BAYES_00, RCVD_IN_DNSWL_MED, T_RP_MATCHES_RCVD, UNPARSEABLE_RELAY autolearn=unavailable version=3.3.1 X-Spam-Checker-Version: SpamAssassin 3.3.1 (2010-03-16) on mail.kernel.org X-Virus-Scanned: ClamAV using ClamSMTP When adding lots of paths the vector lock which is taken at the start of the uevent handler will become a bottleneck as it'll compete with the checkerloop. So move the vector handling down into the individual event handler. 
Signed-off-by: Hannes Reinecke --- multipathd/main.c | 67 +++++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 48 insertions(+), 19 deletions(-) diff --git a/multipathd/main.c b/multipathd/main.c index ab2a3a7..77a1241 100644 --- a/multipathd/main.c +++ b/multipathd/main.c @@ -279,7 +279,11 @@ uev_add_map (struct uevent * uev, struct vectors * vecs) return 1; } } + pthread_cleanup_push(cleanup_lock, &vecs->lock); + lock(vecs->lock); + pthread_testcancel(); rc = ev_add_map(uev->kernel, alias, vecs); + lock_cleanup_pop(vecs->lock); FREE(alias); return rc; } @@ -361,6 +365,10 @@ uev_remove_map (struct uevent * uev, struct vectors * vecs) return 0; } minor = uevent_get_minor(uev); + + pthread_cleanup_push(cleanup_lock, &vecs->lock); + lock(vecs->lock); + pthread_testcancel(); mpp = find_mp_by_minor(vecs->mpvec, minor); if (!mpp) { @@ -377,10 +385,12 @@ uev_remove_map (struct uevent * uev, struct vectors * vecs) orphan_paths(vecs->pathvec, mpp); remove_map_and_stop_waiter(mpp, vecs, 1); out: + lock_cleanup_pop(vecs->lock); FREE(alias); return 0; } +/* Called from CLI handler */ int ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs) { @@ -416,6 +426,9 @@ uev_add_path (struct uevent *uev, struct vectors * vecs) return 1; } + pthread_cleanup_push(cleanup_lock, &vecs->lock); + lock(vecs->lock); + pthread_testcancel(); pp = find_path_by_dev(vecs->pathvec, uev->kernel); if (pp) { int r; @@ -443,8 +456,15 @@ uev_add_path (struct uevent *uev, struct vectors * vecs) ret = 1; } } - return ret; } + /* + * The linux implementation of pthread_cleanup_push() and pthread_cleanup_pop() + * requires that both must be at the same indentation level, + * hence the slightly odd coding. 
+ */ + lock_cleanup_pop(vecs->lock); + if (pp) + return ret; /* * get path vital state @@ -462,6 +482,9 @@ uev_add_path (struct uevent *uev, struct vectors * vecs) free_path(pp); return 1; } + pthread_cleanup_push(cleanup_lock, &vecs->lock); + lock(vecs->lock); + pthread_testcancel(); ret = store_path(vecs->pathvec, pp); if (!ret) { pp->checkint = conf->checkint; @@ -473,7 +496,7 @@ uev_add_path (struct uevent *uev, struct vectors * vecs) free_path(pp); ret = 1; } - + lock_cleanup_pop(vecs->lock); return ret; } @@ -535,12 +558,12 @@ rescan: */ start_waiter = 1; } - else + if (!start_waiter) goto fail; /* leave path added to pathvec */ } - /* persistent reseravtion check*/ - mpath_pr_event_handle(pp); + /* persistent reservation check*/ + mpath_pr_event_handle(pp); /* * push the map to the device-mapper @@ -568,7 +591,7 @@ retry: * deal with asynchronous uevents :(( */ if (mpp->action == ACT_RELOAD && retries-- > 0) { - condlog(0, "%s: uev_add_path sleep", mpp->alias); + condlog(0, "%s: ev_add_path sleep", mpp->alias); sleep(1); update_mpp_paths(mpp, vecs->pathvec); goto rescan; @@ -597,8 +620,7 @@ retry: condlog(2, "%s [%s]: path added to devmap %s", pp->dev, pp->dev_t, mpp->alias); return 0; - } - else + } else goto fail; fail_map: @@ -612,17 +634,22 @@ static int uev_remove_path (struct uevent *uev, struct vectors * vecs) { struct path *pp; + int ret; condlog(2, "%s: remove path (uevent)", uev->kernel); + pthread_cleanup_push(cleanup_lock, &vecs->lock); + lock(vecs->lock); + pthread_testcancel(); pp = find_path_by_dev(vecs->pathvec, uev->kernel); - + if (pp) + ret = ev_remove_path(pp, vecs); + lock_cleanup_pop(vecs->lock); if (!pp) { /* Not an error; path might have been purged earlier */ condlog(0, "%s: path already removed", uev->kernel); return 0; } - - return ev_remove_path(pp, vecs); + return ret; } int @@ -726,20 +753,27 @@ uev_update_path (struct uevent *uev, struct vectors * vecs) if (ro >= 0) { struct path * pp; + struct multipath *mpp = NULL; condlog(2, 
"%s: update path write_protect to '%d' (uevent)", uev->kernel, ro); + pthread_cleanup_push(cleanup_lock, &vecs->lock); + lock(vecs->lock); + pthread_testcancel(); pp = find_path_by_dev(vecs->pathvec, uev->kernel); + if (pp) + mpp = pp->mpp; + lock_cleanup_pop(vecs->lock); if (!pp) { condlog(0, "%s: spurious uevent, path not found", uev->kernel); return 1; } - if (pp->mpp) { - retval = reload_map(vecs, pp->mpp, 0); + if (mpp) { + retval = reload_map(vecs, mpp, 0); condlog(2, "%s: map %s reloaded (retval %d)", - uev->kernel, pp->mpp->alias, retval); + uev->kernel, mpp->alias, retval); } } @@ -823,10 +857,6 @@ uev_trigger (struct uevent * uev, void * trigger_data) if (uev_discard(uev->devpath)) return 0; - pthread_cleanup_push(cleanup_lock, &vecs->lock); - lock(vecs->lock); - pthread_testcancel(); - /* * device map event * Add events are ignored here as the tables @@ -865,7 +895,6 @@ uev_trigger (struct uevent * uev, void * trigger_data) } out: - lock_cleanup_pop(vecs->lock); return r; }