From patchwork Thu Sep 18 10:33:54 2014
X-Patchwork-Submitter: Janne Grunau <j@jannau.net>
X-Patchwork-Id: 4930341
From: Janne Grunau <j@jannau.net>
To: ceph-devel@vger.kernel.org
Subject: [PATCH v2 2/3] ec: use 32-byte aligned buffers
Date: Thu, 18 Sep 2014 12:33:54 +0200
Message-Id: <1411036435-18860-3-git-send-email-j@jannau.net>
X-Mailer: git-send-email 2.1.0
In-Reply-To: <1411036435-18860-1-git-send-email-j@jannau.net>
References: <1410796508-28711-1-git-send-email-j@jannau.net>
 <1411036435-18860-1-git-send-email-j@jannau.net>
X-Mailing-List: ceph-devel@vger.kernel.org

Requiring page-aligned buffers and realigning the input if necessary
creates measurable overhead. ceph_erasure_code_benchmark is ~30% faster
with this change for technique=reed_sol_van,k=2,m=1.

This also prevents a misaligned buffer when bufferlist::c_str() has to
allocate a new buffer to provide a contiguous one.
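Illustration only, not part of the patch: the overhead described above
comes from rebuilding a bufferlist whose memory does not meet the
alignment that the SIMD erasure-code kernels want. The stand-alone C++
sketch below contrasts a default allocation with an explicitly 32-byte
aligned one; kSimdAlign is a made-up name for this example, not a Ceph
symbol.

#include <cstdio>
#include <cstdlib>
#include <cstdint>

// Hypothetical constant for the example: the 32-byte boundary that
// AVX2-wide SIMD loads/stores prefer.
static const size_t kSimdAlign = 32;

int main() {
  // malloc() only guarantees alignof(max_align_t) (commonly 16 bytes),
  // so a 32-byte-wide kernel may be forced to realign (i.e. copy) first.
  void *unaligned = malloc(4096);
  if (!unaligned)
    return 1;

  // posix_memalign() returns memory on the requested boundary, letting
  // the kernel run in place with no realignment copy.
  void *aligned = NULL;
  if (posix_memalign(&aligned, kSimdAlign, 4096) != 0)
    return 1;

  printf("malloc         mod 32 = %zu\n",
         (size_t)((uintptr_t)unaligned % kSimdAlign));
  printf("posix_memalign mod 32 = %zu\n",
         (size_t)((uintptr_t)aligned % kSimdAlign));

  free(unaligned);
  free(aligned);
  return 0;
}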
See bug #9408

Signed-off-by: Janne Grunau <j@jannau.net>
---
 src/erasure-code/ErasureCode.cc | 57 ++++++++++++++++++++++++++++-------------
 src/erasure-code/ErasureCode.h  |  3 ++-
 2 files changed, 41 insertions(+), 19 deletions(-)

diff --git a/src/erasure-code/ErasureCode.cc b/src/erasure-code/ErasureCode.cc
index 5953f49..7aa5235 100644
--- a/src/erasure-code/ErasureCode.cc
+++ b/src/erasure-code/ErasureCode.cc
@@ -54,22 +54,49 @@ int ErasureCode::minimum_to_decode_with_cost(const set<int> &want_to_read,
 }
 
 int ErasureCode::encode_prepare(const bufferlist &raw,
-                                bufferlist *prepared) const
+                                map<int, bufferlist> &encoded) const
 {
   unsigned int k = get_data_chunk_count();
   unsigned int m = get_chunk_count() - k;
   unsigned blocksize = get_chunk_size(raw.length());
-  unsigned padded_length = blocksize * k;
-  *prepared = raw;
-  if (padded_length - raw.length() > 0) {
-    bufferptr pad(padded_length - raw.length());
-    pad.zero();
-    prepared->push_back(pad);
+  unsigned pad_len = blocksize * k - raw.length();
+  unsigned padded_chunks = k - raw.length() / blocksize;
+  bufferlist prepared = raw;
+
+  if (!prepared.is_aligned()) {
+    // splice padded chunks off to make the rebuild faster
+    if (padded_chunks)
+      prepared.splice((k - padded_chunks) * blocksize,
+                      padded_chunks * blocksize - pad_len);
+    prepared.rebuild_aligned();
+  }
+
+  for (unsigned int i = 0; i < k - padded_chunks; i++) {
+    int chunk_index = chunk_mapping.size() > 0 ? chunk_mapping[i] : i;
+    bufferlist &chunk = encoded[chunk_index];
+    chunk.substr_of(prepared, i * blocksize, blocksize);
+  }
+  if (padded_chunks) {
+    unsigned remainder = raw.length() - (k - padded_chunks) * blocksize;
+    bufferlist padded;
+    bufferptr buf(buffer::create_aligned(padded_chunks * blocksize));
+
+    raw.copy((k - padded_chunks) * blocksize, remainder, buf.c_str());
+    buf.zero(remainder, pad_len);
+    padded.push_back(buf);
+
+    for (unsigned int i = k - padded_chunks; i < k; i++) {
+      int chunk_index = chunk_mapping.size() > 0 ? chunk_mapping[i] : i;
+      bufferlist &chunk = encoded[chunk_index];
+      chunk.substr_of(padded, (i - (k - padded_chunks)) * blocksize, blocksize);
+    }
+  }
+  for (unsigned int i = k; i < k + m; i++) {
+    int chunk_index = chunk_mapping.size() > 0 ? chunk_mapping[i] : i;
+    bufferlist &chunk = encoded[chunk_index];
+    chunk.push_back(buffer::create_aligned(blocksize));
   }
-  unsigned coding_length = blocksize * m;
-  bufferptr coding(buffer::create_page_aligned(coding_length));
-  prepared->push_back(coding);
-  prepared->rebuild_page_aligned();
+
   return 0;
 }
 
@@ -80,15 +107,9 @@ int ErasureCode::encode(const set<int> &want_to_encode,
   unsigned int k = get_data_chunk_count();
   unsigned int m = get_chunk_count() - k;
   bufferlist out;
-  int err = encode_prepare(in, &out);
+  int err = encode_prepare(in, *encoded);
   if (err)
     return err;
-  unsigned blocksize = get_chunk_size(in.length());
-  for (unsigned int i = 0; i < k + m; i++) {
-    int chunk_index = chunk_mapping.size() > 0 ? chunk_mapping[i] : i;
-    bufferlist &chunk = (*encoded)[chunk_index];
-    chunk.substr_of(out, i * blocksize, blocksize);
-  }
   encode_chunks(want_to_encode, encoded);
   for (unsigned int i = 0; i < k + m; i++) {
     if (want_to_encode.count(i) == 0)
diff --git a/src/erasure-code/ErasureCode.h b/src/erasure-code/ErasureCode.h
index 7aaea95..62aa383 100644
--- a/src/erasure-code/ErasureCode.h
+++ b/src/erasure-code/ErasureCode.h
@@ -46,7 +46,8 @@ namespace ceph {
                                              const map<int, int> &available,
                                              set<int> *minimum);
 
-    int encode_prepare(const bufferlist &raw, bufferlist *prepared) const;
+    int encode_prepare(const bufferlist &raw,
+                       map<int, bufferlist> &encoded) const;
 
     virtual int encode(const set<int> &want_to_encode,
                        const bufferlist &in,
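For readers who want the shape of the new encode_prepare() logic outside
Ceph's buffer classes, here is a stand-alone C++ sketch of the same idea.
split_with_padding() and Chunk are hypothetical names for this example;
Ceph's bufferlist/bufferptr manage ownership through refcounted buffers,
which the sketch glosses over.

#include <cstdlib>
#include <cstring>
#include <vector>

// Hypothetical view type for the example; in Ceph each chunk is a
// bufferlist sharing refcounted memory rather than a raw pointer.
struct Chunk {
  const unsigned char *data;
  size_t len;
};

// Split `raw` (raw_len bytes) into k chunks of blocksize bytes each,
// assuming 0 < raw_len <= k * blocksize and a contiguous input.
std::vector<Chunk> split_with_padding(const unsigned char *raw, size_t raw_len,
                                      unsigned k, size_t blocksize) {
  std::vector<Chunk> chunks;
  // Chunks the input does not completely fill, and that therefore need
  // zero padding at the end (mirrors `padded_chunks` in the patch).
  unsigned padded_chunks = k - raw_len / blocksize;
  size_t pad_len = blocksize * k - raw_len;

  // Fully covered chunks alias the input directly: no copy at all.
  for (unsigned i = 0; i < k - padded_chunks; i++)
    chunks.push_back(Chunk{raw + i * blocksize, blocksize});

  if (padded_chunks) {
    size_t off = (k - padded_chunks) * blocksize;
    size_t remainder = raw_len - off;
    // One 32-byte aligned allocation covers all padded chunks, so the
    // input's tail is copied once and the padding zeroed once.
    void *mem = NULL;
    if (posix_memalign(&mem, 32, padded_chunks * blocksize) != 0)
      return std::vector<Chunk>();
    unsigned char *padded = static_cast<unsigned char *>(mem);
    memcpy(padded, raw + off, remainder);
    memset(padded + remainder, 0, pad_len);
    for (unsigned i = 0; i < padded_chunks; i++)
      chunks.push_back(Chunk{padded + i * blocksize, blocksize});
    // NOTE: the aligned block intentionally leaks in this sketch; real
    // code would hand ownership to a refcounted buffer as bufferptr does.
  }
  return chunks;
}

The point mirrored from the patch: the common, fully filled data chunks are
never copied, only the padded tail pays for one copy, and the coding chunks
are allocated aligned from the start, so the encode kernels never trigger a
realignment rebuild.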