From patchwork Mon Nov 30 05:08:55 2015
X-Patchwork-Id: 7721041
Subject: [RFC PATCH 4/5] dax: provide diagnostics for pmd mapping failures
From: Dan Williams
To: linux-mm@kvack.org
Cc: toshi.kani@hp.com, linux-nvdimm@lists.01.org
Date: Sun, 29 Nov 2015 21:08:55 -0800
Message-ID: <20151130050854.18366.17076.stgit@dwillia2-desk3.jf.intel.com>
In-Reply-To: <20151130050833.18366.21963.stgit@dwillia2-desk3.jf.intel.com>
References: <20151130050833.18366.21963.stgit@dwillia2-desk3.jf.intel.com>

There is a wide gamut of conditions that can trigger the dax pmd path to
fall back to pte mappings.  Ideally we'd have a syscall interface to
determine mapping characteristics after the fact.  In the meantime,
provide debug messages.
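The new messages are pr_debug()/dev_dbg() call sites, so they are silent by
default; on a CONFIG_DYNAMIC_DEBUG=y kernel they can be switched on at
runtime, e.g. (assuming debugfs is mounted at /sys/kernel/debug):

  # enable every dynamic-debug site in fs/dax.c, then watch dmesg
  echo 'file fs/dax.c +p' > /sys/kernel/debug/dynamic_debug/control

The same query can also be passed at boot via the dyndbg= kernel parameter.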
Signed-off-by: Dan Williams
---
 fs/dax.c |   47 ++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 38 insertions(+), 9 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 9eb46f4b6e38..a429a00628c5 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -567,8 +567,9 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	unsigned blkbits = inode->i_blkbits;
 	unsigned long pmd_addr = address & PMD_MASK;
 	bool write = flags & FAULT_FLAG_WRITE;
-	struct block_device *bdev;
+	struct block_device *bdev = NULL;
 	pgoff_t size, pgoff;
+	const char *reason;
 	sector_t block;
 	int result = 0;
@@ -579,21 +580,28 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	/* Fall back to PTEs if we're going to COW */
 	if (write && !(vma->vm_flags & VM_SHARED)) {
 		split_huge_page_pmd(vma, address, pmd);
+		reason = "cow write";
 		return VM_FAULT_FALLBACK;
 	}
 
 	/* If the PMD would extend outside the VMA */
-	if (pmd_addr < vma->vm_start)
-		return VM_FAULT_FALLBACK;
-	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
-		return VM_FAULT_FALLBACK;
+	if (pmd_addr < vma->vm_start) {
+		reason = "vma start unaligned";
+		goto fallback;
+	}
+	if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
+		reason = "vma end unaligned";
+		goto fallback;
+	}
 
 	pgoff = linear_page_index(vma, pmd_addr);
 	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (pgoff >= size)
 		return VM_FAULT_SIGBUS;
 
 	/* If the PMD would cover blocks out of the file */
-	if ((pgoff | PG_PMD_COLOUR) >= size)
+	if ((pgoff | PG_PMD_COLOUR) >= size) {
+		reason = "offset + huge page size > file size";
 		return VM_FAULT_FALLBACK;
+	}
 
 	memset(&bh, 0, sizeof(bh));
 	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
@@ -609,8 +617,10 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	 * just fall back to PTEs.  Calling get_block 512 times in a loop
 	 * would be silly.
 	 */
-	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
+	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
+		reason = "block allocation size invalid";
 		goto fallback;
+	}
 
 	/*
 	 * If we allocated new storage, make sure no process has any
@@ -633,23 +643,33 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 		result = VM_FAULT_SIGBUS;
 		goto out;
 	}
-	if ((pgoff | PG_PMD_COLOUR) >= size)
+	if ((pgoff | PG_PMD_COLOUR) >= size) {
+		reason = "pgoff unaligned";
 		goto fallback;
+	}
 
 	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
 		spinlock_t *ptl;
 		pmd_t entry;
 		struct page *zero_page = get_huge_zero_page();
 
-		if (unlikely(!zero_page))
+		if (unlikely(!zero_page)) {
+			reason = "no zero page";
 			goto fallback;
+		}
 
 		ptl = pmd_lock(vma->vm_mm, pmd);
 		if (!pmd_none(*pmd)) {
 			spin_unlock(ptl);
+			reason = "pmd already present";
 			goto fallback;
 		}
 
+		dev_dbg(part_to_dev(bdev->bd_part),
+				"%s: %s addr: %lx pfn: sect: %llx\n",
+				__func__, current->comm, address,
+				(unsigned long long) to_sector(&bh, inode));
+
 		entry = mk_pmd(zero_page, vma->vm_page_prot);
 		entry = pmd_mkhuge(entry);
 		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
@@ -678,6 +698,7 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	 */
 	if (pfn_t_has_page(dax.pfn)) {
 		dax_unmap_atomic(bdev, &dax);
+		reason = "pfn not in memmap";
 		goto fallback;
 	}
@@ -690,6 +711,11 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	}
 	dax_unmap_atomic(bdev, &dax);
 
+	dev_dbg(part_to_dev(bdev->bd_part),
+			"%s: %s addr: %lx pfn: %lx sect: %llx\n",
+			__func__, current->comm, address,
+			pfn_t_to_pfn(dax.pfn),
+			(unsigned long long) dax.sector);
 	result |= vmf_insert_pfn_pmd(vma, address, pmd, dax.pfn, write);
 }
@@ -703,6 +729,9 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	return result;
 
 fallback:
+	pr_debug("%s%s %s: %s addr: %lx fallback: %s\n", bdev
+			? dev_name(part_to_dev(bdev->bd_part)) : "", bdev
+			? ": " : "", __func__, current->comm, address, reason);
 	count_vm_event(THP_FAULT_FALLBACK);
 	result = VM_FAULT_FALLBACK;
 	goto out;
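
With the call sites enabled, a pte fallback is reported through the
pr_debug() above in a form along these lines (the device name, task name,
and address here are made up purely for illustration):

  pmem0: __dax_pmd_fault: fio addr: 7f4000200000 fallback: vma start unaligned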