From patchwork Fri Jul 10 20:29:19 2015
From: Matthew Wilcox <matthew.r.wilcox@intel.com>
To: linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-mm@kvack.org
Cc: Matthew Wilcox
Subject: [PATCH 04/10] mm: Add a pmd_fault handler
Date: Fri, 10 Jul 2015 16:29:19 -0400
Message-Id: <1436560165-8943-5-git-send-email-matthew.r.wilcox@intel.com>
In-Reply-To: <1436560165-8943-1-git-send-email-matthew.r.wilcox@intel.com>
References: <1436560165-8943-1-git-send-email-matthew.r.wilcox@intel.com>

Allow non-anonymous VMAs to provide huge pages in response to a page fault.
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
---
 include/linux/mm.h |  2 ++
 mm/memory.c        | 30 ++++++++++++++++++++++++------
 2 files changed, 26 insertions(+), 6 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2e872f9..00473e4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -246,6 +246,8 @@ struct vm_operations_struct {
 	void (*open)(struct vm_area_struct * area);
 	void (*close)(struct vm_area_struct * area);
 	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+	int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
+						pmd_t *, unsigned int flags);
 	void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
 
 	/* notification that a previously read-only page is about to become
diff --git a/mm/memory.c b/mm/memory.c
index a84fbb7..32007d6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3209,6 +3209,27 @@ out:
 	return 0;
 }
 
+static int create_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pmd_t *pmd, unsigned int flags)
+{
+	if (!vma->vm_ops)
+		return do_huge_pmd_anonymous_page(mm, vma, address, pmd, flags);
+	if (vma->vm_ops->pmd_fault)
+		return vma->vm_ops->pmd_fault(vma, address, pmd, flags);
+	return VM_FAULT_FALLBACK;
+}
+
+static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pmd_t *pmd, pmd_t orig_pmd,
+			unsigned int flags)
+{
+	if (!vma->vm_ops)
+		return do_huge_pmd_wp_page(mm, vma, address, pmd, orig_pmd);
+	if (vma->vm_ops->pmd_fault)
+		return vma->vm_ops->pmd_fault(vma, address, pmd, flags);
+	return VM_FAULT_FALLBACK;
+}
+
 /*
  * These routines also need to handle stuff like marking pages dirty
  * and/or accessed for architectures that don't do it in hardware (most
@@ -3312,10 +3333,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!pmd)
 		return VM_FAULT_OOM;
 	if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
-		int ret = VM_FAULT_FALLBACK;
-		if (!vma->vm_ops)
-			ret = do_huge_pmd_anonymous_page(mm, vma, address,
-					pmd, flags);
+		int ret = create_huge_pmd(mm, vma, address, pmd, flags);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;
 	} else {
@@ -3339,8 +3357,8 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 						  orig_pmd, pmd);
 
 			if (dirty && !pmd_write(orig_pmd)) {
-				ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
-							  orig_pmd);
+				ret = wp_huge_pmd(mm, vma, address, pmd,
+						  orig_pmd, flags);
 				if (!(ret & VM_FAULT_FALLBACK))
 					return ret;
 			} else {
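
For illustration only (this sketch is not part of the patch): a driver or
filesystem wanting huge-page faults would implement the new callback and
hook it into its vm_operations_struct. All foo_* names below are
hypothetical; the body only demonstrates the contract the patch implies,
namely that anything the handler cannot map as a huge page returns
VM_FAULT_FALLBACK so __handle_mm_fault() retries with PTE-sized faults.

#include <linux/mm.h>

/* Hypothetical PTE-sized fault path (placeholder). */
static int foo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

/* Hypothetical PMD-sized fault path using the new ->pmd_fault hook. */
static int foo_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags)
{
	unsigned long pmd_addr = address & PMD_MASK;

	/*
	 * The PMD-sized region around the fault must lie entirely
	 * inside the VMA; otherwise let the core MM fall back to
	 * small pages.
	 */
	if (pmd_addr < vma->vm_start || pmd_addr + PMD_SIZE > vma->vm_end)
		return VM_FAULT_FALLBACK;

	/*
	 * A real implementation would find or allocate PMD_SIZE of
	 * suitably aligned backing storage and install it at *pmd
	 * (checking flags & FAULT_FLAG_WRITE for write faults),
	 * returning VM_FAULT_NOPAGE on success. This stub always
	 * falls back.
	 */
	return VM_FAULT_FALLBACK;
}

static const struct vm_operations_struct foo_vm_ops = {
	.fault		= foo_fault,
	.pmd_fault	= foo_pmd_fault,
};

Note also that wp_huge_pmd() routes write-protect faults on an existing
huge PMD through the same ->pmd_fault callback, with FAULT_FLAG_WRITE set
in flags, so an implementation needs only this single entry point for both
the initial fault and the huge write case.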