diff --git a/arch/Kconfig b/arch/Kconfig
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1640,6 +1640,9 @@ config HAVE_ARCH_PFN_VALID
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
	bool

+config ARCH_SUPPORTS_MSHARE
+ bool
+
config ARCH_SUPPORTS_PAGE_TABLE_CHECK
	bool

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -120,6 +120,7 @@ config X86
select ARCH_SUPPORTS_ACPI
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
+ select ARCH_SUPPORTS_MSHARE if X86_64
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if X86_64
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
select ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP if NR_CPUS <= 4096
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1217,6 +1217,8 @@ void do_user_addr_fault(struct pt_regs *regs,
struct mm_struct *mm;
vm_fault_t fault;
unsigned int flags = FAULT_FLAG_DEFAULT;
+ bool is_shared_vma;
+	unsigned long addr;

	tsk = current;
mm = tsk->mm;
@@ -1330,6 +1332,12 @@ void do_user_addr_fault(struct pt_regs *regs,
if (!vma)
		goto lock_mmap;

+	/* mshare does not support per-VMA locks yet */
+ if (vma_is_mshare(vma)) {
+ vma_end_read(vma);
+ goto lock_mmap;
+ }
+
if (unlikely(access_error(error_code, vma))) {
bad_area_access_error(regs, error_code, address, NULL, vma);
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
@@ -1358,17 +1366,38 @@ void do_user_addr_fault(struct pt_regs *regs,
lock_mmap:

retry:
+ addr = address;
+ is_shared_vma = false;
vma = lock_mm_and_find_vma(mm, address, regs);
if (unlikely(!vma)) {
bad_area_nosemaphore(regs, error_code, address);
return;
	}

+	if (unlikely(vma_is_mshare(vma))) {
+ fault = find_shared_vma(&vma, &addr);
+
+ if (fault) {
+ mmap_read_unlock(mm);
+ goto done;
+ }
+
+ if (!vma) {
+ mmap_read_unlock(mm);
+ bad_area_nosemaphore(regs, error_code, address);
+ return;
+ }
+
+ is_shared_vma = true;
+ }
+
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
if (unlikely(access_error(error_code, vma))) {
+ if (unlikely(is_shared_vma))
+ mmap_read_unlock(vma->vm_mm);
bad_area_access_error(regs, error_code, address, mm, vma);
return;
}
@@ -1386,7 +1415,11 @@ void do_user_addr_fault(struct pt_regs *regs,
* userland). The return to userland is identified whenever
* FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
*/
- fault = handle_mm_fault(vma, address, flags, regs);
+ fault = handle_mm_fault(vma, addr, flags, regs);
+
+ if (unlikely(is_shared_vma) && ((fault & VM_FAULT_COMPLETED) ||
+ (fault & VM_FAULT_RETRY) || fault_signal_pending(fault, regs)))
+		mmap_read_unlock(mm);

	if (fault_signal_pending(fault, regs)) {
/*
@@ -1414,6 +1447,8 @@ void do_user_addr_fault(struct pt_regs *regs,
goto retry;
	}

+	if (unlikely(is_shared_vma))
+ mmap_read_unlock(vma->vm_mm);
mmap_read_unlock(mm);
done:
if (likely(!(fault & VM_FAULT_ERROR)))
diff --git a/mm/Kconfig b/mm/Kconfig
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1360,7 +1360,7 @@ config PT_RECLAIM

config MSHARE
bool "Mshare"
- depends on MMU
+ depends on MMU && ARCH_SUPPORTS_MSHARE
help
Enable msharefs: A ram-based filesystem that allows multiple
processes to share page table entries for shared pages. A file
Enable x86 support for handling page faults in an mshare region by
redirecting page faults to operate on the mshare mm_struct and vmas
contained in it. Some permission checks are done using vma flags in
architecture-specific fault handling code, so the actual vma needed to
complete the handling is acquired before calling handle_mm_fault().
Because of this, an ARCH_SUPPORTS_MSHARE config option is added.

Signed-off-by: Anthony Yznaga <anthony.yznaga@oracle.com>
---
 arch/Kconfig        |  3 +++
 arch/x86/Kconfig    |  1 +
 arch/x86/mm/fault.c | 37 ++++++++++++++++++++++++++++++++++++-
 mm/Kconfig          |  2 +-
 4 files changed, 41 insertions(+), 2 deletions(-)
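For reviewers unfamiliar with the series, a minimal userspace sketch of
the flow that ends up in the fault path modified above (the mount point,
file name, and region size are hypothetical; the msharefs user API is
introduced by earlier patches in this series, not by this one):

	/* Hypothetical msharefs usage; paths and sizes are illustrative. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		/* Assume msharefs is already mounted here. */
		int fd = open("/mnt/mshare/region0", O_RDWR | O_CREAT, 0600);
		if (fd < 0) {
			perror("open");
			return EXIT_FAILURE;
		}

		/* Size the shared region. */
		size_t len = 512UL << 20;
		if (ftruncate(fd, len) < 0) {
			perror("ftruncate");
			return EXIT_FAILURE;
		}

		/*
		 * Processes mapping the file share page table entries; the
		 * first touch below faults through do_user_addr_fault(),
		 * which this patch redirects to the mshare mm_struct.
		 */
		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return EXIT_FAILURE;
		}
		p[0] = 1;

		munmap(p, len);
		close(fd);
		return 0;
	}

Note the unlock pairing the hunks above imply: once find_shared_vma()
succeeds, two mmap locks are held (the faulting process's mm taken by
lock_mm_and_find_vma(), plus the mshare host mm reachable as
vma->vm_mm), which is why the error, retry, and completion paths each
drop both.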
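And since config MSHARE now depends on ARCH_SUPPORTS_MSHARE, other
architectures are unaffected until they opt in. A hypothetical sketch of
such an opt-in for another architecture (arm64 is shown purely as an
illustration; no such enablement is part of this patch):

	config ARM64
		...
		select ARCH_SUPPORTS_MSHARE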