@@ -261,7 +261,8 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 
 		psize = hstate_get_psize(h);
 #ifdef CONFIG_DEBUG_VM
-		assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
+		assert_spin_locked(huge_pte_lockptr(huge_page_shift(h),
+						    vma->vm_mm, ptep));
 #endif
 
 #else
@@ -956,12 +956,11 @@ static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
 	return modified_mask;
 }
 
-static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
+static inline spinlock_t *huge_pte_lockptr(unsigned int shift,
 					   struct mm_struct *mm, pte_t *pte)
 {
-	if (huge_page_size(h) == PMD_SIZE)
+	if (shift == PMD_SHIFT)
 		return pmd_lockptr(mm, (pmd_t *) pte);
-	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
 	return &mm->page_table_lock;
 }
 
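The heart of the change: lock selection now keys off the page-table shift instead of the hstate, so a caller that only knows at which level of the page tables it is operating can still find the right lock. Dropping the VM_BUG_ON presumably lets a base-page shift simply fall through to the mm-wide lock. A minimal userspace sketch of the same dispatch follows; every type and name here is an illustrative stand-in, not the kernel's actual layout, and the PMD_SHIFT value is the x86-64 one:

	#include <stdio.h>

	#define PMD_SHIFT	21	/* illustrative: x86-64 value */

	/* Stand-ins for the kernel's spinlock_t and mm_struct. */
	typedef struct { const char *name; } spinlock_t;

	struct mm_struct {
		spinlock_t page_table_lock;	/* coarse mm-wide fallback lock */
	};

	/* Per-PMD split lock; the kernel finds this via the pmd's struct page. */
	static spinlock_t pmd_split_lock = { "per-pmd split lock" };

	static spinlock_t *huge_pte_lockptr(unsigned int shift,
					    struct mm_struct *mm, void *pte)
	{
		(void)pte;
		/* PMD-sized mappings get the finer-grained split lock ... */
		if (shift == PMD_SHIFT)
			return &pmd_split_lock;
		/* ... everything else falls back to the mm-wide lock. */
		return &mm->page_table_lock;
	}

	int main(void)
	{
		struct mm_struct mm = { .page_table_lock = { "mm page_table_lock" } };

		printf("shift 21 (2MB) -> %s\n", huge_pte_lockptr(21, &mm, NULL)->name);
		printf("shift 30 (1GB) -> %s\n", huge_pte_lockptr(30, &mm, NULL)->name);
		return 0;
	}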
@@ -1171,7 +1170,7 @@ static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
 	return 0;
 }
 
-static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
+static inline spinlock_t *huge_pte_lockptr(unsigned int shift,
 					   struct mm_struct *mm, pte_t *pte)
 {
 	return &mm->page_table_lock;
@@ -1228,7 +1227,7 @@ static inline spinlock_t *huge_pte_lock(struct hstate *h,
 {
 	spinlock_t *ptl;
 
-	ptl = huge_pte_lockptr(h, mm, pte);
+	ptl = huge_pte_lockptr(huge_page_shift(h), mm, pte);
 	spin_lock(ptl);
 	return ptl;
 }
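huge_pte_lock() keeps its hstate signature and derives the shift at the boundary, so its many callers stay untouched. A hedged sketch of that adapter pattern, again with userspace stand-ins and hypothetical field layouts; only huge_page_shift() mirrors the kernel's actual order + PAGE_SHIFT definition:

	#include <stdio.h>

	#define PAGE_SHIFT	12

	typedef struct { int locked; } spinlock_t;

	struct mm_struct { spinlock_t page_table_lock; };
	struct hstate { unsigned int order; };	/* huge page = 1 << (order + PAGE_SHIFT) */

	static unsigned int huge_page_shift(struct hstate *h)
	{
		return h->order + PAGE_SHIFT;
	}

	/* New interface: picks a lock by shift (selection logic elided here). */
	static spinlock_t *huge_pte_lockptr(unsigned int shift,
					    struct mm_struct *mm, void *pte)
	{
		(void)shift; (void)pte;
		return &mm->page_table_lock;
	}

	static void spin_lock(spinlock_t *l) { l->locked = 1; }

	/* Old hstate-based entry point survives as a thin adapter. */
	static spinlock_t *huge_pte_lock(struct hstate *h,
					 struct mm_struct *mm, void *pte)
	{
		spinlock_t *ptl = huge_pte_lockptr(huge_page_shift(h), mm, pte);

		spin_lock(ptl);
		return ptl;
	}

	int main(void)
	{
		struct hstate h = { .order = 9 };	/* 2MB: shift = 9 + 12 = 21 */
		struct mm_struct mm = { { 0 } };
		spinlock_t *ptl = huge_pte_lock(&h, &mm, NULL);

		printf("shift=%u locked=%d\n", huge_page_shift(&h), ptl->locked);
		return 0;
	}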
@@ -4987,7 +4987,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		}
 
 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
-		src_ptl = huge_pte_lockptr(h, src, src_pte);
+		src_ptl = huge_pte_lockptr(huge_page_shift(h), src, src_pte);
 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
 		entry = huge_ptep_get(src_pte);
 again:
@@ -5068,7 +5068,8 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 
 			/* Install the new huge page if src pte stable */
 			dst_ptl = huge_pte_lock(h, dst, dst_pte);
-			src_ptl = huge_pte_lockptr(h, src, src_pte);
+			src_ptl = huge_pte_lockptr(huge_page_shift(h),
+						   src, src_pte);
 			spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
 			entry = huge_ptep_get(src_pte);
 			if (!pte_same(src_pte_old, entry)) {
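Both copy paths take two page-table locks at once: the destination via huge_pte_lock(), then the source via huge_pte_lockptr() plus spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING), the annotation that tells lockdep a second lock of the same class is intentional. A rough userspace model of that discipline, with pthread mutexes standing in for spinlocks and the lockdep annotation reduced to a comment:

	#include <pthread.h>
	#include <stdio.h>

	struct mm_struct { pthread_mutex_t page_table_lock; };

	/* Stand-in for huge_pte_lockptr(huge_page_shift(h), mm, pte). */
	static pthread_mutex_t *huge_pte_lockptr(struct mm_struct *mm)
	{
		return &mm->page_table_lock;
	}

	static void copy_one_huge_pte(struct mm_struct *dst, struct mm_struct *src)
	{
		pthread_mutex_t *dst_ptl = huge_pte_lockptr(dst);
		pthread_mutex_t *src_ptl = huge_pte_lockptr(src);

		/*
		 * Destination lock first, then source.  In the kernel the
		 * second acquisition uses spin_lock_nested(src_ptl,
		 * SINGLE_DEPTH_NESTING) so lockdep accepts two locks of
		 * the same class.
		 */
		pthread_mutex_lock(dst_ptl);
		pthread_mutex_lock(src_ptl);

		/* ... copy the huge pte under both locks ... */

		pthread_mutex_unlock(src_ptl);
		pthread_mutex_unlock(dst_ptl);
	}

	int main(void)
	{
		struct mm_struct src = { PTHREAD_MUTEX_INITIALIZER };
		struct mm_struct dst = { PTHREAD_MUTEX_INITIALIZER };

		copy_one_huge_pte(&dst, &src);
		puts("copied under both page-table locks");
		return 0;
	}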
@@ -5122,7 +5123,7 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
 	pte_t pte;
 
 	dst_ptl = huge_pte_lock(h, mm, dst_pte);
-	src_ptl = huge_pte_lockptr(h, mm, src_pte);
+	src_ptl = huge_pte_lockptr(huge_page_shift(h), mm, src_pte);
 
 	/*
 	 * We don't have to worry about the ordering of src and dst ptlocks
@@ -360,7 +360,8 @@ void __migration_entry_wait_huge(struct vm_area_struct *vma,
 
 void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte)
 {
-	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte);
+	spinlock_t *ptl = huge_pte_lockptr(huge_page_shift(hstate_vma(vma)),
+					   vma->vm_mm, pte);
 
 	__migration_entry_wait_huge(vma, pte, ptl);
 }
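Call sites that start from a VMA rather than an hstate now chain hstate_vma() and huge_page_shift() to produce the shift argument. A tiny sketch of that derivation; the vm_hstate field is a hypothetical stand-in, since the kernel actually derives the hstate from the VMA's backing file:

	#include <stdio.h>

	#define PAGE_SHIFT	12

	struct hstate { unsigned int order; };
	struct vm_area_struct { struct hstate *vm_hstate; };	/* illustrative */

	static struct hstate *hstate_vma(struct vm_area_struct *vma)
	{
		return vma->vm_hstate;
	}

	static unsigned int huge_page_shift(struct hstate *h)
	{
		return h->order + PAGE_SHIFT;
	}

	int main(void)
	{
		struct hstate h2m = { .order = 9 };	/* 2MB huge pages */
		struct vm_area_struct vma = { .vm_hstate = &h2m };

		printf("shift for this VMA: %u\n",
		       huge_page_shift(hstate_vma(&vma)));
		return 0;
	}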