@@ -52,7 +52,7 @@ pmd_free(struct mm_struct *mm, pmd_t *pmd)
}
static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
return pte;
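Reviewer note: every hunk in this series repeats the same mechanical change. No implementation of pte_alloc_one() or pte_alloc_one_kernel() ever reads the address argument, so it is dropped from the prototypes and from every call site. Illustrative before/after caller shape (kernel-style C, not compilable outside the tree):

        pgtable_t new = pte_alloc_one(mm, address);  /* before: address threaded through, ignored */
        pgtable_t new = pte_alloc_one(mm);           /* after: only the mm is needed */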
@@ -65,9 +65,9 @@ pte_free_kernel(struct mm_struct *mm, pte_t *pte)
}
static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pte_alloc_one(struct mm_struct *mm)
{
- pte_t *pte = pte_alloc_one_kernel(mm, address);
+ pte_t *pte = pte_alloc_one_kernel(mm);
struct page *page;
if (!pte)
@@ -90,8 +90,7 @@ static inline int __get_order_pte(void)
return get_order(PTRS_PER_PTE * sizeof(pte_t));
}
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
@@ -102,7 +101,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
}
static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pte_alloc_one(struct mm_struct *mm)
{
pgtable_t pte_pg;
struct page *page;
@@ -81,7 +81,7 @@ static inline void clean_pte_table(pte_t *pte)
* +------------+
*/
static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
+pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
@@ -93,7 +93,7 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
}
static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
@@ -91,13 +91,13 @@ extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);
static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
+pte_alloc_one_kernel(struct mm_struct *mm)
{
return (pte_t *)__get_free_page(PGALLOC_GFP);
}
static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
@@ -59,8 +59,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
free_page((unsigned long) pgd);
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline struct page *pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
@@ -75,8 +74,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
}
/* _kernel variant gets to use a different allocator */
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
gfp_t flags = GFP_KERNEL | __GFP_ZERO;
return (pte_t *) __get_free_page(flags);
@@ -83,7 +83,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
pmd_val(*pmd_entry) = __pa(pte);
}
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *page;
void *pg;
@@ -99,8 +99,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr)
return page;
}
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long addr)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
@@ -12,8 +12,7 @@ extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
extern const char bad_pmd_string[];
-extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
+extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
unsigned long page = __get_free_page(GFP_DMA);
@@ -32,8 +31,6 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
-#define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr)
-
#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
(unsigned long)(page_address(page)))
@@ -50,8 +47,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
#define __pmd_free_tlb(tlb, pmd, address) do { } while (0)
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline struct page *pte_alloc_one(struct mm_struct *mm)
{
struct page *page = alloc_pages(GFP_DMA, 0);
pte_t *pte;
@@ -8,7 +8,7 @@
extern pmd_t *get_pointer_table(void);
extern int free_pointer_table(pmd_t *);
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
@@ -28,7 +28,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
free_page((unsigned long) pte);
}
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *page;
pte_t *pte;
@@ -35,8 +35,7 @@ do { \
tlb_remove_page((tlb), pte); \
} while (0)
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
unsigned long page = __get_free_page(GFP_KERNEL);
@@ -47,8 +46,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
return (pte_t *) (page);
}
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *page = alloc_pages(GFP_KERNEL, 0);
@@ -108,10 +108,9 @@ static inline void free_pgd_slow(pgd_t *pgd)
#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline struct page *pte_alloc_one(struct mm_struct *mm)
{
struct page *ptepage;
@@ -132,20 +131,6 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
return ptepage;
}
-static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
- unsigned long address)
-{
- unsigned long *ret;
-
- ret = pte_quicklist;
- if (ret != NULL) {
- pte_quicklist = (unsigned long *)(*ret);
- ret[0] = 0;
- pgtable_cache_size--;
- }
- return (pte_t *)ret;
-}
-
static inline void pte_free_fast(pte_t *pte)
{
*(unsigned long **)pte = pte_quicklist;
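Reviewer note: the block deleted above is the one non-mechanical change in this hunk. pte_alloc_one_fast() was a leftover of the old quicklist scheme and apparently had no remaining callers; the address argument only ever existed to feed caches like this one. The scheme keeps freed page tables on a LIFO list threaded through the free pages themselves: the first word of a cached table stores the pointer to the next one. A minimal standalone sketch of that pattern, mirroring the removed code (quicklist_pop/quicklist_push are hypothetical names; pte_quicklist and pgtable_cache_size are from the source):

        #include <stddef.h>

        static unsigned long *pte_quicklist;      /* head of the freelist */
        static unsigned long pgtable_cache_size;  /* number of cached tables */

        /* Pop one cached table, or NULL if the list is empty. */
        static unsigned long *quicklist_pop(void)
        {
                unsigned long *ret = pte_quicklist;

                if (ret != NULL) {
                        pte_quicklist = (unsigned long *)(*ret); /* follow the link */
                        ret[0] = 0;                              /* scrub the link word */
                        pgtable_cache_size--;
                }
                return ret;
        }

        /* Push a table back: store the old head in its first word. */
        static void quicklist_push(unsigned long *pte)
        {
                *(unsigned long **)pte = pte_quicklist;
                pte_quicklist = pte;
                pgtable_cache_size++;
        }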
@@ -235,8 +235,7 @@ unsigned long iopa(unsigned long addr)
return pa;
}
-__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
+__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
if (mem_init_done) {
@@ -50,14 +50,12 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
free_pages((unsigned long)pgd, PGD_ORDER);
}
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
return (pte_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, PTE_ORDER);
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline struct page *pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
@@ -22,8 +22,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t * pgd);
#define check_pgt_cache() do { } while (0)
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long addr)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
@@ -34,7 +33,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
return pte;
}
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
pgtable_t pte;
@@ -37,8 +37,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
free_pages((unsigned long)pgd, PGD_ORDER);
}
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
@@ -47,8 +46,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
return pte;
}
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
@@ -70,10 +70,9 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
free_page((unsigned long)pgd);
}
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline struct page *pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
pte = alloc_pages(GFP_KERNEL, 0);
@@ -118,8 +118,7 @@ EXPORT_SYMBOL(iounmap);
* the memblock infrastructure.
*/
-pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
+pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
@@ -122,7 +122,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
#define pmd_pgtable(pmd) pmd_page(pmd)
static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pte_alloc_one(struct mm_struct *mm)
{
struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO);
if (!page)
@@ -135,7 +135,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
}
static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
+pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
return pte;
@@ -82,8 +82,8 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
#define pmd_pgtable(pmd) pmd_page(pmd)
#endif
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
-extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
+extern pgtable_t pte_alloc_one(struct mm_struct *mm);
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
@@ -42,7 +42,7 @@ extern struct kmem_cache *pgtable_cache[];
pgtable_cache[(shift) - 1]; \
})
-extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
+extern pte_t *pte_fragment_alloc(struct mm_struct *, int);
extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
extern void pte_fragment_free(unsigned long *, int);
extern void pmd_fragment_free(unsigned long *);
@@ -192,16 +192,14 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd)
return (pgtable_t)pmd_page_vaddr(pmd);
}
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
- return (pte_t *)pte_fragment_alloc(mm, address, 1);
+ return (pte_t *)pte_fragment_alloc(mm, 1);
}
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
- return (pgtable_t)pte_fragment_alloc(mm, address, 0);
+ return (pgtable_t)pte_fragment_alloc(mm, 0);
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
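Reviewer note: in the powerpc book3s64 wrappers above, the surviving int argument is easy to misread as a stand-in for the dropped address. It is the pre-existing kernel/user selector for the page-table fragment pool (user tables additionally go through the struct page constructor; see __alloc_for_ptecache() later in this patch). The resulting calling convention, illustratively:

        pte_t *kpte = pte_fragment_alloc(mm, 1);                /* kernel table */
        pgtable_t upte = (pgtable_t)pte_fragment_alloc(mm, 0);  /* user table */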
@@ -83,8 +83,8 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
#define pmd_pgtable(pmd) pmd_page(pmd)
#endif
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
-extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
+extern pgtable_t pte_alloc_one(struct mm_struct *mm);
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
@@ -96,14 +96,12 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
}
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *page;
pte_t *pte;
@@ -384,7 +384,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
return (pte_t *)ret;
}
-pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
+pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel)
{
pte_t *pte;
@@ -43,7 +43,7 @@ EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */
extern char etext[], _stext[], _sinittext[], _einittext[];
-__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
@@ -57,7 +57,7 @@ __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
return pte;
}
-pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *ptepage;
@@ -82,15 +82,13 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
#endif /* __PAGETABLE_PMD_FOLDED */
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
return (pte_t *)__get_free_page(
GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline struct page *pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
@@ -139,8 +139,8 @@ static inline void pmd_populate(struct mm_struct *mm,
/*
* page table entry allocation/free routines.
*/
-#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
-#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
+#define pte_alloc_one_kernel(mm) ((pte_t *)page_table_alloc(mm))
+#define pte_alloc_one(mm) ((pte_t *)page_table_alloc(mm))
#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
@@ -32,14 +32,12 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
/*
* Allocate and free page tables.
*/
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
}
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *page;
void *pg;
@@ -58,10 +58,9 @@ void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep);
void pmd_set(pmd_t *pmdp, pte_t *ptep);
#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
-pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
+pgtable_t pte_alloc_one(struct mm_struct *mm);
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
return srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
}
@@ -60,10 +60,8 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
kmem_cache_free(pgtable_cache, pmd);
}
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address);
-pgtable_t pte_alloc_one(struct mm_struct *mm,
- unsigned long address);
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
+pgtable_t pte_alloc_one(struct mm_struct *mm);
void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
void pte_free(struct mm_struct *mm, pgtable_t ptepage);
@@ -2925,8 +2925,7 @@ void __flush_tlb_all(void)
: : "r" (pstate));
}
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
pte_t *pte = NULL;
@@ -2937,8 +2936,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
return pte;
}
-pgtable_t pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
@@ -364,12 +364,12 @@ pgd_t *get_pgd_fast(void)
* Alignments up to the page size are the same for physical and virtual
* addresses of the nocache area.
*/
-pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm)
{
unsigned long pte;
struct page *page;
- if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
+ if ((pte = (unsigned long)pte_alloc_one_kernel(mm)) == 0)
return NULL;
page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
if (!pgtable_page_ctor(page)) {
@@ -25,8 +25,8 @@
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *);
+extern pgtable_t pte_alloc_one(struct mm_struct *);
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
@@ -199,7 +199,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
free_page((unsigned long) pgd);
}
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
@@ -207,7 +207,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
return pte;
}
-pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
@@ -34,7 +34,7 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
* Allocate one PTE table.
*/
static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
+pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
@@ -46,7 +46,7 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
}
static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
@@ -47,8 +47,8 @@ extern gfp_t __userpte_alloc_gfp;
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *);
+extern pgtable_t pte_alloc_one(struct mm_struct *);
/* Should really implement gc for free page table pages. This could be
done with a reference count in struct page. */
@@ -23,12 +23,12 @@ EXPORT_SYMBOL(physical_mask);
gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
}
-pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
@@ -38,8 +38,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
free_page((unsigned long)pgd);
}
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *ptep;
int i;
@@ -52,13 +51,12 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
return ptep;
}
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
- unsigned long addr)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
pte_t *pte;
struct page *page;
- pte = pte_alloc_one_kernel(mm, addr);
+ pte = pte_alloc_one_kernel(mm);
if (!pte)
return NULL;
page = virt_to_page(pte);
@@ -1817,8 +1817,8 @@ static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif
-int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
-int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
+int __pte_alloc_kernel(pmd_t *pmd);
/*
* The following ifdef needed to get the 4level-fixup.h header to work.
@@ -1956,18 +1956,17 @@ static inline void pgtable_page_dtor(struct page *page)
pte_unmap(pte); \
} while (0)
-#define pte_alloc(mm, pmd, address) \
- (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))
+#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
#define pte_alloc_map(mm, pmd, address) \
- (pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))
+ (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
- (pte_alloc(mm, pmd, address) ? \
+ (pte_alloc(mm, pmd) ? \
NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
#define pte_alloc_kernel(pmd, address) \
- ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
+ ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
NULL: pte_offset_kernel(pmd, address))
#if USE_SPLIT_PMD_PTLOCKS
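Reviewer note: observe the asymmetry these macros now encode. The address disappears from the allocation half (pte_alloc(), __pte_alloc(), __pte_alloc_kernel()) but is kept by the mapping half, since pte_offset_map() and pte_offset_map_lock() still need it to index the entry within the table. Typical caller shape under the new convention (an illustrative kernel-style fragment, not from this patch):

        pte_t *pte;
        spinlock_t *ptl;

        /* Allocate the table if the pmd is empty, then map and lock one entry. */
        pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
                return VM_FAULT_OOM;
        /* ... read or update *pte ... */
        pte_unmap_unlock(pte, ptl);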
@@ -558,7 +558,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
return VM_FAULT_FALLBACK;
}
- pgtable = pte_alloc_one(vma->vm_mm, haddr);
+ pgtable = pte_alloc_one(vma->vm_mm);
if (unlikely(!pgtable)) {
ret = VM_FAULT_OOM;
goto release;
@@ -702,7 +702,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
struct page *zero_page;
bool set;
vm_fault_t ret;
- pgtable = pte_alloc_one(vma->vm_mm, haddr);
+ pgtable = pte_alloc_one(vma->vm_mm);
if (unlikely(!pgtable))
return VM_FAULT_OOM;
zero_page = mm_get_huge_zero_page(vma->vm_mm);
@@ -791,7 +791,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
return VM_FAULT_SIGBUS;
if (arch_needs_pgtable_deposit()) {
- pgtable = pte_alloc_one(vma->vm_mm, addr);
+ pgtable = pte_alloc_one(vma->vm_mm);
if (!pgtable)
return VM_FAULT_OOM;
}
@@ -927,7 +927,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
if (!vma_is_anonymous(vma))
return 0;
- pgtable = pte_alloc_one(dst_mm, addr);
+ pgtable = pte_alloc_one(dst_mm);
if (unlikely(!pgtable))
goto out;
@@ -120,7 +120,7 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
pte_t *p;
if (slab_is_available())
- p = pte_alloc_one_kernel(&init_mm, addr);
+ p = pte_alloc_one_kernel(&init_mm);
else
p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
if (!p)
@@ -400,10 +400,10 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
}
}
-int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
spinlock_t *ptl;
- pgtable_t new = pte_alloc_one(mm, address);
+ pgtable_t new = pte_alloc_one(mm);
if (!new)
return -ENOMEM;
@@ -434,9 +434,9 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
return 0;
}
-int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
+int __pte_alloc_kernel(pmd_t *pmd)
{
- pte_t *new = pte_alloc_one_kernel(&init_mm, address);
+ pte_t *new = pte_alloc_one_kernel(&init_mm);
if (!new)
return -ENOMEM;
@@ -2895,7 +2895,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
*
* Here we only have down_read(mmap_sem).
*/
- if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))
+ if (pte_alloc(vma->vm_mm, vmf->pmd))
return VM_FAULT_OOM;
/* See the comment in pte_alloc_one_map() */
@@ -3042,7 +3042,7 @@ static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf)
pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
spin_unlock(vmf->ptl);
vmf->prealloc_pte = NULL;
- } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) {
+ } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
return VM_FAULT_OOM;
}
map_pte:
@@ -3121,7 +3121,7 @@ static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
* related to pte entry. Use the preallocated table for that.
*/
if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
- vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address);
+ vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
if (!vmf->prealloc_pte)
return VM_FAULT_OOM;
smp_wmb(); /* See comment in __pte_alloc() */
@@ -3359,8 +3359,7 @@ static vm_fault_t do_fault_around(struct vm_fault *vmf)
start_pgoff + nr_pages - 1);
if (pmd_none(*vmf->pmd)) {
- vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm,
- vmf->address);
+ vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
if (!vmf->prealloc_pte)
goto out;
smp_wmb(); /* See comment in __pte_alloc() */
@@ -2599,7 +2599,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
*
* Here we only have down_read(mmap_sem).
*/
- if (pte_alloc(mm, pmdp, addr))
+ if (pte_alloc(mm, pmdp))
goto abort;
/* See the comment in pte_alloc_one_map() */
@@ -238,7 +238,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
if (pmd_trans_unstable(old_pmd))
continue;
}
- if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
+ if (pte_alloc(new_vma->vm_mm, new_pmd))
break;
next = (new_addr + PMD_SIZE) & PMD_MASK;
if (extent > next - new_addr)
@@ -513,7 +513,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
break;
}
if (unlikely(pmd_none(dst_pmdval)) &&
- unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) {
+ unlikely(__pte_alloc(dst_mm, dst_pmd))) {
err = -ENOMEM;
break;
}
@@ -628,7 +628,7 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
BUG_ON(pmd_sect(*pmd));
if (pmd_none(*pmd)) {
- pte = pte_alloc_one_kernel(NULL, addr);
+ pte = pte_alloc_one_kernel(NULL);
if (!pte) {
kvm_err("Cannot allocate Hyp pte\n");
return -ENOMEM;
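Reviewer note on this final hunk: create_hyp_pmd_mappings() passes NULL as the mm. That is safe only because the pte_alloc_one_kernel() implementations on the architectures where this KVM code builds never dereference their mm argument (see the arm and arm64 hunks near the top, which allocate straight from the page allocator).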