Background: I've implemented 1K/2K page tables for s390. These sub-page
page tables are required to properly support the s390 virtualization
instruction with KVM. The SIE instruction requires that the page tables
have 256 page table entries (pte) followed by 256 page status table entries
(pgste). The pgstes are only required if the process is using the SIE
instruction. The pgstes are updated by the hardware and by the hypervisor
for a number of reasons, one of them being dirty and reference bit tracking.
To avoid wasting memory, the standard pte table allocation should return
a 1K/2K (31/64 bit) page table, or a 2K/4K page table if the process is using SIE.
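For orientation, the layout the SIE instruction expects on 64 bit could
be sketched as the structure below. This is an illustration only; the
struct and its name are made up and are not part of this patch:

	/*
	 * One 4K page holding a page table for a SIE process (64 bit):
	 * 256 page table entries (2K) directly followed by the 256
	 * page status table entries (2K). Without SIE only the first
	 * 2K is needed, so two pte tables fit into a single 4K page.
	 */
	struct sie_page_table {
		unsigned long pte[256];		/* hardware page table entries */
		unsigned long pgste[256];	/* page status table entries */
	};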
Problem: Page size on s390 is 4K, page table size is 1K or 2K. That means
the s390 version of pte_alloc_one cannot return a pointer to a struct
page. The trouble is that with the CONFIG_HIGHPTE feature on x86,
pte_alloc_one cannot return a pointer to a pte either, since that would
require more than 32 bits for the return value of pte_alloc_one (and the
pte * would not be accessible anyway since it is not kmapped).
Solution: The only solution I found to this dilemma is a new typedef: a
pgtable_t. For s390 pgtable_t will be a (pte *) - to be introduced with a
later patch. For everybody else it will be a (struct page *). The
additional problem with the initialization of the ptl lock and the
NR_PAGETABLE accounting is solved with a constructor pgtable_page_ctor and
a destructor pgtable_page_dtor. The page table allocation and free
functions need to call these two whenever a page table page is allocated or
freed. pmd_populate will get a pgtable_t instead of a struct page pointer.
To get the pgtable_t back from a pmd entry that has been installed with
pmd_populate, a new function pmd_pgtable is added. It replaces the pmd_page
call in free_pte_range and apply_to_pte_range.
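For reference, the two helpers added to include/linux/mm.h and the
typical shape of a converted allocator look roughly like this. The
allocator below is a simplified sketch based on alloc_page(), not the
code of any particular architecture:

	static inline void pgtable_page_ctor(struct page *page)
	{
		pte_lock_init(page);
		inc_zone_page_state(page, NR_PAGETABLE);
	}

	static inline void pgtable_page_dtor(struct page *page)
	{
		pte_lock_deinit(page);
		dec_zone_page_state(page, NR_PAGETABLE);
	}

	pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
	{
		struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);

		if (!page)
			return NULL;
		pgtable_page_ctor(page);	/* ptl init + NR_PAGETABLE accounting */
		return page;
	}

	void pte_free(struct mm_struct *mm, pgtable_t page)
	{
		pgtable_page_dtor(page);
		__free_page(page);
	}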
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
53 files changed:
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
#else
page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
- flush_dcache_page(page);
+ pgtable_page_ctor(page);
+ flush_dcache_page(page);
+ }
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *ptepage;
#ifdef CONFIG_HIGHPTE
- gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+ gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT | __GFP_ZERO;
- gfp_t flags = GFP_KERNEL | __GFP_REPEAT;
+ gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
#endif
ptepage = alloc_pages(flags, 0);
- if (ptepage)
- clear_highpage(ptepage);
+ if (!ptepage)
+ return NULL;
+ pgtable_page_ctor(ptepage);
free_page((unsigned long)pte);
}
-void pte_free(struct mm_struct *mm, struct page *ptepage)
+void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
#ifdef CONFIG_SMP
hash_page_sync();
#endif
+ pgtable_page_dtor(ptepage);
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
#endif
ptepage = alloc_pages(flags, 0);
+ pgtable_page_ctor(ptepage);
+ }
free_page((unsigned long)pte);
}
-void pte_free(struct mm_struct *mm, struct page *ptepage)
+void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
#ifdef CONFIG_SMP
hash_page_sync();
#endif
+ pgtable_page_dtor(ptepage);
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
page->index = (addr_t) table;
}
+ pgtable_page_ctor(page);
table = (unsigned long *) page_to_phys(page);
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
return table;
{
unsigned long *shadow = get_shadow_pte(table);
+ pgtable_page_dtor(virt_to_page(table));
if (shadow)
free_page((unsigned long) shadow);
free_page((unsigned long) table);
return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
}
srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
unsigned long pte;
+ struct page *page;
if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0)
return NULL;
- return pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
+ page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
+ pgtable_page_ctor(page);
+ return page;
}
static void srmmu_free_pte_fast(pte_t *pte)
srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
}
-static void srmmu_pte_free(struct page *pte)
+static void srmmu_pte_free(pgtable_t pte)
+ pgtable_page_dtor(pte);
p = (unsigned long)page_address(pte); /* Cached address (for test) */
if (p == 0)
BUG();
-static struct page *sun4c_pte_alloc_one(struct mm_struct *mm, unsigned long address)
+static pgtable_t sun4c_pte_alloc_one(struct mm_struct *mm, unsigned long address)
- pte_t *pte = sun4c_pte_alloc_one_kernel(mm, address);
+ pte_t *pte;
+ struct page *page;
+
+ pte = sun4c_pte_alloc_one_kernel(mm, address);
if (pte == NULL)
return NULL;
- return virt_to_page(pte);
+ page = virt_to_page(pte);
+ pgtable_page_ctor(page);
+ return page;
}
static inline void sun4c_free_pte_fast(pte_t *pte)
-static void sun4c_pte_free(struct page *pte)
+static void sun4c_pte_free(pgtable_t pte)
+ pgtable_page_dtor(pte);
sun4c_free_pte_fast(page_address(pte));
}
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *pte;
pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ if (pte)
+ pgtable_page_ctor(pte);
return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
#else
pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
+ if (pte)
+ pgtable_page_ctor(pte);
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
+ pgtable_page_dtor(pte);
paravirt_release_pt(page_to_pfn(pte));
tlb_remove_page(tlb, pte);
}
#endif /* STRICT_MM_TYPECHECKS */
+typedef struct page *pgtable_t;
+
#ifdef USE_48_BIT_KSEG
#define PAGE_OFFSET 0xffff800000000000UL
#else
-pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
+pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
{
pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET));
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
free_page((unsigned long)pte);
}
-static inline struct page *
-pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline pgtable_t
+pte_alloc_one(struct mm_struct *mm, unsigned long address)
- pte_t *pte = pte_alloc_one_kernel(mm, addr);
- if (pte)
- return virt_to_page(pte);
- return NULL;
+ pte_t *pte = pte_alloc_one_kernel(mm, address);
+ struct page *page;
+
+ if (!pte)
+ return NULL;
+ page = virt_to_page(pte);
+ pgtable_page_ctor(page);
+ return page;
-pte_free(struct mm_struct *mm, struct page *page)
+pte_free(struct mm_struct *mm, pgtable_t page)
+ pgtable_page_dtor(page);
#endif /* STRICT_MM_TYPECHECKS */
+typedef struct page *pgtable_t;
+
#endif /* CONFIG_MMU */
#include <asm/memory.h>
-static inline struct page *
+static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
struct page *pte;
if (pte) {
void *page = page_address(pte);
clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
+ pgtable_page_ctor(pte);
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+ pgtable_page_dtor(pte);
-pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
+pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
__pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
static __inline__ void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
{
set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte)));
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Allocate and free page tables
struct page *pte;
pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+ if (!pte)
+ return NULL;
+ pgtable_page_ctor(pte);
free_page((unsigned long)pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+ pgtable_page_dtor(pte);
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pte_free_tlb(tlb,pte) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb), pte); \
+} while (0)
#define check_pgt_cache() do { } while(0)
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
#endif
#define pte_val(x) ((x).pte)
#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte)
#define pmd_populate(mm, pmd, pte) pmd_set(pmd, page_address(pte))
+#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Allocate and free page tables.
-static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *pte;
pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+ pgtable_page_ctor(pte);
free_page((unsigned long)pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+ pgtable_page_dtor(pte);
-
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pte_free_tlb(tlb,pte) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb), pte); \
+} while (0)
#define check_pgt_cache() do { } while (0)
typedef struct { pmd_t pue[1]; } pud_t;
typedef struct { pud_t pge[1]; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
#define pte_val(x) ((x).pte)
#define pmd_val(x) ((x).ste[0])
do { \
__set_pmd((PMD), page_to_pfn(PAGE) << PAGE_SHIFT | _PAGE_TABLE); \
} while(0)
+#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Allocate and free page tables.
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
+extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+ pgtable_page_dtor(pte);
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pte_free_tlb(tlb,pte) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb),(pte)); \
+} while (0)
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
#endif
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
+ typedef struct page *pgtable_t;
# define pte_val(x) ((x).pte)
# define pmd_val(x) ((x).pmd)
typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;
+ typedef struct page *pgtable_t;
# endif
# define pte_val(x) (x)
#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
static inline void
-pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
+pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte)
{
pmd_val(*pmd_entry) = page_to_phys(pte);
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
pmd_val(*pmd_entry) = __pa(pte);
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long addr)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr)
- void *pg = quicklist_alloc(0, GFP_KERNEL, NULL);
- return pg ? virt_to_page(pg) : NULL;
+ struct page *page;
+ void *pg;
+
+ pg = quicklist_alloc(0, GFP_KERNEL, NULL);
+ if (!pg)
+ return NULL;
+ page = virt_to_page(pg);
+ pgtable_page_ctor(page);
+ return page;
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+ pgtable_page_dtor(pte);
quicklist_free_page(0, NULL, pte);
}
#define PTE_MASK PAGE_MASK
typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
#define pmd_val(x) ((x).pmd)
#define pgd_val(x) ((x).pgd)
set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
static __inline__ void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
{
set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte)));
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Allocate and free page tables.
-static __inline__ struct page *pte_alloc_one(struct mm_struct *mm,
+static __inline__ pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
struct page *pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
+ pgtable_page_ctor(pte);
free_page((unsigned long)pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+ pgtable_page_dtor(pte);
extern pmd_t *get_pointer_table(void);
extern int free_pointer_table(pmd_t *);
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte;
free_page((unsigned long) pte);
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
pte_t *pte;
nocache_page(pte);
}
kunmap(pte);
+ pgtable_page_ctor(page);
-static inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, pgtable_t page)
+ pgtable_page_dtor(page);
cache_page(kmap(page));
kunmap(page);
__free_page(page);
}
-static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *page)
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page)
+ pgtable_page_dtor(page);
cache_page(kmap(page));
kunmap(page);
__free_page(page);
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
{
pmd_set(pmd, page_address(page));
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
typedef struct { unsigned long pmd[16]; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
#define pte_val(x) ((x).pte)
#define pmd_val(x) ((&x)->pmd[0])
free_page((unsigned long) pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, pgtable_t page)
+ pgtable_page_dtor(page);
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pte_free_tlb(tlb,pte) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb), pte); \
+} while (0)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
return (pte_t *) (page);
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
{
struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
return NULL;
clear_highpage(page);
+ pgtable_page_ctor(page);
pmd_val(*pmd) = __pa((unsigned long)pte);
}
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
{
pmd_val(*pmd) = __pa((unsigned long)page_address(page));
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
#define pte_val(x) ((x).pte)
#define __pte(x) ((pte_t) { (x) } )
#endif
+typedef struct page *pgtable_t;
/*
* For 3-level pagetables we defines these ourselves, for 2-level the
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
{
set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Initialize a new pmd table with invalid pointers.
struct page *pte;
pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+ pgtable_page_ctor(pte);
+ }
free_pages((unsigned long)pte, PTE_ORDER);
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+ pgtable_page_dtor(pte);
__free_pages(pte, PTE_ORDER);
}
-#define __pte_free_tlb(tlb, pte) tlb_remove_page((tlb), (pte))
+#define __pte_free_tlb(tlb,pte) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb), pte); \
+} while (0)
#endif /* STRICT_MM_TYPECHECKS */
+typedef struct page *pgtable_t;
typedef struct __physmem_range {
unsigned long start_pfn;
#define pmd_populate(mm, pmd, pte_page) \
pmd_populate_kernel(mm, pmd, page_address(pte_page))
+#define pmd_pgtable(pmd) pmd_page(pmd)
-static inline struct page *
+static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ if (page)
+ pgtable_page_ctor(page);
free_page((unsigned long)pte);
}
-#define pte_free(mm, page) pte_free_kernel(page_address(page))
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+ pgtable_page_dtor(pte);
+ pte_free_kernel(mm, page_address(pte));
+}
#define check_pgt_cache() do { } while (0)
+typedef struct page *pgtable_t;
+
#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
(pmd_val(*(pmd)) = __pa(pte) | _PMD_PRESENT)
#define pmd_populate(mm, pmd, pte) \
(pmd_val(*(pmd)) = (page_to_pfn(pte) << PAGE_SHIFT) | _PMD_PRESENT)
+#define pmd_pgtable(pmd) pmd_page(pmd)
#else
#define pmd_populate_kernel(mm, pmd, pte) \
(pmd_val(*(pmd)) = (unsigned long)pte | _PMD_PRESENT)
#define pmd_populate(mm, pmd, pte) \
(pmd_val(*(pmd)) = (unsigned long)lowmem_page_address(pte) | _PMD_PRESENT)
+#define pmd_pgtable(pmd) pmd_page(pmd)
#endif
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
-extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
-extern void pte_free(struct mm_struct *mm, struct page *pte);
+extern void pte_free(struct mm_struct *mm, pgtable_t pte);
#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
#define pmd_populate(mm, pmd, pte_page) \
pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
+#define pmd_pgtable(pmd) pmd_page(pmd)
#else /* CONFIG_PPC_64K_PAGES */
#define pmd_populate(mm, pmd, pte_page) \
pmd_populate_kernel(mm, pmd, page_address(pte_page))
+#define pmd_pgtable(pmd) pmd_page(pmd)
#endif /* CONFIG_PPC_64K_PAGES */
return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
- pte_t *pte = pte_alloc_one_kernel(mm, address);
- return pte ? virt_to_page(pte) : NULL;
+ struct page *page;
+ pte_t *pte;
+
+ pte = pte_alloc_one_kernel(mm, address);
+ if (!pte)
+ return NULL;
+ page = virt_to_page(pte);
+ pgtable_page_ctor(page);
+ return page;
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
free_page((unsigned long)pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *ptepage)
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+ pgtable_page_dtor(ptepage);
extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
-#define __pte_free_tlb(tlb, ptepage) \
+#define __pte_free_tlb(tlb,ptepage) \
+do { \
+ pgtable_page_dtor(ptepage); \
- pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
+ pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
- PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1))
+ PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
+} while (0)
#define __pmd_free_tlb(tlb, pmd) \
pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
(pmd_val(*(pmd)) = __pa(pte) | _PMD_PRESENT)
#define pmd_populate(mm, pmd, pte) \
(pmd_val(*(pmd)) = (page_to_pfn(pte) << PAGE_SHIFT) | _PMD_PRESENT)
+#define pmd_pgtable(pmd) pmd_page(pmd)
#else
#define pmd_populate_kernel(mm, pmd, pte) \
(pmd_val(*(pmd)) = (unsigned long)pte | _PMD_PRESENT)
#define pmd_populate(mm, pmd, pte) \
(pmd_val(*(pmd)) = (unsigned long)lowmem_page_address(pte) | _PMD_PRESENT)
+#define pmd_pgtable(pmd) pmd_page(pmd)
#endif
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
-extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
-extern void pte_free(struct mm_struct *mm, struct page *pte);
+extern void pte_free(struct mm_struct *mm, pgtable_t pte);
#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
+typedef struct page *pgtable_t;
+
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
-pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
+pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
{
pte_t *pte = (pte_t *)page_to_phys(page);
pmd_t *shadow_pmd = get_shadow_table(pmd);
if (shadow_pmd && shadow_pte)
pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* page table entry allocation/free routines.
* pte_free_tlb frees a pte table and clears the CRSTE for the
* page table from the tlb.
*/
-static inline void pte_free_tlb(struct mmu_gather *tlb, struct page *page)
+static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t page)
{
if (!tlb->fullmm) {
tlb->array[tlb->nr_ptes++] = page;
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
+typedef struct page *pgtable_t;
+
#endif /* !__ASSEMBLY__ */
/*
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
{
set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
static inline void pgd_ctor(void *x)
{
return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
- void *pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
- return pg ? virt_to_page(pg) : NULL;
+ struct page *page;
+ void *pg;
+
+ pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+ if (!pg)
+ return NULL;
+ page = virt_to_page(pg);
+ pgtable_page_ctor(page);
+ return page;
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
quicklist_free(QUICK_PT, NULL, pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+ pgtable_page_dtor(pte);
quicklist_free_page(QUICK_PT, NULL, pte);
}
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pte_free_tlb(tlb,pte) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb), (pte)); \
+} while (0)
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
+typedef struct page *pgtable_t;
+
extern unsigned long sparc_unmapped_base;
BTFIXUPDEF_SETHI(sparc_unmapped_base)
BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *)
#define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE)
+#define pmd_pgtable(pmd) pmd_page(pmd)
BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)
#define pmd_populate_kernel(MM, PMD, PTE) BTFIXUP_CALL(pmd_set)(PMD, PTE)
-BTFIXUPDEF_CALL(struct page *, pte_alloc_one, struct mm_struct *, unsigned long)
+BTFIXUPDEF_CALL(pgtable_t , pte_alloc_one, struct mm_struct *, unsigned long)
#define pte_alloc_one(mm, address) BTFIXUP_CALL(pte_alloc_one)(mm, address)
BTFIXUPDEF_CALL(pte_t *, pte_alloc_one_kernel, struct mm_struct *, unsigned long)
#define pte_alloc_one_kernel(mm, addr) BTFIXUP_CALL(pte_alloc_one_kernel)(mm, addr)
BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
#define pte_free_kernel(mm, pte) BTFIXUP_CALL(free_pte_fast)(pte)
-BTFIXUPDEF_CALL(void, pte_free, struct page *)
+BTFIXUPDEF_CALL(void, pte_free, pgtable_t )
#define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte)
#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
#endif /* (STRICT_MM_TYPECHECKS) */
+typedef struct page *pgtable_t;
+
#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \
(_AC(0x0000000070000000,UL)) : \
(_AC(0xfffff80000000000,UL) + (1UL << 32UL)))
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
- void *pg = quicklist_alloc(0, GFP_KERNEL, NULL);
- return pg ? virt_to_page(pg) : NULL;
+ struct page *page;
+ void *pg;
+
+ pg = quicklist_alloc(0, GFP_KERNEL, NULL);
+ if (!pg)
+ return NULL;
+ page = virt_to_page(pg);
+ pgtable_page_ctor(page);
+ return page;
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
quicklist_free(0, NULL, pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *ptepage)
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+ pgtable_page_dtor(ptepage);
quicklist_free_page(0, NULL, ptepage);
}
#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
#define pmd_populate(MM,PMD,PTE_PAGE) \
pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
+#define pmd_pgtable(pmd) pmd_page(pmd)
static inline void check_pgt_cache(void)
{
typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
+
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
set_pmd(pmd, __pmd(_PAGE_TABLE + \
((unsigned long long)page_to_pfn(pte) << \
(unsigned long long) PAGE_SHIFT)))
+#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Allocate and free page tables.
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
+extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long) pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+ pgtable_page_dtor(pte);
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pte_free_tlb(tlb,pte) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb),(pte)); \
+} while (0)
#ifdef CONFIG_3_LEVEL_PGTABLES
typedef union { pteval_t pte, pte_low; } pte_t;
typedef pte_t boot_pte_t;
+typedef struct page *pgtable_t;
+
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_X86_PAE */
typedef unsigned long pgprotval_t;
typedef unsigned long phys_addr_t;
+typedef struct page *pgtable_t;
+
typedef struct { pteval_t pte; } pte_t;
#define vmemmap ((struct page *)VMEMMAP_START)
paravirt_alloc_pt(mm, pfn);
set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}
+#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Allocate and free page tables.
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
+extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+ pgtable_page_dtor(pte);
#define pgd_populate(mm, pgd, pud) \
set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)))
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
- void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+ struct page *page;
+ void *p;
+
+ p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
- return virt_to_page(p);
+ page = virt_to_page(p);
+ pgtable_page_ctor(page);
+ return page;
}
/* Should really implement gc for free page table pages. This could be
free_page((unsigned long)pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+ pgtable_page_dtor(pte);
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pte_free_tlb(tlb,pte) \
+do { \
+ pgtable_page_dtor((pte)); \
+ tlb_remove_page((tlb), (pte)); \
+} while (0)
#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
#define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
typedef struct { unsigned long pte; } pte_t; /* page table entry */
typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */
typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
(pmd_val(*(pmdp)) = ((unsigned long)ptep))
#define pmd_populate(mm, pmdp, page) \
(pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
+#define pmd_pgtable(pmd) pmd_page(pmd)
static inline pgd_t*
pgd_alloc(struct mm_struct *mm)
return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long addr)
+static inline pte_token_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long addr)
- return virt_to_page(pte_alloc_one_kernel(mm, addr));
+ struct page *page;
+
+ page = virt_to_page(pte_alloc_one_kernel(mm, addr));
+ pgtable_page_ctor(page);
+ return page;
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
kmem_cache_free(pgtable_cache, pte);
}
-static inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
- kmem_cache_free(pgtable_cache, page_address(page));
+ pgtable_page_dtor(pte);
+ kmem_cache_free(pgtable_cache, page_address(pte));
+#define pmd_pgtable(pmd) pmd_page(pmd)
#endif /* __KERNEL__ */
#endif /* _XTENSA_PGALLOC_H */
#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;})
#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+static inline void pgtable_page_ctor(struct page *page)
+{
+ pte_lock_init(page);
+ inc_zone_page_state(page, NR_PAGETABLE);
+}
+
+static inline void pgtable_page_dtor(struct page *page)
+{
+ pte_lock_deinit(page);
+ dec_zone_page_state(page, NR_PAGETABLE);
+}
+
#define pte_offset_map_lock(mm, pmd, address, ptlp) \
({ \
spinlock_t *__ptl = pte_lockptr(mm, pmd); \
#define FOLL_GET 0x04 /* do get_page on page */
#define FOLL_ANON 0x08 /* give ZERO_PAGE if no pgtable */
-typedef int (*pte_fn_t)(pte_t *pte, struct page *pmd_page, unsigned long addr,
+typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
unsigned long size, pte_fn_t fn, void *data);
*/
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
{
- struct page *page = pmd_page(*pmd);
+ pgtable_t token = pmd_pgtable(*pmd);
- pte_lock_deinit(page);
- pte_free_tlb(tlb, page);
- dec_zone_page_state(page, NR_PAGETABLE);
+ pte_free_tlb(tlb, token);
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
- struct page *new = pte_alloc_one(mm, address);
+ pgtable_t new = pte_alloc_one(mm, address);
if (!new)
return -ENOMEM;
spin_lock(&mm->page_table_lock);
- if (pmd_present(*pmd)) { /* Another has populated it */
- pte_lock_deinit(new);
- pte_free(mm, new);
- } else {
+ if (!pmd_present(*pmd)) { /* Has another populated it ? */
- inc_zone_page_state(new, NR_PAGETABLE);
pmd_populate(mm, pmd, new);
+ new = NULL;
}
spin_unlock(&mm->page_table_lock);
+ if (new)
+ pte_free(mm, new);
return -ENOMEM;
spin_lock(&init_mm.page_table_lock);
- if (pmd_present(*pmd)) /* Another has populated it */
- pte_free_kernel(&init_mm, new);
- else
+ if (!pmd_present(*pmd)) { /* Has another populated it ? */
pmd_populate_kernel(&init_mm, pmd, new);
+ new = NULL;
+ }
spin_unlock(&init_mm.page_table_lock);
+ if (new)
+ pte_free_kernel(&init_mm, new);
spinlock_t *uninitialized_var(ptl);
pte = (mm == &init_mm) ?
- pmd_page = pmd_page(*pmd);
+ token = pmd_pgtable(*pmd);
- err = fn(pte, pmd_page, addr, data);
+ err = fn(pte, token, addr, data);
if (err)
break;
} while (pte++, addr += PAGE_SIZE, addr != end);
-static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
+static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
/* apply_to_page_range() does all the hard work. */
return 0;