/*
 *  include/asm-s390/pgtable.h
 *
 *  Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Hartmut Penner (hp@de.ibm.com)
 *             Ulrich Weigand (weigand@de.ibm.com)
 *             Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <asm-generic/4level-fixup.h>
/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <asm/bug.h>
#include <asm/processor.h>
extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, pte)     do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */
/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef __s390x__
# define PMD_SHIFT      22
# define PGDIR_SHIFT    22
#else /* __s390x__ */
# define PMD_SHIFT      21
# define PGDIR_SHIFT    31
#endif /* __s390x__ */
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390 four segment-table entries are combined into one PGD
 * entry, which leads to 1024 ptes per pgd.
 */
#ifndef __s390x__
# define PTRS_PER_PTE    1024
# define PTRS_PER_PMD    1
# define PTRS_PER_PGD    512
#else /* __s390x__ */
# define PTRS_PER_PTE    512
# define PTRS_PER_PMD    1024
# define PTRS_PER_PGD    2048
#endif /* __s390x__ */
#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
#ifndef __ASSEMBLY__
/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 * The vmalloc area starts at 4GB to prevent syscall table entry exchanging.
 */
extern unsigned long vmalloc_end;
#ifdef __s390x__
#define VMALLOC_ADDR    (max(0x100000000UL, (unsigned long) high_memory))
#else /* __s390x__ */
#define VMALLOC_ADDR    ((unsigned long) high_memory)
#endif /* __s390x__ */
#define VMALLOC_OFFSET  (8*1024*1024)
#define VMALLOC_START   ((VMALLOC_ADDR + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END     vmalloc_end
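
/*
 * Worked example (illustrative, not part of the original header):
 * with high_memory == 0x1f800000 the rounding above yields
 *   VMALLOC_START == (0x1f800000 + 0x800000) & ~0x7fffffUL == 0x20000000,
 * i.e. the vmalloc area starts at the next 8MB boundary, leaving the
 * intended hole after physical memory.
 */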
/*
 * We need some free virtual space to be able to do vmalloc.
 * VMALLOC_MIN_SIZE defines the minimum size of the vmalloc
 * area. On a machine with 2GB memory we make sure that we
 * have at least 128MB free space for vmalloc. On a machine
 * with 4TB we make sure we have at least 128GB.
 */
#ifndef __s390x__
#define VMALLOC_MIN_SIZE        0x8000000UL
#define VMALLOC_END_INIT        0x80000000UL
#else /* __s390x__ */
#define VMALLOC_MIN_SIZE        0x2000000000UL
#define VMALLOC_END_INIT        0x40000000000UL
#endif /* __s390x__ */
/*
 * A 31 bit pagetable entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segmenttable entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segmenttable origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has the following format:
 * |                     PFRA                         |0IP0|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 64 bit segmenttable entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit regiontable origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
/* Hardware bits in the page table entry */
#define _PAGE_RO        0x200           /* HW read-only bit  */
#define _PAGE_INVALID   0x400           /* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT       0x001           /* SW pte type bit t */
#define _PAGE_SWX       0x002           /* SW pte type bit x */
/* Eight different types of pages. */
#define _PAGE_TYPE_EMPTY        0x400
#define _PAGE_TYPE_NONE         0x401
#define _PAGE_TYPE_SWAP         0x403
#define _PAGE_TYPE_FILE         0x601   /* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO           0x200
#define _PAGE_TYPE_RW           0x000
#define _PAGE_TYPE_EX_RO        0x202
#define _PAGE_TYPE_EX_RW        0x002
/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses ipte to invalidate a
 * given pte. ipte sets the hw invalid bit and clears all tlbs for the page.
 * The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *                      irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY     1000   ->   1000
 * _PAGE_TYPE_NONE      1001   ->   1001
 * _PAGE_TYPE_SWAP      1011   ->   1011
 * _PAGE_TYPE_FILE      11?1   ->   11?1
 * _PAGE_TYPE_RO        0100   ->   1100
 * _PAGE_TYPE_RW        0000   ->   1000
 * _PAGE_TYPE_EX_RO     0110   ->   1110
 * _PAGE_TYPE_EX_RW     0010   ->   1010
 *
 * pte_none is true for bits combinations 1000, 1010, 1100, 1110
 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for bits combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */
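
/*
 * Illustrative sanity checks (not part of the original header): the
 * encodings above must keep the software type bits consistent with the
 * table, e.g. among the invalid types only NONE/SWAP/FILE carry the
 * swt bit, and SWAP also carries the swx bit.
 */
#if (_PAGE_TYPE_EMPTY & _PAGE_SWT) || !(_PAGE_TYPE_NONE & _PAGE_SWT) || \
    !(_PAGE_TYPE_SWAP & _PAGE_SWX) || !(_PAGE_TYPE_FILE & _PAGE_INVALID)
#error "pte type encoding does not match the table above"
#endif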
#ifndef __s390x__

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH      0x80000000UL    /* space switch event       */
#define _ASCE_ORIGIN_MASK       0x7ffff000UL    /* segment table origin     */
#define _ASCE_PRIVATE_SPACE     0x100   /* private space control            */
#define _ASCE_ALT_EVENT         0x80    /* storage alteration event control */
#define _ASCE_TABLE_LENGTH      0x7f    /* 128 x 64 entries = 8k            */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN   0x7fffffc0UL    /* page table origin        */
#define _SEGMENT_ENTRY_INV      0x20    /* invalid segment table entry      */
#define _SEGMENT_ENTRY_COMMON   0x10    /* common segment bit               */
#define _SEGMENT_ENTRY_PTL      0x0f    /* page table length                */

#define _SEGMENT_ENTRY          (_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY    (_SEGMENT_ENTRY_INV)
#else /* __s390x__ */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN            ~0xfffUL /* segment table origin            */
#define _ASCE_PRIVATE_SPACE     0x100   /* private space control            */
#define _ASCE_ALT_EVENT         0x80    /* storage alteration event control */
#define _ASCE_SPACE_SWITCH      0x40    /* space switch event               */
#define _ASCE_REAL_SPACE        0x20    /* real space control               */
#define _ASCE_TYPE_MASK         0x0c    /* asce table type mask             */
#define _ASCE_TYPE_REGION1      0x0c    /* region first table type          */
#define _ASCE_TYPE_REGION2      0x08    /* region second table type         */
#define _ASCE_TYPE_REGION3      0x04    /* region third table type          */
#define _ASCE_TYPE_SEGMENT      0x00    /* segment table type               */
#define _ASCE_TABLE_LENGTH      0x03    /* region table length              */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN    ~0xfffUL /* region/segment table origin     */
#define _REGION_ENTRY_INV       0x20    /* invalid region table entry       */
#define _REGION_ENTRY_TYPE_MASK 0x0c    /* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1   0x0c    /* region first table type          */
#define _REGION_ENTRY_TYPE_R2   0x08    /* region second table type         */
#define _REGION_ENTRY_TYPE_R3   0x04    /* region third table type          */
#define _REGION_ENTRY_LENGTH    0x03    /* region third length              */

#define _REGION1_ENTRY          (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY          (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY          (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN   ~0x7ffUL /* segment table origin            */
#define _SEGMENT_ENTRY_RO       0x200   /* page protection bit              */
#define _SEGMENT_ENTRY_INV      0x20    /* invalid segment table entry      */

#define _SEGMENT_ENTRY          (0)
#define _SEGMENT_ENTRY_EMPTY    (_SEGMENT_ENTRY_INV)

#endif /* __s390x__ */
/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS         (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
                                 _ASCE_ALT_EVENT)

/* Bits in the storage key */
#define _PAGE_CHANGED    0x02   /* HW changed bit    */
#define _PAGE_REFERENCED 0x04   /* HW referenced bit */
/*
 * Page protection definitions.
 */
#define PAGE_NONE       __pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO         __pgprot(_PAGE_TYPE_RO)
#define PAGE_RW         __pgprot(_PAGE_TYPE_RW)
#define PAGE_EX_RO      __pgprot(_PAGE_TYPE_EX_RO)
#define PAGE_EX_RW      __pgprot(_PAGE_TYPE_EX_RW)

#define PAGE_KERNEL     PAGE_RW
#define PAGE_COPY       PAGE_RO
/*
 * Depending on the EXEC_PROTECT option s390 can do execute protection.
 * Write permission always implies read permission. In theory with a
 * primary/secondary page table execute only can be implemented but
 * it would cost an additional bit in the pte to distinguish all the
 * different pte types. To avoid that execute permission currently
 * implies read permission as well. The mapping from vm flags to the
 * pgprot values below is shown in the sketch after the tables.
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_RO
#define __P010  PAGE_RO
#define __P011  PAGE_RO
#define __P100  PAGE_EX_RO
#define __P101  PAGE_EX_RO
#define __P110  PAGE_EX_RO
#define __P111  PAGE_EX_RO

#define __S000  PAGE_NONE
#define __S001  PAGE_RO
#define __S010  PAGE_RW
#define __S011  PAGE_RW
#define __S100  PAGE_EX_RO
#define __S101  PAGE_EX_RO
#define __S110  PAGE_EX_RW
#define __S111  PAGE_EX_RW
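
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): the generic mm code builds protection_map[] from the
 * __P/__S tables above, indexed by the read/write/exec/shared bits of
 * a mapping. A private PROT_READ|PROT_EXEC mapping thus gets
 * __P101 == PAGE_EX_RO: execution is allowed, stores fault.
 */
static inline pgprot_t example_vm_prot(int read, int write, int exec,
                                       int shared)
{
        pgprot_t prot[16] = {
                __P000, __P001, __P010, __P011,
                __P100, __P101, __P110, __P111,
                __S000, __S001, __S010, __S011,
                __S100, __S101, __S110, __S111
        };

        /* same index layout as protection_map: shared|exec|write|read */
        return prot[(shared ? 8 : 0) | (exec ? 4 : 0) |
                    (write ? 2 : 0) | (read ? 1 : 0)];
}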
#ifndef __s390x__
# define PxD_SHADOW_SHIFT       1
#else /* __s390x__ */
# define PxD_SHADOW_SHIFT       2
#endif /* __s390x__ */

static inline struct page *get_shadow_page(struct page *page)
{
        if (s390_noexec && page->index)
                return virt_to_page((void *)(addr_t) page->index);
        return NULL;
}
static inline void *get_shadow_pte(void *table)
{
        unsigned long addr, offset;
        struct page *page;

        addr = (unsigned long) table;
        offset = addr & (PAGE_SIZE - 1);
        page = virt_to_page((void *)(addr ^ offset));
        return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}

static inline void *get_shadow_table(void *table)
{
        unsigned long addr, offset;
        struct page *page;

        addr = (unsigned long) table;
        offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
        page = virt_to_page((void *)(addr ^ offset));
        return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *pteptr, pte_t pteval)
{
        pte_t *shadow_pte = get_shadow_pte(pteptr);

        *pteptr = pteval;
        if (shadow_pte) {
                if (!(pte_val(pteval) & _PAGE_INVALID) &&
                    (pte_val(pteval) & _PAGE_SWX))
                        pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO;
                else
                        pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
        }
}
/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }

#else /* __s390x__ */

static inline int pgd_present(pgd_t pgd)
{
        return pgd_val(pgd) & _REGION_ENTRY_ORIGIN;
}

static inline int pgd_none(pgd_t pgd)
{
        return pgd_val(pgd) & _REGION_ENTRY_INV;
}

static inline int pgd_bad(pgd_t pgd)
{
        unsigned long mask = ~_REGION_ENTRY_ORIGIN & ~_REGION_ENTRY_INV;
        return (pgd_val(pgd) & mask) != _REGION3_ENTRY;
}

#endif /* __s390x__ */
static inline int pmd_present(pmd_t pmd)
{
        return pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
}

static inline int pmd_none(pmd_t pmd)
{
        return pmd_val(pmd) & _SEGMENT_ENTRY_INV;
}

static inline int pmd_bad(pmd_t pmd)
{
        unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
        return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

static inline int pte_none(pte_t pte)
{
        return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
        unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
        return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
                (!(pte_val(pte) & _PAGE_INVALID) &&
                 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
        unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
        return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}
#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b)  (pte_val(a) == pte_val(b))

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
        return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
        /* A pte is neither clean nor dirty on s/390. The dirty bit
         * is in the storage key. See page_test_and_clear_dirty for
         * the relevant functions.
         */
        return 0;
}

static inline int pte_young(pte_t pte)
{
        /* A pte is neither young nor old on s/390. The young bit
         * is in the storage key. See page_test_and_clear_young for
         * the relevant functions.
         */
        return 0;
}
/*
 * pgd/pmd/pte modification functions
 */

#ifndef __s390x__

static inline void pgd_clear(pgd_t * pgdp)      { }

static inline void pmd_clear_kernel(pmd_t * pmdp)
{
        pmd_val(pmdp[0]) = _SEGMENT_ENTRY_EMPTY;
        pmd_val(pmdp[1]) = _SEGMENT_ENTRY_EMPTY;
        pmd_val(pmdp[2]) = _SEGMENT_ENTRY_EMPTY;
        pmd_val(pmdp[3]) = _SEGMENT_ENTRY_EMPTY;
}

#else /* __s390x__ */

static inline void pgd_clear_kernel(pgd_t * pgdp)
{
        pgd_val(*pgdp) = _REGION3_ENTRY_EMPTY;
}

static inline void pgd_clear(pgd_t * pgdp)
{
        pgd_t *shadow_pgd = get_shadow_table(pgdp);

        pgd_clear_kernel(pgdp);
        if (shadow_pgd)
                pgd_clear_kernel(shadow_pgd);
}

static inline void pmd_clear_kernel(pmd_t * pmdp)
{
        pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
        pmd_val1(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

#endif /* __s390x__ */

static inline void pmd_clear(pmd_t * pmdp)
{
        pmd_t *shadow_pmd = get_shadow_table(pmdp);

        pmd_clear_kernel(pmdp);
        if (shadow_pmd)
                pmd_clear_kernel(shadow_pmd);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_t *shadow_pte = get_shadow_pte(ptep);

        pte_val(*ptep) = _PAGE_TYPE_EMPTY;
        if (shadow_pte)
                pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
}
/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_val(pte) &= PAGE_MASK;
        pte_val(pte) |= pgprot_val(newprot);
        return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        /* Do not clobber _PAGE_TYPE_NONE pages! */
        if (!(pte_val(pte) & _PAGE_INVALID))
                pte_val(pte) |= _PAGE_RO;
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_RO;
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        /* The only user of pte_mkclean is the fork() code.
           We must *not* clear the *physical* page dirty bit
           just because fork() wants to clear the dirty bit in
           *one* of the page's mappings. So we just do nothing. */
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        /* We do not explicitly set the dirty bit because the
         * sske instruction is slow. It is faster to let the
         * next instruction set the dirty bit.
         */
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        /* S/390 doesn't keep its dirty/referenced bit in the pte.
         * There is no point in clearing the real referenced bit.
         */
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        /* S/390 doesn't keep its dirty/referenced bit in the pte.
         * There is no point in setting the real referenced bit.
         */
        return pte;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long addr, pte_t *ptep)
{
        return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
                                         unsigned long address, pte_t *ptep)
{
        /* No need to flush TLB; bits are in storage key */
        return ptep_test_and_clear_young(vma, address, ptep);
}
static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
        if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
                /* S390 has 1mb segments, we are emulating 4MB segments */
                pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
                /* ipte in zarch mode can do the math */
                pte_t *pto = ptep;
#endif
                asm volatile(
                        "       ipte    %2,%3"
                        : "=m" (*ptep) : "m" (*ptep),
                          "a" (pto), "a" (address));
        }
        pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}

static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
{
        __ptep_ipte(address, ptep);
        ptep = get_shadow_pte(ptep);
        if (ptep)
                __ptep_ipte(address, ptep);
}
/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)                     \
({                                                                      \
        pte_t __pte = *(__ptep);                                        \
        if (atomic_read(&(__mm)->mm_users) > 1 ||                       \
            (__mm) != current->active_mm)                               \
                ptep_invalidate(__address, __ptep);                     \
        else                                                            \
                pte_clear((__mm), (__address), (__ptep));               \
        __pte;                                                          \
})
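
/*
 * Illustrative sketch (hypothetical, not part of the original header):
 * the common-code sequence described above, as e.g. change_pte_range()
 * performs it. ptep_get_and_clear() already flushed the TLB entry, so
 * no flush_tlb_range() is needed afterwards on s390.
 */
static inline void example_change_prot(struct mm_struct *mm,
                                       unsigned long addr, pte_t *ptep,
                                       pgprot_t newprot)
{
        pte_t pte = ptep_get_and_clear(mm, addr, ptep); /* 1) clear + flush */

        pte = pte_modify(pte, newprot);                 /* 2) modify        */
        set_pte_at(mm, addr, ptep, pte);                /* 3) re-install    */
}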
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
                                     unsigned long address, pte_t *ptep)
{
        pte_t pte = *ptep;
        ptep_invalidate(address, ptep);
        return pte;
}
/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long addr,
                                            pte_t *ptep, int full)
{
        pte_t pte = *ptep;

        if (full)
                pte_clear(mm, addr, ptep);
        else
                ptep_invalidate(addr, ptep);
        return pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect(__mm, __addr, __ptep)                        \
({                                                                      \
        pte_t __pte = *(__ptep);                                        \
        if (pte_write(__pte)) {                                         \
                if (atomic_read(&(__mm)->mm_users) > 1 ||               \
                    (__mm) != current->active_mm)                       \
                        ptep_invalidate(__addr, __ptep);                \
                set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
        }                                                               \
})
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty)  \
({                                                                      \
        int __changed = !pte_same(*(__ptep), __entry);                  \
        if (__changed) {                                                \
                ptep_invalidate(__addr, __ptep);                        \
                set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);    \
        }                                                               \
        __changed;                                                      \
})
/*
 * Test and clear dirty bit in storage key.
 * We can't clear the changed bit atomically. This is a potential
 * race against modification of the referenced bit. This function
 * should therefore only be called if it is not mapped in any
 * address space.
 */
#define __HAVE_ARCH_PAGE_TEST_DIRTY
static inline int page_test_dirty(struct page *page)
{
        return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
}

#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
static inline void page_clear_dirty(struct page *page)
{
        page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
}
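
/*
 * Illustrative sketch (hypothetical, not part of the original header):
 * how a caller would combine the two helpers above, observing the
 * constraint that the page must no longer be mapped in any address
 * space when the changed bit is cleared.
 */
static inline int example_test_and_clear_dirty(struct page *page)
{
        int dirty = page_test_dirty(page);

        if (dirty)
                page_clear_dirty(page);
        return dirty;
}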
/*
 * Test and clear referenced bit in storage key.
 */
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
static inline int page_test_and_clear_young(struct page *page)
{
        unsigned long physpage = page_to_phys(page);
        int ccode;

        asm volatile(
                "       rrbe    0,%1\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (ccode) : "a" (physpage) : "cc" );
        return ccode & 2;
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
        pte_t __pte;
        pte_val(__pte) = physpage + pgprot_val(pgprot);
        return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
        unsigned long physpage = page_to_phys(page);

        return mk_pte_phys(physpage, pgprot);
}

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
        unsigned long physpage = __pa((pfn) << PAGE_SHIFT);

        return mk_pte_phys(physpage, pgprot);
}

#ifdef __s390x__

static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
        unsigned long physpage = __pa((pfn) << PAGE_SHIFT);

        return __pmd(physpage + pgprot_val(pgprot));
}

#endif /* __s390x__ */
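
/*
 * Illustrative sketch (hypothetical, not part of the original header):
 * building a present, read-only pte for a page with the conversion
 * functions above and installing it with set_pte_at().
 */
static inline void example_map_page_ro(struct mm_struct *mm,
                                       unsigned long addr, pte_t *ptep,
                                       struct page *page)
{
        pte_t pte = mk_pte(page, PAGE_RO);      /* _PAGE_TYPE_RO entry */

        set_pte_at(mm, addr, ptep, pte);        /* shadow pte handled too */
}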
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

#define pgd_page_vaddr(pgd) (pgd_val(pgd) & PAGE_MASK)

#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#ifndef __s390x__

/* Find an entry in the second-level page table.. */
static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
        return (pmd_t *) dir;
}

#else /* __s390x__ */

/* Find an entry in the second-level page table.. */
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pmd_offset(dir,addr) \
        ((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(addr))

#endif /* __s390x__ */
/* Find an entry in the third-level page table.. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pte_offset_kernel(pmd, address) \
        ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
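
/*
 * Illustrative sketch (hypothetical, not part of the original header):
 * a software walk from an mm to the pte that maps a user address,
 * using only the lookup macros and query functions defined above.
 */
static inline pte_t *example_lookup_pte(struct mm_struct *mm,
                                        unsigned long address)
{
        pgd_t *pgd = pgd_offset(mm, address);
        pmd_t *pmd;

        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return NULL;
        pmd = pmd_offset(pgd, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return NULL;
        return pte_offset_kernel(pmd, address);
}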
/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                     offset                       |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
        pte_t pte;
        offset &= __SWP_OFFSET_MASK;
        pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
                ((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
        return pte;
}

#define __swp_type(entry)       (((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)     (((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val })
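
/*
 * Illustrative sketch (hypothetical, not part of the original header):
 * a (type, offset) pair survives the round trip through the swap pte
 * encoding above, as long as type fits in 5 bits and offset fits in
 * __SWP_OFFSET_MASK. The extraction mirrors __swp_type/__swp_offset
 * without requiring swp_entry_t to be visible here.
 */
static inline int example_swap_roundtrip(unsigned long type,
                                         unsigned long offset)
{
        pte_t pte = mk_swap_pte(type, offset);

        return ((pte_val(pte) >> 2) & 0x1f) == (type & 0x1f) &&
               ((pte_val(pte) >> 11) | ((pte_val(pte) >> 7) & 1)) ==
                        (offset & __SWP_OFFSET_MASK);
}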
#ifndef __s390x__
# define PTE_FILE_MAX_BITS      26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS      59
#endif /* __s390x__ */

#define pte_to_pgoff(__pte) \
        ((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
        ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
                   | _PAGE_TYPE_FILE })
#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int add_shared_memory(unsigned long start, unsigned long size);
extern int remove_shared_memory(unsigned long start, unsigned long size);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()    do { } while (0)

#define __HAVE_ARCH_MEMMAP_INIT
extern void memmap_init(unsigned long, int, unsigned long, unsigned long);

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */