/*
 *  arch/s390/lib/uaccess_pt.c
 *
 *  User access functions based on page table walks.
 *
 *  Copyright IBM Corp. 2006
 *  Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */
10 #include <linux/errno.h>
11 #include <linux/hardirq.h>
13 #include <asm/uaccess.h>
14 #include <asm/futex.h>
/*
 * __handle_fault - fault in a user page by walking the VMA list; the
 * software analogue of the hardware page-fault handler, used when a
 * page-table-walk copy hits a non-present or non-writable pte.
 *
 * NOTE(review): this chunk is garbled by extraction -- each line keeps
 * a stray leading number, and many original lines (braces, goto labels,
 * returns, the switch cases) are missing from view. Comments below
 * describe only what the visible lines establish.
 */
17 static inline int __handle_fault(struct mm_struct *mm, unsigned long address,
20 struct vm_area_struct *vma;
/* Serialize against VMA-list changes while we look up and fault. */
25 down_read(&mm->mmap_sem);
26 vma = find_vma(mm, address);
/* Address below the VMA start is valid only for a grow-down stack. */
29 if (unlikely(vma->vm_start > address)) {
30 if (!(vma->vm_flags & VM_GROWSDOWN))
32 if (expand_stack(vma, address))
37 /* page not present, check vm flags */
38 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
/* A write fault additionally requires VM_WRITE on the mapping. */
41 if (!(vma->vm_flags & VM_WRITE))
/* Hand resolution to the generic MM fault handler; the case labels
 * for its result are among the missing lines. */
46 switch (handle_mm_fault(mm, vma, address, write_access)) {
62 up_read(&mm->mmap_sem);
/* Out-of-memory path: init is never OOM-killed here -- presumably it
 * retakes mmap_sem and retries (retry label not visible); any other
 * process is reported killed. TODO confirm against the full file. */
66 up_read(&mm->mmap_sem);
67 if (is_init(current)) {
69 down_read(&mm->mmap_sem);
72 printk("VM: killing process %s\n", current->comm);
/* SIGBUS path: record the faulting address and trap number in the
 * thread struct, then deliver the signal. */
76 up_read(&mm->mmap_sem);
77 current->thread.prot_addr = address;
78 current->thread.trap_no = 0x11;
79 force_sig(SIGBUS, current);
/*
 * __user_copy_pt - copy between a user address and a kernel buffer by
 * manually walking the page tables (pgd -> pmd -> pte) rather than
 * using hardware address-space moves.
 * write_user != 0 means the copy direction is kernel -> user, so the
 * user pte must also be writable.
 *
 * NOTE(review): extraction artifacts -- stray leading numbers and
 * missing lines (copy-loop header, local declarations, fault labels,
 * the final return) -- are left untouched below.
 */
83 static inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
84 size_t n, int write_user)
86 struct mm_struct *mm = current->mm;
87 unsigned long offset, pfn, done, size;
/* Keep the page tables stable while translating and copying. */
95 spin_lock(&mm->page_table_lock);
/* Walk pgd -> pmd -> pte; any missing or bad level aborts to the
 * fault path (goto targets not visible in this chunk). */
97 pgd = pgd_offset(mm, uaddr);
98 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
101 pmd = pmd_offset(pgd, uaddr);
102 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
105 pte = pte_offset_map(pmd, uaddr);
/* The pte must be present, and writable when storing to user space. */
106 if (!pte || !pte_present(*pte) ||
107 (write_user && !pte_write(*pte)))
/* Copy at most up to the end of the current page. */
114 offset = uaddr & (PAGE_SIZE - 1);
115 size = min(n - done, PAGE_SIZE - offset);
/* Build the flat kernel address of the user page from its pfn;
 * direction decides which side gets the translated address. */
117 to = (void *)((pfn << PAGE_SHIFT) + offset);
120 from = (void *)((pfn << PAGE_SHIFT) + offset);
123 memcpy(to, from, size);
128 spin_unlock(&mm->page_table_lock);
/* Fault path: drop the lock, fault the page in via __handle_fault,
 * then presumably retry the walk -- retry label not visible. */
131 spin_unlock(&mm->page_table_lock);
132 if (__handle_fault(mm, uaddr, write_user))
/*
 * copy_from_user_pt - copy n bytes from user space into a kernel
 * buffer using the page-table-walk copier.
 * Returns the number of bytes that could not be copied (0 on full
 * success); the uncopied tail of the destination is zero-filled, as
 * the memset below shows.
 *
 * NOTE(review): stray leading numbers and missing lines (braces,
 * returns) are extraction artifacts, left untouched.
 */
137 size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
/* Under KERNEL_DS the "user" pointer is really a kernel address, so a
 * plain memcpy suffices. */
141 if (segment_eq(get_fs(), KERNEL_DS)) {
142 memcpy(to, (void __kernel __force *) from, n);
/* write_user = 0: this is a load from user space. */
145 rc = __user_copy_pt((unsigned long) from, to, n, 0);
/* Zero the bytes the page-table walk failed to copy. */
147 memset(to + n - rc, 0, rc);
/*
 * copy_to_user_pt - copy n bytes from a kernel buffer to user space
 * using the page-table-walk copier.
 * Returns __user_copy_pt()'s result -- presumably the number of bytes
 * not copied, mirroring copy_from_user_pt(); confirm in the full file.
 *
 * NOTE(review): stray leading numbers and missing lines (braces,
 * the KERNEL_DS return) are extraction artifacts, left untouched.
 */
151 size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
/* Under KERNEL_DS the destination is a kernel address: direct memcpy. */
153 if (segment_eq(get_fs(), KERNEL_DS)) {
154 memcpy((void __kernel __force *) to, from, n);
/* write_user = 1: the user ptes must be writable during the walk. */
157 return __user_copy_pt((unsigned long) to, (void *) from, n, 1);