/*
 *  arch/s390/lib/uaccess_pt.c
 *
 *  User access functions based on page table walks.
 *
 *    Copyright IBM Corp. 2006
 *    Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

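/*
 * Resolve a fault for a user address that the page table walk could
 * not handle: find the VMA, check the access rights and let
 * handle_mm_fault() fault the page in. Returns 0 on success and
 * -EFAULT if the fault cannot be resolved (or if we are in atomic
 * context, where sleeping on mmap_sem is not allowed).
 */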
static inline int __handle_fault(struct mm_struct *mm, unsigned long address,
				 int write_access)
{
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	if (in_atomic())
		return ret;
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
		if (expand_stack(vma, address))
			goto out;
	}

	if (!write_access) {
		/* page not present, check vm flags */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto out;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto out;
	}

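	/*
	 * Fault the page in. On VM_FAULT_OOM, init may yield and retry
	 * forever; any other process just fails with -EFAULT.
	 */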
survive:
	switch (handle_mm_fault(mm, vma, address, write_access)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto out_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}
	ret = 0;
out:
	up_read(&mm->mmap_sem);
	return ret;

out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", current->comm);
	return ret;

out_sigbus:
	up_read(&mm->mmap_sem);
	current->thread.prot_addr = address;
	current->thread.trap_no = 0x11;
	force_sig(SIGBUS, current);
	return ret;
}

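/*
 * Copy n bytes between kernel space and user space by walking the
 * user page tables by hand: resolve each user page under
 * mm->page_table_lock, memcpy() through the kernel 1:1 mapping of
 * the page frame, and fault missing pages in via __handle_fault().
 * write_user selects the direction (kernel->user or user->kernel).
 * Returns the number of bytes that could NOT be copied.
 */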
static inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
				    size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, size;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	void *from, *to;

	done = 0;
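	/*
	 * Walk pgd -> pmd -> pte for every page touched. If a level is
	 * missing or the pte lacks the required access, drop the lock,
	 * fault the page in and restart the walk at the current uaddr.
	 */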
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pgd = pgd_offset(mm, uaddr);
		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
			goto fault;

		pmd = pmd_offset(pgd, uaddr);
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
			goto fault;

		pte = pte_offset_map(pmd, uaddr);
		if (!pte || !pte_present(*pte) ||
		    (write_user && !pte_write(*pte)))
			goto fault;

		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn))
			goto out;

		offset = uaddr & (PAGE_SIZE - 1);
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *)((pfn << PAGE_SHIFT) + offset);
			from = kptr + done;
		} else {
			from = (void *)((pfn << PAGE_SHIFT) + offset);
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
out:
	spin_unlock(&mm->page_table_lock);
	return n - done;
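	/*
	 * The walk hit a missing or write-protected page: release the
	 * lock, try to fault the page in and, if that worked, retry.
	 */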
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, write_user))
		return n - done;
	goto retry;
}

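/*
 * copy_from_user() variant based on the page table walk above. For
 * KERNEL_DS the source is a kernel address and a plain memcpy()
 * suffices. On a partial copy the uncopied tail of the kernel buffer
 * is zeroed, as the copy_from_user() contract requires.
 */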
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (void __kernel __force *) from, n);
		return 0;
	}
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}

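/*
 * copy_to_user() variant based on the page table walk above; again,
 * KERNEL_DS means the destination is a kernel address and memcpy()
 * can be used directly.
 */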
size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __kernel __force *) to, from, n);
		return 0;
	}
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}