*/
static inline int uncached_access(struct file *file, unsigned long addr)
{
-#if defined(__i386__)
+#if defined(__i386__) && !defined(__arch_um__)
/*
* On the PPro and successors, the MTRRs are used to set
* memory types for physical addresses outside main memory,
test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
&& addr >= __pa(high_memory);
-#elif defined(__x86_64__)
+#elif defined(__x86_64__) && !defined(__arch_um__)
/*
* This is broken because it can generate memory type aliases,
* which can cause cache corruptions
}
#endif
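
For context: uncached_access() is consulted on the mmap path, where a hit
switches the mapping to an uncached protection. A minimal sketch of such a
caller, assuming the stock pgprot_noncached() and remap_pfn_range() helpers
(illustrative only, not the exact mem.c code):

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

	/* Illustrative: degrade to an uncached mapping when required. */
	if (uncached_access(file, offset))
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}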
+#ifdef CONFIG_NONPROMISC_DEVMEM
+static inline int range_is_allowed(unsigned long from, unsigned long to)
+{
+ unsigned long cursor;
+
+ cursor = from >> PAGE_SHIFT;
+ while ((cursor << PAGE_SHIFT) < to) {
+ if (!devmem_is_allowed(cursor)) {
+ printk(KERN_INFO "Program %s tried to access /dev/mem "
+ "between %lx->%lx.\n",
+ current->comm, from, to);
+ return 0;
+ }
+ cursor++;
+ }
+ return 1;
+}
+#else
+static inline int range_is_allowed(unsigned long from, unsigned long to)
+{
+ return 1;
+}
+#endif
+
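range_is_allowed() delegates the actual policy to the per-architecture
devmem_is_allowed(), one pfn at a time. A rough sketch of what an arch-side
policy could look like, assuming a page_is_ram() helper (the real x86 version
may differ in detail):

int devmem_is_allowed(unsigned long pagenr)
{
	/* Sketch: keep the legacy low 1MB (BIOS, VGA) accessible. */
	if (pagenr <= 256)
		return 1;
	/* Non-RAM pages are device memory; leave them accessible. */
	if (!page_is_ram(pagenr))
		return 1;
	/* Everything else is system RAM: off limits. */
	return 0;
}

The idea is that legacy users (X servers, dosemu) keep the low window and
mmio tools keep device memory, while arbitrary system RAM is fenced off.
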
/*
 * This function reads the *physical* memory. The f_pos points directly to the
* memory location.
*/
ptr = xlate_dev_mem_ptr(p);
+ if (!range_is_allowed(p, p+count))
+ return -EPERM;
if (copy_to_user(buf, ptr, sz))
return -EFAULT;
buf += sz;
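
With the check in place, a read that strays into disallowed RAM now fails
with EPERM before any data is copied out. A hypothetical userspace probe
(offset chosen for illustration; 16MB is normally system RAM):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	int fd = open("/dev/mem", O_RDONLY);

	if (fd < 0)
		return 1;
	/* 16MB is typically RAM, hence disallowed by the new check. */
	if (pread(fd, buf, sizeof(buf), 16 << 20) < 0 && errno == EPERM)
		fprintf(stderr, "read blocked as expected: EPERM\n");
	close(fd);
	return 0;
}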
*/
ptr = xlate_dev_mem_ptr(p);
+ if (!range_is_allowed(p, p+sz))
+ return -EPERM;
copied = copy_from_user(ptr, buf, sz);
if (copied) {
written += sz - copied;
return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
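
pipe_to_null itself is outside this hunk; to discard spliced data it
presumably only has to claim each pipe buffer as consumed, along the lines of:

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	/* Discard the data: report the whole buffer as "written". */
	return sd->len;
}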
-#ifdef CONFIG_MMU
-/*
- * For fun, we are using the MMU for this.
- */
-static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
-{
- struct mm_struct *mm;
- struct vm_area_struct * vma;
- unsigned long addr=(unsigned long)buf;
-
- mm = current->mm;
- /* Oops, this was forgotten before. -ben */
- down_read(&mm->mmap_sem);
-
- /* For private mappings, just map in zero pages. */
- for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
- unsigned long count;
-
- if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
- goto out_up;
- if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
- break;
- count = vma->vm_end - addr;
- if (count > size)
- count = size;
-
- zap_page_range(vma, addr, count, NULL);
- if (zeromap_page_range(vma, addr, count, PAGE_COPY))
- break;
-
- size -= count;
- buf += count;
- addr += count;
- if (size == 0)
- goto out_up;
- }
-
- up_read(&mm->mmap_sem);
-
- /* The shared case is hard. Let's do the conventional zeroing. */
- do {
- unsigned long unwritten = clear_user(buf, PAGE_SIZE);
- if (unwritten)
- return size + unwritten - PAGE_SIZE;
- cond_resched();
- buf += PAGE_SIZE;
- size -= PAGE_SIZE;
- } while (size);
-
- return size;
-out_up:
- up_read(&mm->mmap_sem);
- return size;
-}
-
static ssize_t read_zero(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
- unsigned long left, unwritten, written = 0;
+ size_t written;
if (!count)
return 0;
if (!access_ok(VERIFY_WRITE, buf, count))
return -EFAULT;
- left = count;
-
- /* do we want to be clever? Arbitrary cut-off */
- if (count >= PAGE_SIZE*4) {
- unsigned long partial;
+ written = 0;
+ while (count) {
+ unsigned long unwritten;
+ size_t chunk = count;
- /* How much left of the page? */
- partial = (PAGE_SIZE-1) & -(unsigned long) buf;
- unwritten = clear_user(buf, partial);
- written = partial - unwritten;
+ if (chunk > PAGE_SIZE)
+ chunk = PAGE_SIZE; /* Just for latency reasons */
+ unwritten = clear_user(buf, chunk);
+ written += chunk - unwritten;
if (unwritten)
- goto out;
- left -= partial;
- buf += partial;
- unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
- written += (left & PAGE_MASK) - unwritten;
- if (unwritten)
- goto out;
- buf += left & PAGE_MASK;
- left &= ~PAGE_MASK;
- }
- unwritten = clear_user(buf, left);
- written += left - unwritten;
-out:
- return written ? written : -EFAULT;
-}
-
-static int mmap_zero(struct file * file, struct vm_area_struct * vma)
-{
- int err;
-
- if (vma->vm_flags & VM_SHARED)
- return shmem_zero_setup(vma);
- err = zeromap_page_range(vma, vma->vm_start,
- vma->vm_end - vma->vm_start, vma->vm_page_prot);
- BUG_ON(err == -EEXIST);
- return err;
-}
-#else /* CONFIG_MMU */
-static ssize_t read_zero(struct file * file, char * buf,
- size_t count, loff_t *ppos)
-{
- size_t todo = count;
-
- while (todo) {
- size_t chunk = todo;
-
- if (chunk > 4096)
- chunk = 4096; /* Just for latency reasons */
- if (clear_user(buf, chunk))
- return -EFAULT;
+ break;
buf += chunk;
- todo -= chunk;
+ count -= chunk;
cond_resched();
}
- return count;
+ return written ? written : -EFAULT;
}
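
The rewritten read_zero() replaces the MMU games with a flat clear_user()
loop, capped at PAGE_SIZE per pass so cond_resched() gets a chance between
chunks; the same body now serves both MMU and NOMMU builds. Userspace-visible
behaviour is unchanged; a quick hypothetical check:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	static char buf[1 << 20] = { 1 };	/* dirty the buffer first */
	int fd = open("/dev/zero", O_RDONLY);
	ssize_t n = read(fd, buf, sizeof(buf));	/* many PAGE_SIZE chunks */

	close(fd);
	return (n == sizeof(buf) && buf[0] == 0 && buf[n - 1] == 0) ? 0 : 1;
}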
static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
+#ifndef CONFIG_MMU
return -ENOSYS;
+#endif
+ if (vma->vm_flags & VM_SHARED)
+ return shmem_zero_setup(vma);
+ return 0;
}
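
Private /dev/zero mappings are now left entirely to the generic
anonymous-fault path, so mmap_zero() only has to handle the shared case via
shmem. The classic pre-MAP_ANONYMOUS idiom keeps working; for illustration:

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/zero", O_RDWR);
	/* VM_SHARED here sends mmap_zero() through shmem_zero_setup(). */
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);

	if (p == MAP_FAILED)
		return 1;
	p[0] = 42;	/* visible to children after fork() */
	munmap(p, 4096);
	close(fd);
	return 0;
}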
-#endif /* CONFIG_MMU */
static ssize_t write_full(struct file * file, const char __user * buf,
size_t count, loff_t *ppos)
static int __init chr_dev_init(void)
{
int i;
+ int err;
+
+ err = bdi_init(&zero_bdi);
+ if (err)
+ return err;
if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
printk("unable to get major %d for memory devs\n", MEM_MAJOR);