#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
-
+#include <linux/seq_file.h>
#include <linux/vmalloc.h>
+#include <linux/kallsyms.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
struct vm_struct *vmlist;
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
- int node);
+ int node, void *caller);
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
}
EXPORT_SYMBOL_GPL(map_vm_area);
-static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
- unsigned long start, unsigned long end,
- int node, gfp_t gfp_mask)
+/*
+ * Map a vmalloc()-space virtual address to the struct page backing it.
+ * Returns NULL if the address is not currently mapped.
+ */
+struct page *vmalloc_to_page(const void *vmalloc_addr)
+{
+ unsigned long addr = (unsigned long) vmalloc_addr;
+ struct page *page = NULL;
+ pgd_t *pgd = pgd_offset_k(addr);
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+
+ if (!pgd_none(*pgd)) {
+ pud = pud_offset(pgd, addr);
+ if (!pud_none(*pud)) {
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_none(*pmd)) {
+ ptep = pte_offset_map(pmd, addr);
+ pte = *ptep;
+ if (pte_present(pte))
+ page = pte_page(pte);
+ pte_unmap(ptep);
+ }
+ }
+ }
+ return page;
+}
+EXPORT_SYMBOL(vmalloc_to_page);
+
+/*
+ * Map a vmalloc()-space virtual address to the physical page frame number.
+ * The address must be mapped: unlike vmalloc_to_page(), this has no way
+ * to report failure.
+ */
+unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
+{
+ return page_to_pfn(vmalloc_to_page(vmalloc_addr));
+}
+EXPORT_SYMBOL(vmalloc_to_pfn);
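+
+/*
+ * Example (illustrative sketch, not part of this patch): a driver that
+ * needs the pages backing a vmalloc'ed buffer, e.g. to build a
+ * scatterlist, can walk the buffer one page at a time; a NULL return
+ * means that page of the range is not currently mapped:
+ *
+ *	void *buf = vmalloc(8 * PAGE_SIZE);
+ *	struct page *page;
+ *	int i;
+ *
+ *	for (i = 0; i < 8; i++) {
+ *		page = vmalloc_to_page(buf + i * PAGE_SIZE);
+ *		if (!page)
+ *			break;
+ *	}
+ */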
+
+static struct vm_struct *
+__get_vm_area_node(unsigned long size, unsigned long flags, unsigned long start,
+ unsigned long end, int node, gfp_t gfp_mask, void *caller)
{
struct vm_struct **p, *tmp, *area;
unsigned long align = 1;
if (addr > end - size)
goto out;
}
+ if ((size + addr) < addr)
+ goto out;
+ if (addr > end - size)
+ goto out;
found:
area->next = *p;
area->pages = NULL;
area->nr_pages = 0;
area->phys_addr = 0;
+ area->caller = caller;
write_unlock(&vmlist_lock);
return area;
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end)
{
- return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
+ return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+ __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);
*/
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
- return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
+ return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+ -1, GFP_KERNEL, __builtin_return_address(0));
+}
+
+struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
+ void *caller)
+{
+ return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+ -1, GFP_KERNEL, caller);
}
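+
+/*
+ * Example (sketch only): a wrapper around get_vm_area() would otherwise
+ * be recorded as the caller of every area it creates.  Forwarding its
+ * own return address via get_vm_area_caller() keeps /proc/vmallocinfo
+ * pointing at the real allocation site (my_get_area is a made-up name):
+ *
+ *	struct vm_struct *my_get_area(unsigned long size)
+ *	{
+ *		return get_vm_area_caller(size, VM_ALLOC,
+ *					  __builtin_return_address(0));
+ *	}
+ */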
struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
int node, gfp_t gfp_mask)
{
return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
- gfp_mask);
+ gfp_mask, __builtin_return_address(0));
}
/* Caller must hold vmlist_lock */
-static struct vm_struct *__find_vm_area(void *addr)
+static struct vm_struct *__find_vm_area(const void *addr)
{
struct vm_struct *tmp;
}
/* Caller must hold vmlist_lock */
-static struct vm_struct *__remove_vm_area(void *addr)
+static struct vm_struct *__remove_vm_area(const void *addr)
{
struct vm_struct **p, *tmp;
* This function returns the found VM area, but using it is NOT safe
* on SMP machines, except for its size or flags.
*/
-struct vm_struct *remove_vm_area(void *addr)
+struct vm_struct *remove_vm_area(const void *addr)
{
struct vm_struct *v;
write_lock(&vmlist_lock);
return v;
}
-static void __vunmap(void *addr, int deallocate_pages)
+static void __vunmap(const void *addr, int deallocate_pages)
{
struct vm_struct *area;
int i;
for (i = 0; i < area->nr_pages; i++) {
- BUG_ON(!area->pages[i]);
- __free_page(area->pages[i]);
+ struct page *page = area->pages[i];
+
+ BUG_ON(!page);
+ __free_page(page);
}
if (area->flags & VM_VPAGES)
*
* Must not be called in interrupt context.
*/
-void vfree(void *addr)
+void vfree(const void *addr)
{
BUG_ON(in_interrupt());
__vunmap(addr, 1);
*
* Must not be called in interrupt context.
*/
-void vunmap(void *addr)
+void vunmap(const void *addr)
{
BUG_ON(in_interrupt());
__vunmap(addr, 0);
if (count > num_physpages)
return NULL;
- area = get_vm_area((count << PAGE_SHIFT), flags);
+ area = get_vm_area_caller((count << PAGE_SHIFT), flags,
+ __builtin_return_address(0));
if (!area)
return NULL;
+
if (map_vm_area(area, prot, &pages)) {
vunmap(area->addr);
return NULL;
}
EXPORT_SYMBOL(vmap);
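+
+/*
+ * Example (illustrative): vmap() makes already-allocated pages virtually
+ * contiguous; vunmap() only removes the mapping and does not free the
+ * pages themselves:
+ *
+ *	struct page *pages[2];
+ *	void *va;
+ *
+ *	pages[0] = alloc_page(GFP_KERNEL);
+ *	pages[1] = alloc_page(GFP_KERNEL);
+ *	va = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
+ *	if (va) {
+ *		...
+ *		vunmap(va);
+ *	}
+ */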
-void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
- pgprot_t prot, int node)
+static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+ pgprot_t prot, int node, void *caller)
{
struct page **pages;
unsigned int nr_pages, array_size, i;
/* Please note that the recursion is strictly bounded. */
if (array_size > PAGE_SIZE) {
pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
- PAGE_KERNEL, node);
+ PAGE_KERNEL, node, caller);
area->flags |= VM_VPAGES;
} else {
pages = kmalloc_node(array_size,
node);
}
area->pages = pages;
+ area->caller = caller;
if (!area->pages) {
remove_vm_area(area->addr);
kfree(area);
}
for (i = 0; i < area->nr_pages; i++) {
+ struct page *page;
+
if (node < 0)
- area->pages[i] = alloc_page(gfp_mask);
+ page = alloc_page(gfp_mask);
else
- area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
- if (unlikely(!area->pages[i])) {
+ page = alloc_pages_node(node, gfp_mask, 0);
+
+ if (unlikely(!page)) {
/* Successfully allocated i pages, free them in __vunmap() */
area->nr_pages = i;
goto fail;
}
+ area->pages[i] = page;
}
if (map_vm_area(area, prot, &pages))
void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
- return __vmalloc_area_node(area, gfp_mask, prot, -1);
+ return __vmalloc_area_node(area, gfp_mask, prot, -1,
+ __builtin_return_address(0));
}
/**
* kernel virtual space, using a pagetable protection of @prot.
*/
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
- int node)
+ int node, void *caller)
{
struct vm_struct *area;
if (!size || (size >> PAGE_SHIFT) > num_physpages)
return NULL;
- area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
+ area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
+ node, gfp_mask, caller);
+
if (!area)
return NULL;
- return __vmalloc_area_node(area, gfp_mask, prot, node);
+ return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
}
void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
- return __vmalloc_node(size, gfp_mask, prot, -1);
+ return __vmalloc_node(size, gfp_mask, prot, -1,
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);
*/
void *vmalloc(unsigned long size)
{
- return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+ return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+ -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);
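+
+/*
+ * Example (illustrative): vmalloc() suits large allocations that only
+ * need to be virtually contiguous; every vmalloc() must be paired with
+ * vfree(), and neither may be called from interrupt context:
+ *
+ *	unsigned long *table = vmalloc(1024 * 1024);
+ *
+ *	if (!table)
+ *		return -ENOMEM;
+ *	...
+ *	vfree(table);
+ */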
*/
void *vmalloc_node(unsigned long size, int node)
{
- return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
+ return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+ node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);
* @vma: vma to cover (map full range of vma)
* @addr: vmalloc memory
* @pgoff: number of pages into addr before first page to map
- * @returns: 0 for success, -Exxx on failure
+ *
+ * Returns: 0 for success, -Exxx on failure
*
* This function checks that addr is a valid vmalloc'ed area, and
* that it is big enough to cover the vma. Will return failure if
}
-static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
+static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
/* apply_to_page_range() does all the hard work. */
return 0;
/**
* alloc_vm_area - allocate a range of kernel address space
* @size: size of the area
- * @returns: NULL on failure, vm_struct on success
+ *
+ * Returns: NULL on failure, vm_struct on success
*
* This function reserves a range of kernel address space, and
* allocates pagetables to map that range. No actual mappings
{
struct vm_struct *area;
- area = get_vm_area(size, VM_IOREMAP);
+ area = get_vm_area_caller(size, VM_IOREMAP,
+ __builtin_return_address(0));
if (area == NULL)
return NULL;
kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
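+
+/*
+ * Example (sketch): alloc_vm_area() suits callers that want the address
+ * range and page tables but will install the actual mappings themselves,
+ * e.g. through a hypercall:
+ *
+ *	struct vm_struct *area = alloc_vm_area(PAGE_SIZE);
+ *
+ *	if (!area)
+ *		return -ENOMEM;
+ *	... install real ptes covering area->addr here ...
+ *	free_vm_area(area);
+ */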
+
+#ifdef CONFIG_PROC_FS
+static void *s_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t n = *pos;
+ struct vm_struct *v;
+
+ read_lock(&vmlist_lock);
+ v = vmlist;
+ while (n > 0 && v) {
+ n--;
+ v = v->next;
+ }
+ if (!n)
+ return v;
+
+ return NULL;
+}
+
+static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ struct vm_struct *v = p;
+
+ ++*pos;
+ return v->next;
+}
+
+static void s_stop(struct seq_file *m, void *p)
+{
+ read_unlock(&vmlist_lock);
+}
+
+static int s_show(struct seq_file *m, void *p)
+{
+ struct vm_struct *v = p;
+
+ seq_printf(m, "0x%p-0x%p %7ld",
+ v->addr, v->addr + v->size, v->size);
+
+ if (v->caller) {
+ char buff[2 * KSYM_NAME_LEN];
+
+ seq_putc(m, ' ');
+ sprint_symbol(buff, (unsigned long)v->caller);
+ seq_puts(m, buff);
+ }
+
+ if (v->nr_pages)
+ seq_printf(m, " pages=%d", v->nr_pages);
+
+ if (v->phys_addr)
+ seq_printf(m, " phys=%lx", v->phys_addr);
+
+ if (v->flags & VM_IOREMAP)
+ seq_puts(m, " ioremap");
+
+ if (v->flags & VM_ALLOC)
+ seq_puts(m, " vmalloc");
+
+ if (v->flags & VM_MAP)
+ seq_puts(m, " vmap");
+
+ if (v->flags & VM_USERMAP)
+ seq_puts(m, " user");
+
+ if (v->flags & VM_VPAGES)
+ seq_puts(m, " vpages");
+
+ seq_putc(m, '\n');
+ return 0;
+}
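+
+/*
+ * Each /proc/vmallocinfo line produced above then looks roughly like
+ * (values and symbol name are illustrative):
+ *
+ *	0xf8800000-0xf8802000    8192 some_driver_init+0x42/0x90 pages=1 vmalloc
+ *
+ * i.e. start-end, size in bytes, the recorded caller if one was saved,
+ * then the optional pages=/phys= fields and the area's type flags.
+ */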
+
+const struct seq_operations vmalloc_op = {
+ .start = s_start,
+ .next = s_next,
+ .stop = s_stop,
+ .show = s_show,
+};
+#endif