#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

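/*
 * task_mem() formats the memory-usage lines of /proc/<pid>/status into
 * the caller-supplied buffer.  Each "kB" figure is a page count scaled
 * by "<< (PAGE_SHIFT-10)".  Sample output (values illustrative only):
 *
 *	VmPeak:	    4720 kB
 *	VmSize:	    4716 kB
 *	VmRSS:	     604 kB
 */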
char *task_mem(struct mm_struct *mm, char *buffer)
{
	unsigned long data, text, lib;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	buffer += sprintf(buffer,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
	return buffer;
}

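/*
 * task_vsize() supplies the byte-granular vsize for /proc/<pid>/stat;
 * task_statm() fills in the page counts reported by /proc/<pid>/statm.
 */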
unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	*shared = get_mm_counter(mm, file_rss);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, anon_rss);
	return mm->total_vm;
}

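/*
 * proc_exe_link() resolves /proc/<pid>/exe: grab the target mm, then
 * scan its vma list for the first executable, file-backed mapping.
 */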
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
	struct vm_area_struct *vma;
	int result = -ENOENT;
	struct task_struct *task = get_proc_task(inode);
	struct mm_struct *mm = NULL;

	if (task) {
		mm = get_task_mm(task);
		put_task_struct(task);
	}
	if (!mm)
		goto out;
	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
			break;
	if (vma) {
		*mnt = mntget(vma->vm_file->f_path.mnt);
		*dentry = dget(vma->vm_file->f_path.dentry);
		result = 0;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);
out:
	return result;
}

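/*
 * Pad out to the fixed column where mapping names begin; clamp so at
 * least one space is always emitted.
 */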
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

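/*
 * Per-vma accumulators for /proc/<pid>/smaps, plus a small walker
 * struct bundling the vma, a callback and its opaque argument for the
 * page-table walk further below.
 */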
struct mem_size_stats
{
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
};

struct pmd_walker {
	struct vm_area_struct *vma;
	void *private;
	void (*action)(struct vm_area_struct *, pmd_t *, unsigned long,
		       unsigned long, void *);
};

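/*
 * Emit one /proc/<pid>/maps line, e.g. (illustrative):
 *
 *	08048000-0804c000 r-xp 00000000 03:01 12345      /bin/cat
 *
 * followed, when mss is non-NULL, by the Size/Rss/Shared/Private
 * breakdown that /proc/<pid>/smaps adds.
 */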
static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			vma->vm_pgoff << PAGE_SHIFT,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				}
			} else {
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');

	if (mss)
		seq_printf(m,
			   "Size:          %8lu kB\n"
			   "Rss:           %8lu kB\n"
			   "Shared_Clean:  %8lu kB\n"
			   "Shared_Dirty:  %8lu kB\n"
			   "Private_Clean: %8lu kB\n"
			   "Private_Dirty: %8lu kB\n",
			   (vma->vm_end - vma->vm_start) >> 10,
			   mss->resident >> 10,
			   mss->shared_clean >> 10,
			   mss->shared_dirty >> 10,
			   mss->private_clean >> 10,
			   mss->private_dirty >> 10);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
	return 0;
}

static int show_map(struct seq_file *m, void *v)
{
	return show_map_internal(m, v, NULL);
}

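/*
 * Accumulate smaps statistics for the ptes under one pmd: every present
 * pte counts toward resident; normal pages are then classified shared
 * or private by mapcount, and clean or dirty by the pte dirty bit.
 */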
static void smaps_one_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			  unsigned long addr, unsigned long end,
			  void *private)
{
	struct mem_size_stats *mss = private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		mss->resident += PAGE_SIZE;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		if (page_mapcount(page) >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
}

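/*
 * The helpers below descend the four-level page tables (pgd -> pud ->
 * pmd), skipping empty entries and invoking walker->action on each
 * populated pmd range of the vma.
 */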
static inline void for_each_pmd_in_pud(struct pmd_walker *walker, pud_t *pud,
				       unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	for (pmd = pmd_offset(pud, addr); addr != end;
	     pmd++, addr = next) {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		walker->action(walker->vma, pmd, addr, next, walker->private);
	}
}

static inline void for_each_pud_in_pgd(struct pmd_walker *walker, pgd_t *pgd,
				       unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	for (pud = pud_offset(pgd, addr); addr != end;
	     pud++, addr = next) {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		for_each_pmd_in_pud(walker, pud, addr, next);
	}
}

static inline void for_each_pmd(struct vm_area_struct *vma,
				void (*action)(struct vm_area_struct *, pmd_t *,
					       unsigned long, unsigned long,
					       void *),
				void *private)
{
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;
	struct pmd_walker walker = {
		.vma = vma,
		.private = private,
		.action = action,
	};
	pgd_t *pgd;
	unsigned long next;

	for (pgd = pgd_offset(vma->vm_mm, addr); addr != end;
	     pgd++, addr = next) {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		for_each_pud_in_pgd(&walker, pgd, addr, next);
	}
}

static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;

	memset(&mss, 0, sizeof mss);
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		for_each_pmd(vma, smaps_one_pmd, &mss);
	return show_map_internal(m, v, &mss);
}

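/*
 * seq_file iterator: m_start()/m_next()/m_stop() walk the task's vma
 * list, with the gate vma (if any) appended as a final pseudo-entry.
 * m->version remembers the last address shown, so a subsequent read()
 * can restart via find_vma() instead of rescanning from the list head.
 */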
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = get_task_mm(priv->task);
	if (!mm)
		return NULL;

	priv->tail_vma = tail_vma = get_gate_vma(priv->task);
	down_read(&mm->mmap_sem);

	/* Start with last addr hint */
	if (last_addr && (vma = find_vma(mm, last_addr))) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}
	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

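/*
 * Drop mmap_sem and the mm reference taken in m_start(), except for the
 * tail (gate) vma, whose mm was already released there.
 */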
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

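/*
 * Common open() helper: allocate the per-file proc_maps_private, stash
 * the pid, and hang it off the seq_file so the iterator can find it.
 */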
static int do_maps_open(struct inode *inode, struct file *file,
			struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

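/*
 * The numa_maps variant reuses the same iterator but shows each vma's
 * NUMA policy; show_numa_map() itself lives in mm/mempolicy.c.
 */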
#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif

static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};