smaps: extract pmd walker from smaps code
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

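/*
 * Build the VmPeak/VmSize/.../VmPTE summary shown in /proc/<pid>/status.
 * Counters are kept in pages internally and printed here in kB.
 */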
char *task_mem(struct mm_struct *mm, char *buffer)
{
        unsigned long data, text, lib;
        unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

        /*
         * Note: to minimize their overhead, mm maintains hiwater_vm and
         * hiwater_rss only when about to *lower* total_vm or rss.  Any
         * collector of these hiwater stats must therefore get total_vm
         * and rss too, which will usually be the higher.  Barriers? not
         * worth the effort, such snapshots can always be inconsistent.
         */
        hiwater_vm = total_vm = mm->total_vm;
        if (hiwater_vm < mm->hiwater_vm)
                hiwater_vm = mm->hiwater_vm;
        hiwater_rss = total_rss = get_mm_rss(mm);
        if (hiwater_rss < mm->hiwater_rss)
                hiwater_rss = mm->hiwater_rss;

        data = mm->total_vm - mm->shared_vm - mm->stack_vm;
        text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
        lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
        buffer += sprintf(buffer,
                "VmPeak:\t%8lu kB\n"
                "VmSize:\t%8lu kB\n"
                "VmLck:\t%8lu kB\n"
                "VmHWM:\t%8lu kB\n"
                "VmRSS:\t%8lu kB\n"
                "VmData:\t%8lu kB\n"
                "VmStk:\t%8lu kB\n"
                "VmExe:\t%8lu kB\n"
                "VmLib:\t%8lu kB\n"
                "VmPTE:\t%8lu kB\n",
                hiwater_vm << (PAGE_SHIFT-10),
                (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
                mm->locked_vm << (PAGE_SHIFT-10),
                hiwater_rss << (PAGE_SHIFT-10),
                total_rss << (PAGE_SHIFT-10),
                data << (PAGE_SHIFT-10),
                mm->stack_vm << (PAGE_SHIFT-10), text, lib,
                (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
        return buffer;
}

unsigned long task_vsize(struct mm_struct *mm)
{
        return PAGE_SIZE * mm->total_vm;
}

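/*
 * Fill in the page counts backing /proc/<pid>/statm and return the
 * total number of pages mapped.
 */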
int task_statm(struct mm_struct *mm, int *shared, int *text,
               int *data, int *resident)
{
        *shared = get_mm_counter(mm, file_rss);
        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                                                                >> PAGE_SHIFT;
        *data = mm->total_vm - mm->shared_vm;
        *resident = *shared + get_mm_counter(mm, anon_rss);
        return mm->total_vm;
}

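/*
 * Resolve /proc/<pid>/exe: find the first executable, file-backed vma
 * and take references on its vfsmount and dentry.
 */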
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
        struct vm_area_struct * vma;
        int result = -ENOENT;
        struct task_struct *task = get_proc_task(inode);
        struct mm_struct * mm = NULL;

        if (task) {
                mm = get_task_mm(task);
                put_task_struct(task);
        }
        if (!mm)
                goto out;
        down_read(&mm->mmap_sem);

        vma = mm->mmap;
        while (vma) {
                if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
                        break;
                vma = vma->vm_next;
        }

        if (vma) {
                *mnt = mntget(vma->vm_file->f_path.mnt);
                *dentry = dget(vma->vm_file->f_path.dentry);
                result = 0;
        }

        up_read(&mm->mmap_sem);
        mmput(mm);
out:
        return result;
}

static void pad_len_spaces(struct seq_file *m, int len)
{
        len = 25 + sizeof(void*) * 6 - len;
        if (len < 1)
                len = 1;
        seq_printf(m, "%*c", len, ' ');
}

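/* Per-vma byte counters accumulated for /proc/<pid>/smaps. */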
struct mem_size_stats
{
        unsigned long resident;
        unsigned long shared_clean;
        unsigned long shared_dirty;
        unsigned long private_clean;
        unsigned long private_dirty;
};

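/*
 * A pmd walker bundles the vma under inspection, an opaque private
 * pointer and the action to apply to each present pmd range, so that
 * the generic page-table loops below stay independent of what smaps
 * (or any future caller) does per pmd.
 */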
struct pmd_walker {
        struct vm_area_struct *vma;
        void *private;
        void (*action)(struct vm_area_struct *, pmd_t *, unsigned long,
                       unsigned long, void *);
};

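/*
 * Emit one /proc/<pid>/maps line: address range, permissions, offset,
 * device, inode and name ([heap], [stack], [vdso] or the file path).
 * When @mss is non-NULL, the smaps size breakdown is appended.
 */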
static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
        struct proc_maps_private *priv = m->private;
        struct task_struct *task = priv->task;
        struct vm_area_struct *vma = v;
        struct mm_struct *mm = vma->vm_mm;
        struct file *file = vma->vm_file;
        int flags = vma->vm_flags;
        unsigned long ino = 0;
        dev_t dev = 0;
        int len;

        if (file) {
                struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
                dev = inode->i_sb->s_dev;
                ino = inode->i_ino;
        }

        seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
                        vma->vm_start,
                        vma->vm_end,
                        flags & VM_READ ? 'r' : '-',
                        flags & VM_WRITE ? 'w' : '-',
                        flags & VM_EXEC ? 'x' : '-',
                        flags & VM_MAYSHARE ? 's' : 'p',
                        vma->vm_pgoff << PAGE_SHIFT,
                        MAJOR(dev), MINOR(dev), ino, &len);

        /*
         * Print the dentry name for named mappings, and a
         * special [heap] marker for the heap:
         */
        if (file) {
                pad_len_spaces(m, len);
                seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n");
        } else {
                const char *name = arch_vma_name(vma);
                if (!name) {
                        if (mm) {
                                if (vma->vm_start <= mm->start_brk &&
                                                vma->vm_end >= mm->brk) {
                                        name = "[heap]";
                                } else if (vma->vm_start <= mm->start_stack &&
                                           vma->vm_end >= mm->start_stack) {
                                        name = "[stack]";
                                }
                        } else {
                                name = "[vdso]";
                        }
                }
                if (name) {
                        pad_len_spaces(m, len);
                        seq_puts(m, name);
                }
        }
        seq_putc(m, '\n');

        if (mss)
                seq_printf(m,
                           "Size:          %8lu kB\n"
                           "Rss:           %8lu kB\n"
                           "Shared_Clean:  %8lu kB\n"
                           "Shared_Dirty:  %8lu kB\n"
                           "Private_Clean: %8lu kB\n"
                           "Private_Dirty: %8lu kB\n",
                           (vma->vm_end - vma->vm_start) >> 10,
                           mss->resident >> 10,
                           mss->shared_clean  >> 10,
                           mss->shared_dirty  >> 10,
                           mss->private_clean >> 10,
                           mss->private_dirty >> 10);

        if (m->count < m->size)  /* vma is copied successfully */
                m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
        return 0;
}

static int show_map(struct seq_file *m, void *v)
{
        return show_map_internal(m, v, NULL);
}

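/*
 * Account every present pte in one pmd range, classifying each page as
 * shared or private (by mapcount) and clean or dirty (by the pte dirty
 * bit).
 */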
static void smaps_one_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                          unsigned long addr, unsigned long end,
                          void *private)
{
        struct mem_size_stats *mss = private;
        pte_t *pte, ptent;
        spinlock_t *ptl;
        struct page *page;

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                ptent = *pte;
                if (!pte_present(ptent))
                        continue;

                mss->resident += PAGE_SIZE;

                page = vm_normal_page(vma, addr, ptent);
                if (!page)
                        continue;

                if (page_mapcount(page) >= 2) {
                        if (pte_dirty(ptent))
                                mss->shared_dirty += PAGE_SIZE;
                        else
                                mss->shared_clean += PAGE_SIZE;
                } else {
                        if (pte_dirty(ptent))
                                mss->private_dirty += PAGE_SIZE;
                        else
                                mss->private_clean += PAGE_SIZE;
                }
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
}

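/* Apply the walker's action to every non-empty pmd within a pud. */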
static inline void for_each_pmd_in_pud(struct pmd_walker *walker, pud_t *pud,
                                       unsigned long addr, unsigned long end)
{
        pmd_t *pmd;
        unsigned long next;

        for (pmd = pmd_offset(pud, addr); addr != end;
             pmd++, addr = next) {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                walker->action(walker->vma, pmd, addr, next, walker->private);
        }
}

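/* Descend into every non-empty pud within a pgd. */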
static inline void for_each_pud_in_pgd(struct pmd_walker *walker, pgd_t *pgd,
                                       unsigned long addr, unsigned long end)
{
        pud_t *pud;
        unsigned long next;

        for (pud = pud_offset(pgd, addr); addr != end;
             pud++, addr = next) {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                for_each_pmd_in_pud(walker, pud, addr, next);
        }
}

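/*
 * Walk the vma's address range pgd by pgd, invoking @action with
 * @private on each pmd range that is actually populated.
 */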
static inline void for_each_pmd(struct vm_area_struct *vma,
                                void (*action)(struct vm_area_struct *, pmd_t *,
                                               unsigned long, unsigned long,
                                               void *),
                                void *private)
{
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;
        struct pmd_walker walker = {
                .vma            = vma,
                .private        = private,
                .action         = action,
        };
        pgd_t *pgd;
        unsigned long next;

        for (pgd = pgd_offset(vma->vm_mm, addr); addr != end;
             pgd++, addr = next) {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                for_each_pud_in_pgd(&walker, pgd, addr, next);
        }
}

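/*
 * Collect the smaps statistics for this vma (hugetlb mappings are
 * skipped) and print them after the regular maps line.
 */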
static int show_smap(struct seq_file *m, void *v)
{
        struct vm_area_struct *vma = v;
        struct mem_size_stats mss;

        memset(&mss, 0, sizeof mss);
        if (vma->vm_mm && !is_vm_hugetlb_page(vma))
                for_each_pmd(vma, smaps_one_pmd, &mss);
        return show_map_internal(m, v, &mss);
}

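/*
 * seq_file start: take a reference on the task and its mm, grab
 * mmap_sem for reading, and return the vma at *pos.  m->version caches
 * the start address of the last vma shown, so a subsequent read can
 * restart from find_vma() instead of rescanning the whole list.
 */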
static void *m_start(struct seq_file *m, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        unsigned long last_addr = m->version;
        struct mm_struct *mm;
        struct vm_area_struct *vma, *tail_vma = NULL;
        loff_t l = *pos;

        /* Clear the per syscall fields in priv */
        priv->task = NULL;
        priv->tail_vma = NULL;

        /*
         * We remember last_addr rather than next_addr to hit with
         * mmap_cache most of the time. We have zero last_addr at
         * the beginning and also after lseek. We will have -1 last_addr
         * after the end of the vmas.
         */

        if (last_addr == -1UL)
                return NULL;

        priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
        if (!priv->task)
                return NULL;

        mm = get_task_mm(priv->task);
        if (!mm)
                return NULL;

        priv->tail_vma = tail_vma = get_gate_vma(priv->task);
        down_read(&mm->mmap_sem);

        /* Start with last addr hint */
        if (last_addr && (vma = find_vma(mm, last_addr))) {
                vma = vma->vm_next;
                goto out;
        }

        /*
         * Check the vma index is within the range and do
         * sequential scan until m_index.
         */
        vma = NULL;
        if ((unsigned long)l < mm->map_count) {
                vma = mm->mmap;
                while (l-- && vma)
                        vma = vma->vm_next;
                goto out;
        }

        if (l != mm->map_count)
                tail_vma = NULL; /* After gate vma */

out:
        if (vma)
                return vma;

        /* End of vmas has been reached */
        m->version = (tail_vma != NULL)? 0: -1UL;
        up_read(&mm->mmap_sem);
        mmput(mm);
        return tail_vma;
}

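/* Release mmap_sem and the mm, unless vma is the gate vma. */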
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
        if (vma && vma != priv->tail_vma) {
                struct mm_struct *mm = vma->vm_mm;
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
}

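/*
 * seq_file next: advance to the following vma, then to the gate vma,
 * dropping the mm once the real vma list is exhausted.
 */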
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        struct vm_area_struct *vma = v;
        struct vm_area_struct *tail_vma = priv->tail_vma;

        (*pos)++;
        if (vma && (vma != tail_vma) && vma->vm_next)
                return vma->vm_next;
        vma_stop(priv, vma);
        return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
        struct proc_maps_private *priv = m->private;
        struct vm_area_struct *vma = v;

        vma_stop(priv, vma);
        if (priv->task)
                put_task_struct(priv->task);
}

static struct seq_operations proc_pid_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_map
};

static struct seq_operations proc_pid_smaps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_smap
};

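/*
 * Common open path for the maps, smaps and numa_maps files: allocate
 * the per-open proc_maps_private and hand the chosen seq_operations
 * to seq_open().
 */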
static int do_maps_open(struct inode *inode, struct file *file,
                        struct seq_operations *ops)
{
        struct proc_maps_private *priv;
        int ret = -ENOMEM;
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (priv) {
                priv->pid = proc_pid(inode);
                ret = seq_open(file, ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = priv;
                } else {
                        kfree(priv);
                }
        }
        return ret;
}

static int maps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
        .open           = maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};

#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static struct seq_operations proc_pid_numa_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_numa_map
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
        .open           = numa_maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};
#endif

static int smaps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
        .open           = smaps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};