#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
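
/*
 * For illustration: with CONFIG_ZONE_DMA, CONFIG_ZONE_DMA32 and
 * CONFIG_HIGHMEM all enabled, FOR_ALL_ZONES(PGALLOC) expands to
 *
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH, PGALLOC_MOVABLE
 *
 * i.e. one event counter per configured zone, in zone order.
 */
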
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
#ifdef CONFIG_UNEVICTABLE_LRU
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
#endif
		NR_VM_EVENT_ITEMS
};

extern const struct seq_operations fragmentation_op;
extern const struct seq_operations pagetypeinfo_op;
extern const struct seq_operations zoneinfo_op;
extern const struct seq_operations vmstat_op;
extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented, and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}
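
/*
 * Usage sketch (illustrative): callers that already run with preemption
 * disabled can use the cheaper __count_vm_event(); everyone else should use
 * count_vm_event(), which pins the cpu via get_cpu_var()/put_cpu().
 * A fault path, for example, might do:
 *
 *	count_vm_event(PGFAULT);
 *	if (major_fault)	// major_fault: hypothetical caller-side flag
 *		count_vm_event(PGMAJFAULT);
 */
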
extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
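
/*
 * How the index arithmetic above works: FOR_ALL_ZONES() lays the per-zone
 * counters out in the same order as the zone indexes, so item##_NORMAL -
 * ZONE_NORMAL is the base of the counter block.  For a highmem zone,
 * __count_zone_vm_events(PGALLOC, zone, 1) therefore resolves to
 * __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + ZONE_HIGHMEM, 1),
 * which is PGALLOC_HIGH.
 */
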
/*
 * Zone-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}
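
/*
 * Note that zone_page_state_add() updates both the per-zone counter and the
 * global vm_stat[] array, so global_page_state() below costs a single
 * atomic_long_read() instead of a sum over all zones.
 */
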
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

extern unsigned long global_lru_pages(void);

static inline unsigned long zone_lru_pages(struct zone *zone)
{
	return (zone_page_state(zone, NR_ACTIVE_ANON)
		+ zone_page_state(zone, NR_ACTIVE_FILE)
		+ zone_page_state(zone, NR_INACTIVE_ANON)
		+ zone_page_state(zone, NR_INACTIVE_FILE));
}

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item.  This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
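
/*
 * Usage sketch (illustrative): a NUMA-aware caller reads a per-node total
 * with, e.g.,
 *
 *	unsigned long nr_file = node_page_state(nid, NR_FILE_PAGES);
 *
 * at the cost of one atomic read per configured zone of node nid.
 */
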
extern void zone_statistics(struct zone *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */

#define __add_zone_page_state(__z, __i, __d) \
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d) \
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
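
/*
 * The add/sub wrappers above are shorthand only; for illustration,
 *
 *	sub_zone_page_state(zone, NR_FILE_PAGES, nr);
 *
 * is exactly mod_zone_page_state(zone, NR_FILE_PAGES, -(nr)).
 */
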
static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
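
/*
 * Convention for the declarations above (implementations live in
 * mm/vmstat.c): the __-prefixed variants assume the caller already runs
 * with preemption or interrupts disabled, while the plain variants protect
 * themselves and are safe from any context.
 */
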
void refresh_cpu_vm_stats(int);
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif /* CONFIG_SMP */

#endif /* _LINUX_VMSTAT_H */