- Add comments explaining how drain_pages() works.
- Eliminate useless functions.
- Rename drain_all_local_pages to drain_all_pages(). It drains
  all pages, not only those of the local processor.
- Eliminate useless interrupt off/on sequences. drain_pages()
  disables interrupts on its own, and the execution thread is
  pinned to the processor by the caller, so there is no need for
  the caller to disable interrupts.
- Put drain_all_pages() declaration in gfp.h and remove the
declarations from suspend.h and from mm/memory_hotplug.c
- Make software suspend call drain_all_pages(). Draining only the
  processor-local pages may not be the right approach if software
  suspend wants to support SMP. If it calls drain_all_pages() then
  we can make drain_pages() static, as the sketch below shows.
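For reference, a minimal sketch of the resulting call structure, assembled
from the hunks below (drain_pages()'s per-zone loop is elided here because
the diff only shows its head):

    /* declarations added to include/linux/gfp.h */
    void drain_all_pages(void);
    void drain_local_pages(void *dummy);

    /*
     * Drain the per-cpu pages of the indicated processor. Disables
     * interrupts on its own, so callers need no irq off/on pairs.
     */
    static void drain_pages(unsigned int cpu)
    {
            /* per-zone pcp draining, elided */
    }

    /* Spill this CPU's per-cpu pages back into the buddy allocator. */
    void drain_local_pages(void *dummy)
    {
            drain_pages(smp_processor_id());
    }

    /* Spill every CPU's per-cpu pages back into the buddy allocator. */
    void drain_all_pages(void)
    {
            on_each_cpu(drain_local_pages, NULL, 0, 1);
    }

drain_local_pages() takes a dummy argument so that it can be passed
directly to on_each_cpu().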
[akpm@linux-foundation.org: fix build]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Daniel Walker <dwalker@mvista.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
+void drain_all_pages(void);
+void drain_local_pages(void *dummy);
#endif /* __LINUX_GFP_H */
-extern void drain_local_pages(void);
extern void mark_free_pages(struct zone *zone);
/**
printk(KERN_INFO "PM: Creating hibernation image: \n");
+ drain_local_pages(NULL);
nr_pages = count_data_pages();
nr_highmem = count_highmem_pages();
printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
/* During allocating of suspend pagedir, new cold pages may appear.
* Kill them.
*/
+ drain_local_pages(NULL);
copy_data_pages(&copy_bm, &orig_bm);
/*
-extern void drain_all_local_pages(void);
-
int offline_pages(unsigned long start_pfn,
unsigned long end_pfn, unsigned long timeout)
{
lru_add_drain_all();
flush_scheduled_work();
cond_resched();
- drain_all_local_pages();
+ drain_all_pages();
}
pfn = scan_lru_pages(start_pfn, end_pfn);
flush_scheduled_work();
yield();
/* drain pcp pages, this is synchronous. */
- drain_all_local_pages();
+ drain_all_pages();
/* check again */
offlined_pages = check_pages_isolated(start_pfn, end_pfn);
if (offlined_pages < 0) {
-static void __drain_pages(unsigned int cpu)
+/*
+ * Drain pages of the indicated processor.
+ *
+ * The processor must either be the current processor and the
+ * thread pinned to the current processor or a processor that
+ * is not online.
+ */
+static void drain_pages(unsigned int cpu)
{
unsigned long flags;
struct zone *zone;
+/*
+ * Spill all of this CPU's per-cpu pages back into the buddy allocator.
+ */
+void drain_local_pages(void *arg)
+{
+ drain_pages(smp_processor_id());
+}
+
+/*
+ * Spill all the per-cpu pages from all CPUs back into the buddy allocator
+ */
+void drain_all_pages(void)
+{
+ on_each_cpu(drain_local_pages, NULL, 0, 1);
+}
+
#ifdef CONFIG_HIBERNATION
void mark_free_pages(struct zone *zone)
-/*
- * Spill all of this CPU's per-cpu pages back into the buddy allocator.
- */
-void drain_local_pages(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __drain_pages(smp_processor_id());
- local_irq_restore(flags);
-}
-
-void smp_drain_local_pages(void *arg)
-{
- drain_local_pages();
-}
-
-/*
- * Spill all the per-cpu pages from all CPUs back into the buddy allocator
- */
-void drain_all_local_pages(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __drain_pages(smp_processor_id());
- local_irq_restore(flags);
-
- smp_call_function(smp_drain_local_pages, NULL, 0, 1);
-}
-
/*
* Free a 0-order page
*/
cond_resched();
if (order != 0)
- drain_all_local_pages();
+ drain_all_pages();
if (likely(did_some_progress)) {
page = get_page_from_freelist(gfp_mask, order,
int cpu = (unsigned long)hcpu;
if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
- local_irq_disable();
- __drain_pages(cpu);
+ drain_pages(cpu);
+
+ /*
+ * Spill the event counters of the dead processor
+ * into the current processor's event counters.
+ * This artificially elevates the count of the current
+ * processor.
+ */
vm_events_fold_cpu(cpu);
+
+ /*
+ * Zero the differential counters of the dead processor
+ * so that the vm statistics are consistent.
+ *
+ * This is only okay since the processor is dead and cannot
+ * race with what we are doing.
+ */
refresh_cpu_vm_stats(cpu);
}
return NOTIFY_OK;
out:
spin_unlock_irqrestore(&zone->lock, flags);
if (!ret)
- drain_all_local_pages();
+ drain_all_pages();