[PATCH] cpu hotplug: revert init patch submitted for 2.6.17
author    Chandra Seetharaman <sekharan@us.ibm.com>
          Tue, 27 Jun 2006 09:54:07 +0000 (02:54 -0700)
committer Linus Torvalds <torvalds@g5.osdl.org>
          Wed, 28 Jun 2006 00:32:40 +0000 (17:32 -0700)
In 2.6.17, there was a problem with cpu_notifiers and XFS.  I provided a
band-aid solution to solve that problem.  In the process, I undid all the
changes you both were making to ensure that these notifiers were available
only at init time (unless CONFIG_HOTPLUG_CPU is defined).

We deferred the real fix to 2.6.18.  Here is a set of patches that fixes the
XFS problem cleanly and makes the cpu notifiers available only at init time
(unless CONFIG_HOTPLUG_CPU is defined).

If CONFIG_HOTPLUG_CPU is defined then cpu notifiers are available at run
time.
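
The __cpuinit/__devinit annotations restored below come from <linux/init.h>:
with CONFIG_HOTPLUG_CPU the callbacks must stay resident, so the annotation
expands to nothing; without it the callback text can be dropped after boot.
A simplified sketch of the __cpuinit case (not the exact kernel definitions):

    #ifdef CONFIG_HOTPLUG_CPU
    /* CPUs may come and go at run time, so keep the callbacks resident. */
    #define __cpuinit
    #define __cpuinitdata
    #else
    /* No hotplug: callbacks only run during boot-time bring-up, so their
     * text and data can live in the discardable init sections. */
    #define __cpuinit	__init
    #define __cpuinitdata	__initdata
    #endif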

This patch reverts the notifier_call changes made in 2.6.17.

Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Cc: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
18 files changed:
arch/i386/kernel/cpu/intel_cacheinfo.c
arch/ia64/kernel/palinfo.c
arch/ia64/kernel/salinfo.c
arch/ia64/kernel/topology.c
arch/powerpc/kernel/sysfs.c
arch/x86_64/kernel/mce.c
drivers/base/topology.c
drivers/cpufreq/cpufreq.c
kernel/hrtimer.c
kernel/profile.c
kernel/rcupdate.c
kernel/softirq.c
kernel/softlockup.c
kernel/timer.c
kernel/workqueue.c
mm/page_alloc.c
mm/slab.c
mm/vmscan.c

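Every hunk below makes the same kind of change: the notifier callback gains a
__cpuinit or __devinit annotation while its body and registration stay as they
were.  A minimal sketch of the resulting pattern, using hypothetical example_*
names rather than code from any of the files in this patch:

    #include <linux/cpu.h>
    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/notifier.h>

    /* Annotated callback: discarded after boot unless CONFIG_HOTPLUG_CPU is set. */
    static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
                                               unsigned long action, void *hcpu)
    {
            unsigned int cpu = (unsigned long)hcpu;

            switch (action) {
            case CPU_UP_PREPARE:
                    /* allocate per-cpu state for 'cpu' before it starts */
                    break;
            case CPU_ONLINE:
                    printk(KERN_INFO "example: cpu %u is online\n", cpu);
                    break;
    #ifdef CONFIG_HOTPLUG_CPU
            case CPU_DEAD:
                    /* tear down per-cpu state; only reachable with hotplug */
                    break;
    #endif
            }
            return NOTIFY_OK;
    }

    static struct notifier_block example_cpu_notifier = {
            .notifier_call = example_cpu_callback,
    };

    static int __init example_init(void)
    {
            register_cpu_notifier(&example_cpu_notifier);
            return 0;
    }
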
index 8a92642ea59034353228959c40994b486e576d16..1d4ab10479823f798dba281c46e37e2f1508d147 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -729,7 +729,7 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
        return;
 }
 
-static int cacheinfo_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
 {
        unsigned int cpu = (unsigned long)hcpu;
index 859fb37ff49b682799b65b1384b9d05cbc576b88..6386f63c413e60b82449d691cf2f1f17bf333aab 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -959,7 +959,7 @@ remove_palinfo_proc_entries(unsigned int hcpu)
        }
 }
 
-static int palinfo_cpu_callback(struct notifier_block *nfb,
+static int __devinit palinfo_cpu_callback(struct notifier_block *nfb,
                                                                unsigned long action,
                                                                void *hcpu)
 {
index 663a186ad194a1abfaa7b00b72a7b93afbcbe107..9d5a823479a3f2dd8d6aef4e56edee4a1d309743 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -572,7 +572,7 @@ static struct file_operations salinfo_data_fops = {
 };
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int
+static int __devinit
 salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 {
        unsigned int i, cpu = (unsigned long)hcpu;
index 5737c9a061efc53366c54f5c9025fdb349acb151..f07c382b57b8c7eec4139cb8ed76acb4a859965d 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -404,7 +404,7 @@ static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
  * When a cpu is hot-plugged, do a check and initiate
  * cache kobject if necessary
  */
-static int cache_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
                unsigned long action, void *hcpu)
 {
        unsigned int cpu = (unsigned long)hcpu;
index 412ad00e222d37c3dec0938a6847d03b6ef833bc..0231869613ce81c45fd9125059b234ea2baee5d1 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -279,7 +279,7 @@ static void unregister_cpu_online(unsigned int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int sysfs_cpu_notify(struct notifier_block *self,
+static int __devinit sysfs_cpu_notify(struct notifier_block *self,
                                      unsigned long action, void *hcpu)
 {
        unsigned int cpu = (unsigned int)(long)hcpu;
index acd5816b1a6f214d2dfc5253d674ed9d81492fe5..efe8500a5b9db7af0a60dd2c59ab5250d7f9353a 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -629,7 +629,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
 #endif
 
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static int
+static __cpuinit int
 mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
        unsigned int cpu = (unsigned long)hcpu;
index 8c52421cbc545b54a6ce1c84c0cf1bf3f734c751..915810f6237eeca929235e1502983080989a3376 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -107,7 +107,7 @@ static int __cpuinit topology_remove_dev(struct sys_device * sys_dev)
        return 0;
 }
 
-static int topology_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
                unsigned long action, void *hcpu)
 {
        unsigned int cpu = (unsigned long)hcpu;
index 44d1eca83a7250748bf27c637998c3c2973f9657..486ef66647087fe5042ea00a6e9860b8e4a8160d 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1497,7 +1497,7 @@ int cpufreq_update_policy(unsigned int cpu)
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
 
-static int cpufreq_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
 {
        unsigned int cpu = (unsigned long)hcpu;
index 55601b3ce60e92717fa6c6d771bc1a67dea7a03e..f9f5319166129bc0f1753e0984d051a54d5401ae 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -833,7 +833,7 @@ static void migrate_hrtimers(int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int hrtimer_cpu_notify(struct notifier_block *self,
+static int __devinit hrtimer_cpu_notify(struct notifier_block *self,
                                        unsigned long action, void *hcpu)
 {
        long cpu = (long)hcpu;
index 68afe121e5071f0574e37e7b9e20f1d66bd4c290..5a730fdb1a2cecf6b10c2112ba777fbb5fb7c794 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -299,7 +299,7 @@ out:
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int profile_cpu_callback(struct notifier_block *info,
+static int __devinit profile_cpu_callback(struct notifier_block *info,
                                        unsigned long action, void *__cpu)
 {
        int node, cpu = (unsigned long)__cpu;
index c0e1cb95dd4f3c5caa3d581e75a312efa36a9c3d..a8d80b7048b9177c639401e8cf7e6e4ab986aa22 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -548,7 +548,7 @@ static void __devinit rcu_online_cpu(int cpu)
        tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
 }
 
-static int rcu_cpu_notify(struct notifier_block *self,
+static int __devinit rcu_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
 {
        long cpu = (long)hcpu;
index 9e2f1c6e73d7b341958c74baa8af54169c88c119..db65a311f14e187a5383ed2cf31536ee1f16ab0d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -446,7 +446,7 @@ static void takeover_tasklets(unsigned int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int cpu_callback(struct notifier_block *nfb,
+static int __devinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
 {
index b5c3b94e01ce7408a9f3d0dae310cff0a6939a36..29da0a847ba2a2b8f15d1b9d325300aee0e4c09a 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -104,7 +104,7 @@ static int watchdog(void * __bind_cpu)
 /*
  * Create/destroy watchdog threads as CPUs come and go:
  */
-static int
+static int __devinit
 cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
        int hotcpu = (unsigned long)hcpu;
index 5bb6b7976eecf6c215b561b42b4d79c4197507eb..878194ec8bd6a45043d32f1da3534229eda2d14d 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1652,7 +1652,7 @@ static void __devinit migrate_timers(int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int timer_cpu_notify(struct notifier_block *self,
+static int __devinit timer_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
 {
        long cpu = (long)hcpu;
index 565cf7a1febda94b88582c6e9326d782fb29f96c..59f0b42bd89e0e819a1a67145f48a11f2898bbd2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -559,7 +559,7 @@ static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 }
 
 /* We're holding the cpucontrol mutex here */
-static int workqueue_cpu_callback(struct notifier_block *nfb,
+static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
 {
index 9f86191bb632955a224d94ddf6c53980c64e9244..e9fb2d4064c892e4bdd51287fa79a1d7ce417e27 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2009,7 +2009,7 @@ static inline void free_zone_pagesets(int cpu)
        }
 }
 
-static int pageset_cpuup_callback(struct notifier_block *nfb,
+static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
                unsigned long action,
                void *hcpu)
 {
index 47982c2d9f39929ef633f00b81c30cb26ccecd07..631c0feb9645a4c0b83de61ad3af548633791cfb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1073,7 +1073,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 
 #endif
 
-static int cpuup_callback(struct notifier_block *nfb,
+static int __devinit cpuup_callback(struct notifier_block *nfb,
                                    unsigned long action, void *hcpu)
 {
        long cpu = (long)hcpu;
index f03da33d91475ae7e50e706df92819783bb22207..eeacb0d695c35233e57688e4d20314a149d1d22c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1450,7 +1450,7 @@ out:
    not required for correctness.  So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
    restore their cpu bindings. */
-static int cpu_callback(struct notifier_block *nfb,
+static int __devinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action, void *hcpu)
 {
        pg_data_t *pgdat;