x86: remove additional_cpus configurability
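This blobdiff appears to bundle several unrelated changes; the one the title names is in the last hunks: the "additional_cpus=" early_param and its CONFIG_HOTPLUG_CPU-only handling are removed, so prefill_possible_map() always sizes cpu_possible_map as the number of detected processors plus any firmware-disabled (hotpluggable) CPUs, capped at NR_CPUS. Below is a minimal user-space sketch of that sizing rule only; possible_cpus(), the sample counts, and the small NR_CPUS value are illustrative assumptions, not kernel code.

    #include <stdio.h>

    #define NR_CPUS 8	/* assumed compile-time cap, only for this sketch */

    /* Mirrors the post-patch prefill_possible_map() arithmetic: there is no
     * "additional_cpus=" override any more, so the headroom defaults to the
     * firmware-reported count of disabled CPUs. */
    static int possible_cpus(int num_processors, int disabled_cpus)
    {
    	int additional_cpus = -1;	/* former boot parameter, now fixed at -1 */
    	int possible;

    	if (!num_processors)
    		num_processors = 1;

    	if (additional_cpus == -1)
    		additional_cpus = disabled_cpus > 0 ? disabled_cpus : 0;

    	possible = num_processors + additional_cpus;
    	if (possible > NR_CPUS)
    		possible = NR_CPUS;
    	return possible;
    }

    int main(void)
    {
    	/* e.g. 2 enabled + 2 disabled CPUs in the ACPI tables -> 4 possible */
    	printf("possible cpus: %d\n", possible_cpus(2, 2));
    	return 0;
    }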
[linux-2.6-omap-h63xx.git] / arch/x86/kernel/smpboot.c
index 06f1407d55423507c920b9d1c0c19827098cc057..8dd201c313296a6f127261e2f34a64abf26ee0ab 100644 (file)
@@ -89,7 +89,7 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
 #define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
 #define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
 #else
-struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
+static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 #define get_idle_for_cpu(x)      (idle_thread_array[(x)])
 #define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
 #endif
@@ -124,13 +124,12 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 static atomic_t init_deasserted;
 
-static int boot_cpu_logical_apicid;
 
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
 
 /* Set if we find a B stepping CPU */
-int __cpuinitdata smp_b_stepping;
+static int __cpuinitdata smp_b_stepping;
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
 
@@ -166,6 +165,8 @@ static void unmap_cpu_to_node(int cpu)
 #endif
 
 #ifdef CONFIG_X86_32
+static int boot_cpu_logical_apicid;
+
 u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
                                        { [0 ... NR_CPUS-1] = BAD_APICID };
 
@@ -211,7 +212,7 @@ static void __cpuinit smp_callin(void)
        /*
         * (This works even if the APIC is not enabled.)
         */
-       phys_id = GET_APIC_ID(read_apic_id());
+       phys_id = read_apic_id();
        cpuid = smp_processor_id();
        if (cpu_isset(cpuid, cpu_callin_map)) {
                panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
@@ -258,6 +259,7 @@ static void __cpuinit smp_callin(void)
        end_local_APIC_setup();
        map_cpu_to_logical_apicid();
 
+       notify_cpu_starting(cpuid);
        /*
         * Get our bogomips.
         *
@@ -332,14 +334,17 @@ static void __cpuinit start_secondary(void *unused)
         * does not change while we are assigning vectors to cpus.  Holding
         * this lock ensures we don't half assign or remove an irq from a cpu.
         */
-       ipi_call_lock_irq();
+       ipi_call_lock();
        lock_vector_lock();
        __setup_vector_irq(smp_processor_id());
        cpu_set(smp_processor_id(), cpu_online_map);
        unlock_vector_lock();
-       ipi_call_unlock_irq();
+       ipi_call_unlock();
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 
+       /* enable local interrupts */
+       local_irq_enable();
+
        setup_secondary_clock();
 
        wmb();
@@ -551,8 +556,7 @@ static inline void __inquire_remote_apic(int apicid)
                        printk(KERN_CONT
                               "a previous APIC delivery may have failed\n");
 
-               apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
-               apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]);
+               apic_icr_write(APIC_DM_REMRD | regs[i], apicid);
 
                timeout = 0;
                do {
@@ -584,11 +588,9 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
        int maxlvt;
 
        /* Target chip */
-       apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));
-
        /* Boot on the stack */
        /* Kick the second */
-       apic_write(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);
+       apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid);
 
        pr_debug("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();
@@ -597,10 +599,12 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
         * Give the other CPU some time to accept the IPI.
         */
        udelay(200);
-       maxlvt = lapic_get_maxlvt();
-       if (maxlvt > 3)                 /* Due to the Pentium erratum 3AP.  */
-               apic_write(APIC_ESR, 0);
-       accept_status = (apic_read(APIC_ESR) & 0xEF);
+       if (APIC_INTEGRATED(apic_version[phys_apicid])) {
+               maxlvt = lapic_get_maxlvt();
+               if (maxlvt > 3)                 /* Due to the Pentium erratum 3AP.  */
+                       apic_write(APIC_ESR, 0);
+               accept_status = (apic_read(APIC_ESR) & 0xEF);
+       }
        pr_debug("NMI sent.\n");
 
        if (send_status)
@@ -641,13 +645,11 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
        /*
         * Turn INIT on target chip
         */
-       apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
        /*
         * Send IPI
         */
-       apic_write(APIC_ICR,
-                  APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT);
+       apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
+                      phys_apicid);
 
        pr_debug("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();
@@ -657,10 +659,8 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
        pr_debug("Deasserting INIT.\n");
 
        /* Target chip */
-       apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
        /* Send IPI */
-       apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
+       apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);
 
        pr_debug("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();
@@ -703,11 +703,10 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
                 */
 
                /* Target chip */
-               apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
                /* Boot on the stack */
                /* Kick the second */
-               apic_write(APIC_ICR, APIC_DM_STARTUP | (start_eip >> 12));
+               apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
+                              phys_apicid);
 
                /*
                 * Give the other CPU some time to accept the IPI.
@@ -1176,10 +1175,17 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
         * Setup boot CPU information
         */
        smp_store_cpu_info(0); /* Final full version of the data */
+#ifdef CONFIG_X86_32
        boot_cpu_logical_apicid = logical_smp_processor_id();
+#endif
        current_thread_info()->cpu = 0;  /* needed? */
        set_cpu_sibling_map(0);
 
+#ifdef CONFIG_X86_64
+       enable_IR_x2apic();
+       setup_apic_routing();
+#endif
+
        if (smp_sanity_check(max_cpus) < 0) {
                printk(KERN_INFO "SMP disabled\n");
                disable_smp();
@@ -1187,9 +1193,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
        }
 
        preempt_disable();
-       if (GET_APIC_ID(read_apic_id()) != boot_cpu_physical_apicid) {
+       if (read_apic_id() != boot_cpu_physical_apicid) {
                panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
-                    GET_APIC_ID(read_apic_id()), boot_cpu_physical_apicid);
+                    read_apic_id(), boot_cpu_physical_apicid);
                /* Or can we switch back to PIC here? */
        }
        preempt_enable();
@@ -1255,38 +1261,7 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
        check_nmi_watchdog();
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void remove_siblinginfo(int cpu)
-{
-       int sibling;
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-       for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
-               cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
-               /*/
-                * last thread sibling in this cpu core going down
-                */
-               if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
-                       cpu_data(sibling).booted_cores--;
-       }
-
-       for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
-               cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
-       cpus_clear(per_cpu(cpu_sibling_map, cpu));
-       cpus_clear(per_cpu(cpu_core_map, cpu));
-       c->phys_proc_id = 0;
-       c->cpu_core_id = 0;
-       cpu_clear(cpu, cpu_sibling_setup_map);
-}
-
-static int additional_cpus __initdata = -1;
-
-static __init int setup_additional_cpus(char *s)
-{
-       return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
-}
-early_param("additional_cpus", setup_additional_cpus);
+static int additional_cpus = -1;
 
 /*
  * cpu_possible_map should be static, it cannot change as cpu's
@@ -1314,16 +1289,13 @@ __init void prefill_possible_map(void)
        if (!num_processors)
                num_processors = 1;
 
-#ifdef CONFIG_HOTPLUG_CPU
        if (additional_cpus == -1) {
                if (disabled_cpus > 0)
                        additional_cpus = disabled_cpus;
                else
                        additional_cpus = 0;
        }
-#else
-       additional_cpus = 0;
-#endif
+
        possible = num_processors + additional_cpus;
        if (possible > NR_CPUS)
                possible = NR_CPUS;
@@ -1337,6 +1309,31 @@ __init void prefill_possible_map(void)
        nr_cpu_ids = possible;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void remove_siblinginfo(int cpu)
+{
+       int sibling;
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+       for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
+               cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
+               /*/
+                * last thread sibling in this cpu core going down
+                */
+               if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
+                       cpu_data(sibling).booted_cores--;
+       }
+
+       for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
+               cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
+       cpus_clear(per_cpu(cpu_sibling_map, cpu));
+       cpus_clear(per_cpu(cpu_core_map, cpu));
+       c->phys_proc_id = 0;
+       c->cpu_core_id = 0;
+       cpu_clear(cpu, cpu_sibling_setup_map);
+}
+
 static void __ref remove_cpu_from_maps(int cpu)
 {
        cpu_clear(cpu, cpu_online_map);