x86: convert to generic helpers for IPI function calls
[linux-2.6-omap-h63xx.git] / arch/x86/kernel/smp.c
index 0cb7aadc87cd44a5e0c2402731cadbfd8c88ce8d..575aa3d7248ad2d6949d296c3e2fc5d922c85dfe 100644 (file)
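
This converts x86 to the generic IPI helpers in kernel/smp.c: the private call_data/call_lock machinery removed below goes away, and the architecture now only supplies the two vector senders hooked into smp_ops at the end of the diff. Callers keep using the existing smp_call_function() family. A minimal usage sketch, assuming the four-argument smp_call_function() this tree uses (func, info, nonatomic, wait) and a hypothetical callback:

#include <linux/kernel.h>
#include <linux/smp.h>

/* Hypothetical callback; runs in IPI context on each target CPU, must not sleep. */
static void report_cpu(void *info)
{
	printk(KERN_INFO "hello from CPU %d\n", smp_processor_id());
}

static void ping_other_cpus(void)
{
	/* nonatomic=0, wait=1: returns after every other online CPU ran the callback. */
	smp_call_function(report_cpu, NULL, 0, 1);
}
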
@@ -121,132 +121,23 @@ static void native_smp_send_reschedule(int cpu)
        send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
 }
 
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-       void (*func) (void *info);
-       void *info;
-       atomic_t started;
-       atomic_t finished;
-       int wait;
-};
-
-void lock_ipi_call_lock(void)
+void native_send_call_func_single_ipi(int cpu)
 {
-       spin_lock_irq(&call_lock);
-}
-
-void unlock_ipi_call_lock(void)
-{
-       spin_unlock_irq(&call_lock);
-}
-
-static struct call_data_struct *call_data;
-
-static void __smp_call_function(void (*func) (void *info), void *info,
-                               int nonatomic, int wait)
-{
-       struct call_data_struct data;
-       int cpus = num_online_cpus() - 1;
-
-       if (!cpus)
-               return;
-
-       data.func = func;
-       data.info = info;
-       atomic_set(&data.started, 0);
-       data.wait = wait;
-       if (wait)
-               atomic_set(&data.finished, 0);
-
-       call_data = &data;
-       mb();
-
-       /* Send a message to all other CPUs and wait for them to respond */
-       send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-
-       /* Wait for response */
-       while (atomic_read(&data.started) != cpus)
-               cpu_relax();
-
-       if (wait)
-               while (atomic_read(&data.finished) != cpus)
-                       cpu_relax();
+       send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
 }
 
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on.  Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
-  * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int
-native_smp_call_function_mask(cpumask_t mask,
-                             void (*func)(void *), void *info,
-                             int wait)
+void native_send_call_func_ipi(cpumask_t mask)
 {
-       struct call_data_struct data;
        cpumask_t allbutself;
-       int cpus;
-
-       /* Can deadlock when called with interrupts disabled */
-       WARN_ON(irqs_disabled());
-
-       /* Holding any lock stops cpus from going down. */
-       spin_lock(&call_lock);
 
        allbutself = cpu_online_map;
        cpu_clear(smp_processor_id(), allbutself);
 
-       cpus_and(mask, mask, allbutself);
-       cpus = cpus_weight(mask);
-
-       if (!cpus) {
-               spin_unlock(&call_lock);
-               return 0;
-       }
-
-       data.func = func;
-       data.info = info;
-       atomic_set(&data.started, 0);
-       data.wait = wait;
-       if (wait)
-               atomic_set(&data.finished, 0);
-
-       call_data = &data;
-       wmb();
-
-       /* Send a message to other CPUs */
        if (cpus_equal(mask, allbutself) &&
            cpus_equal(cpu_online_map, cpu_callout_map))
                send_IPI_allbutself(CALL_FUNCTION_VECTOR);
        else
                send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-
-       /* Wait for response */
-       while (atomic_read(&data.started) != cpus)
-               cpu_relax();
-
-       if (wait)
-               while (atomic_read(&data.finished) != cpus)
-                       cpu_relax();
-       spin_unlock(&call_lock);
-
-       return 0;
 }
 
 static void stop_this_cpu(void *dummy)
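
The replacement above only knows how to raise the two vectors; the queueing, bookkeeping, and waiting that the deleted code did by hand now live in kernel/smp.c. The new CALL_FUNCTION_SINGLE_VECTOR backs the single-CPU variant; a usage sketch, assuming the five-argument smp_call_function_single() of this tree (the retry argument is unused here) and a hypothetical per-CPU counter:

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned long, some_counter);	/* hypothetical */

/* Runs on the target CPU in IPI context. */
static void sample_counter(void *info)
{
	*(unsigned long *)info = __get_cpu_var(some_counter);
}

static unsigned long sample_cpu(int cpu)
{
	unsigned long val = 0;

	/* retry=0, wait=1: blocks until 'cpu' has run sample_counter(). */
	smp_call_function_single(cpu, sample_counter, &val, 0, 1);
	return val;
}
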
@@ -268,18 +159,13 @@ static void stop_this_cpu(void *dummy)
 
 static void native_smp_send_stop(void)
 {
-       int nolock;
        unsigned long flags;
 
        if (reboot_force)
                return;
 
-       /* Don't deadlock on the call lock in panic */
-       nolock = !spin_trylock(&call_lock);
+       smp_call_function(stop_this_cpu, NULL, 0, 0);
        local_irq_save(flags);
-       __smp_call_function(stop_this_cpu, NULL, 0, 0);
-       if (!nolock)
-               spin_unlock(&call_lock);
        disable_local_APIC();
        local_irq_restore(flags);
 }
@@ -301,33 +187,28 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
 
 void smp_call_function_interrupt(struct pt_regs *regs)
 {
-       void (*func) (void *info) = call_data->func;
-       void *info = call_data->info;
-       int wait = call_data->wait;
-
        ack_APIC_irq();
-       /*
-        * Notify initiating CPU that I've grabbed the data and am
-        * about to execute the function
-        */
-       mb();
-       atomic_inc(&call_data->started);
-       /*
-        * At this point the info structure may be out of scope unless wait==1
-        */
        irq_enter();
-       (*func)(info);
+       generic_smp_call_function_interrupt();
 #ifdef CONFIG_X86_32
        __get_cpu_var(irq_stat).irq_call_count++;
 #else
        add_pda(irq_call_count, 1);
 #endif
        irq_exit();
+}
 
-       if (wait) {
-               mb();
-               atomic_inc(&call_data->finished);
-       }
+void smp_call_function_single_interrupt(void)
+{
+       ack_APIC_irq();
+       irq_enter();
+       generic_smp_call_function_single_interrupt();
+#ifdef CONFIG_X86_32
+       __get_cpu_var(irq_stat).irq_call_count++;
+#else
+       add_pda(irq_call_count, 1);
+#endif
+       irq_exit();
 }
 
 struct smp_ops smp_ops = {
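
Both handlers are now thin wrappers: ack the APIC, enter IRQ context, hand off to the generic drainer, bump the call count. What generic_smp_call_function_single_interrupt() does on the target CPU can be paraphrased as below; this is simplified from kernel/smp.c of this series, with the per-entry wait/free flag handling omitted:

#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct call_single_data {
	struct list_head list;
	void (*func)(void *info);
	void *info;
};

struct call_single_queue {
	struct list_head list;
	spinlock_t lock;
};

static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);

void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	LIST_HEAD(list);

	/* Detach everything queued for this CPU, then run each entry. */
	spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *data =
			list_entry(list.next, struct call_single_data, list);

		list_del(&data->list);
		data->func(data->info);
	}
}
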
@@ -338,7 +219,8 @@ struct smp_ops smp_ops = {
 
        .smp_send_stop = native_smp_send_stop,
        .smp_send_reschedule = native_smp_send_reschedule,
-       .smp_call_function_mask = native_smp_call_function_mask,
+
+       .send_call_func_ipi = native_send_call_func_ipi,
+       .send_call_func_single_ipi = native_send_call_func_single_ipi,
 };
 EXPORT_SYMBOL_GPL(smp_ops);
-
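
The two new ops are what the generic code reaches through: elsewhere in this patch (include/asm-x86/smp.h), the arch hooks reduce to wrappers along these lines (paraphrased; that file is not part of this hunk):

static inline void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops.send_call_func_single_ipi(cpu);
}

static inline void arch_send_call_function_ipi(cpumask_t mask)
{
	smp_ops.send_call_func_ipi(mask);
}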