x86: split spinlock implementations out into their own files
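
This hunk set removes the Xen byte-spinlock implementation from arch/x86/xen/smp.c: struct xen_spinlock, the is_locked/is_contended/lock/trylock/unlock ops, the per-cpu lock-kicker IRQ setup in xen_init_lock_cpu(), and the pv_lock_ops registration in xen_init_spinlocks(), along with the now-unneeded <linux/kernel_stat.h> include and the xen_init_lock_cpu() forward declaration. Per the commit title the code moves into a file of its own (presumably arch/x86/xen/spinlock.c in mainline), leaving smp.c to cover only CPU bring-up and IPI plumbing.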
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d8faf79a0a1da22b57d4f39ee58abc1ca9b908d7..baca7f2fbd8a90fda8f031e8c860df88f9c34b56 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -15,7 +15,6 @@
  * This does not handle HOTPLUG_CPU yet.
  */
 #include <linux/sched.h>
-#include <linux/kernel_stat.h>
 #include <linux/err.h>
 #include <linux/smp.h>
 
@@ -36,8 +35,6 @@
 #include "xen-ops.h"
 #include "mmu.h"
 
-static void __cpuinit xen_init_lock_cpu(int cpu);
-
 cpumask_t xen_cpu_initialized_map;
 
 static DEFINE_PER_CPU(int, resched_irq);
@@ -419,170 +416,6 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-struct xen_spinlock {
-       unsigned char lock;             /* 0 -> free; 1 -> locked */
-       unsigned short spinners;        /* count of waiting cpus */
-};
-
-static int xen_spin_is_locked(struct raw_spinlock *lock)
-{
-       struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
-       return xl->lock != 0;
-}
-
-static int xen_spin_is_contended(struct raw_spinlock *lock)
-{
-       struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
-       /* Not strictly true; this is only the count of contended
-          lock-takers entering the slow path. */
-       return xl->spinners != 0;
-}
-
-static int xen_spin_trylock(struct raw_spinlock *lock)
-{
-       struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-       u8 old = 1;
-
-       asm("xchgb %b0,%1"
-           : "+q" (old), "+m" (xl->lock) : : "memory");
-
-       return old == 0;
-}
-
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
-
-static inline void spinning_lock(struct xen_spinlock *xl)
-{
-       __get_cpu_var(lock_spinners) = xl;
-       wmb();                  /* set lock of interest before count */
-       asm(LOCK_PREFIX " incw %0"
-           : "+m" (xl->spinners) : : "memory");
-}
-
-static inline void unspinning_lock(struct xen_spinlock *xl)
-{
-       asm(LOCK_PREFIX " decw %0"
-           : "+m" (xl->spinners) : : "memory");
-       wmb();                  /* decrement count before clearing lock */
-       __get_cpu_var(lock_spinners) = NULL;
-}
-
-static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
-{
-       struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-       int irq = __get_cpu_var(lock_kicker_irq);
-       int ret;
-
-       /* If kicker interrupts not initialized yet, just spin */
-       if (irq == -1)
-               return 0;
-
-       /* announce we're spinning */
-       spinning_lock(xl);
-
-       /* clear pending */
-       xen_clear_irq_pending(irq);
-
-       /* check again make sure it didn't become free while
-          we weren't looking  */
-       ret = xen_spin_trylock(lock);
-       if (ret)
-               goto out;
-
-       /* block until irq becomes pending */
-       xen_poll_irq(irq);
-       kstat_this_cpu.irqs[irq]++;
-
-out:
-       unspinning_lock(xl);
-       return ret;
-}
-
-static void xen_spin_lock(struct raw_spinlock *lock)
-{
-       struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-       int timeout;
-       u8 oldval;
-
-       do {
-               timeout = 1 << 10;
-
-               asm("1: xchgb %1,%0\n"
-                   "   testb %1,%1\n"
-                   "   jz 3f\n"
-                   "2: rep;nop\n"
-                   "   cmpb $0,%0\n"
-                   "   je 1b\n"
-                   "   dec %2\n"
-                   "   jnz 2b\n"
-                   "3:\n"
-                   : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
-                   : "1" (1)
-                   : "memory");
-
-       } while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
-}
-
-static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
-{
-       int cpu;
-
-       for_each_online_cpu(cpu) {
-               /* XXX should mix up next cpu selection */
-               if (per_cpu(lock_spinners, cpu) == xl) {
-                       xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
-                       break;
-               }
-       }
-}
-
-static void xen_spin_unlock(struct raw_spinlock *lock)
-{
-       struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
-       smp_wmb();              /* make sure no writes get moved after unlock */
-       xl->lock = 0;           /* release lock */
-
-       /* make sure unlock happens before kick */
-       barrier();
-
-       if (unlikely(xl->spinners))
-               xen_spin_unlock_slow(xl);
-}
-
-static __cpuinit void xen_init_lock_cpu(int cpu)
-{
-       int irq;
-       const char *name;
-
-       name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
-       irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
-                                    cpu,
-                                    xen_reschedule_interrupt,
-                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
-                                    name,
-                                    NULL);
-
-       if (irq >= 0) {
-               disable_irq(irq); /* make sure it's never delivered */
-               per_cpu(lock_kicker_irq, cpu) = irq;
-       }
-
-       printk("cpu %d spinlock event irq %d\n", cpu, irq);
-}
-
-static void __init xen_init_spinlocks(void)
-{
-       pv_lock_ops.spin_is_locked = xen_spin_is_locked;
-       pv_lock_ops.spin_is_contended = xen_spin_is_contended;
-       pv_lock_ops.spin_lock = xen_spin_lock;
-       pv_lock_ops.spin_trylock = xen_spin_trylock;
-       pv_lock_ops.spin_unlock = xen_spin_unlock;
-}
-
 static const struct smp_ops xen_smp_ops __initdata = {
        .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_smp_prepare_cpus,
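
Editor's note: the code removed above is a "byte lock" whose slow path parks the waiter on a per-cpu Xen event channel instead of burning CPU. The sketch below isolates just the xchg-based fast path as self-contained user-space C (GCC on x86; the names byte_lock, byte_trylock, byte_lock_acquire and byte_lock_release are illustrative, not kernel API, and the Xen poll/kick slow path is omitted). It is a minimal illustration of the technique, not the kernel's implementation:

    #include <stdio.h>

    struct byte_lock {
            unsigned char lock;             /* 0 -> free; 1 -> locked */
    };

    static int byte_trylock(struct byte_lock *bl)
    {
            unsigned char old = 1;

            /* Atomically swap 1 into the lock byte; 'old' receives the
             * previous value, so old == 0 means we took a free lock. */
            asm volatile("xchgb %b0,%1"
                         : "+q" (old), "+m" (bl->lock) : : "memory");

            return old == 0;
    }

    static void byte_lock_acquire(struct byte_lock *bl)
    {
            while (!byte_trylock(bl))
                    asm volatile("rep; nop");       /* x86 PAUSE while spinning */
    }

    static void byte_lock_release(struct byte_lock *bl)
    {
            asm volatile("" ::: "memory");  /* keep prior stores before the release */
            bl->lock = 0;                   /* plain store: x86 stores are release-ordered */
    }

    int main(void)
    {
            struct byte_lock bl = { 0 };

            if (byte_trylock(&bl))
                    printf("fast path: acquired\n");
            byte_lock_release(&bl);

            byte_lock_acquire(&bl);
            printf("spin path: acquired\n");
            byte_lock_release(&bl);
            return 0;
    }

In the removed kernel version, a waiter that exhausts its spin budget (1 << 10 iterations) additionally records the lock in its per-cpu lock_spinners slot, increments xl->spinners, and sleeps in xen_poll_irq(); the unlocker checks xl->spinners after releasing and kicks one registered waiter with XEN_SPIN_UNLOCK_VECTOR.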