/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>
/*
 * Layout overlaid onto the generic raw_spinlock storage; the pv lock
 * functions below cast between the two representations.
 */
struct xen_spinlock {
	unsigned char lock;		/* 0 -> free; 1 -> locked */
	unsigned short spinners;	/* count of waiting cpus */
};
20 static int xen_spin_is_locked(struct raw_spinlock *lock)
22 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
27 static int xen_spin_is_contended(struct raw_spinlock *lock)
29 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
31 /* Not strictly true; this is only the count of contended
32 lock-takers entering the slow path. */
33 return xl->spinners != 0;
36 static int xen_spin_trylock(struct raw_spinlock *lock)
38 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
42 : "+q" (old), "+m" (xl->lock) : : "memory");
47 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
48 static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
51 * Mark a cpu as interested in a lock. Returns the CPU's previous
52 * lock of interest, in case we got preempted by an interrupt.
54 static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
56 struct xen_spinlock *prev;
58 prev = __get_cpu_var(lock_spinners);
59 __get_cpu_var(lock_spinners) = xl;
61 wmb(); /* set lock of interest before count */
63 asm(LOCK_PREFIX " incw %0"
64 : "+m" (xl->spinners) : : "memory");
70 * Mark a cpu as no longer interested in a lock. Restores previous
71 * lock of interest (NULL for none).
73 static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
75 asm(LOCK_PREFIX " decw %0"
76 : "+m" (xl->spinners) : : "memory");
77 wmb(); /* decrement count before restoring lock */
78 __get_cpu_var(lock_spinners) = prev;
81 static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
83 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
84 struct xen_spinlock *prev;
85 int irq = __get_cpu_var(lock_kicker_irq);
88 /* If kicker interrupts not initialized yet, just spin */
92 /* announce we're spinning */
93 prev = spinning_lock(xl);
97 xen_clear_irq_pending(irq);
99 /* check again make sure it didn't become free while
100 we weren't looking */
101 ret = xen_spin_trylock(lock);
104 * If we interrupted another spinlock while it
105 * was blocking, make sure it doesn't block
106 * without rechecking the lock.
109 xen_set_irq_pending(irq);
114 * Block until irq becomes pending. If we're
115 * interrupted at this point (after the trylock but
116 * before entering the block), then the nested lock
117 * handler guarantees that the irq will be left
118 * pending if there's any chance the lock became free;
119 * xen_poll_irq() returns immediately if the irq is
123 } while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
125 kstat_this_cpu.irqs[irq]++;
128 unspinning_lock(xl, prev);
132 static void xen_spin_lock(struct raw_spinlock *lock)
134 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
141 asm("1: xchgb %1,%0\n"
150 : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
154 } while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
157 static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
161 for_each_online_cpu(cpu) {
162 /* XXX should mix up next cpu selection */
163 if (per_cpu(lock_spinners, cpu) == xl) {
164 xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
170 static void xen_spin_unlock(struct raw_spinlock *lock)
172 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
174 smp_wmb(); /* make sure no writes get moved after unlock */
175 xl->lock = 0; /* release lock */
177 /* make sure unlock happens before kick */
180 if (unlikely(xl->spinners))
181 xen_spin_unlock_slow(xl);
184 static irqreturn_t dummy_handler(int irq, void *dev_id)
190 void __cpuinit xen_init_lock_cpu(int cpu)
195 name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
196 irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
199 IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
204 disable_irq(irq); /* make sure it's never delivered */
205 per_cpu(lock_kicker_irq, cpu) = irq;
208 printk("cpu %d spinlock event irq %d\n", cpu, irq);
211 void __init xen_init_spinlocks(void)
213 pv_lock_ops.spin_is_locked = xen_spin_is_locked;
214 pv_lock_ops.spin_is_contended = xen_spin_is_contended;
215 pv_lock_ops.spin_lock = xen_spin_lock;
216 pv_lock_ops.spin_trylock = xen_spin_trylock;
217 pv_lock_ops.spin_unlock = xen_spin_unlock;