/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/log2.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "debugfs.h"

#ifdef CONFIG_XEN_DEBUG_FS
static struct xen_spinlock_stats
{
	u64 taken;
	u32 taken_slow;
	u32 taken_slow_nested;
	u32 taken_slow_pickup;
	u32 taken_slow_spurious;

	u64 released;
	u32 released_slow;
	u32 released_slow_kicked;

#define HISTO_BUCKETS	20
	u32 histo_spin_fast[HISTO_BUCKETS+1];
	u32 histo_spin[HISTO_BUCKETS+1];

	u64 spinning_time;
	u64 total_time;
} spinlock_stats;

static u8 zero_stats;

static unsigned lock_timeout = 1 << 10;
#define TIMEOUT lock_timeout

static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&spinlock_stats, 0, sizeof(spinlock_stats));
		zero_stats = 0;
	}
}

#define ADD_STATS(elem, val)	\
	do { check_zero(); spinlock_stats.elem += (val); } while(0)

static inline u64 spin_time_start(void)
{
	return xen_clocksource_read();
}

static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index = ilog2(delta);

	check_zero();

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_fast(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin_fast);
	spinlock_stats.spinning_time += delta;
}

static inline void spin_time_accum(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin);
	spinlock_stats.total_time += delta;
}

#else  /* !CONFIG_XEN_DEBUG_FS */
#define TIMEOUT			(1 << 10)
#define ADD_STATS(elem, val)	do { (void)(val); } while(0)

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_fast(u64 start)
{
}

static inline void spin_time_accum(u64 start)
{
}
#endif  /* CONFIG_XEN_DEBUG_FS */

struct xen_spinlock {
	unsigned char lock;		/* 0 -> free; 1 -> locked */
	unsigned short spinners;	/* count of waiting cpus */
};

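/*
 * Note: the pv ops below cast struct raw_spinlock pointers straight
 * to struct xen_spinlock, so this layout must fit within the generic
 * raw_spinlock storage.
 */
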
static int xen_spin_is_locked(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	return xl->lock != 0;
}

static int xen_spin_is_contended(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	/* Not strictly true; this is only the count of contended
	   lock-takers entering the slow path. */
	return xl->spinners != 0;
}

static int xen_spin_trylock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	u8 old = 1;

	asm("xchgb %b0,%1"
	    : "+q" (old), "+m" (xl->lock) : : "memory");

	return old == 0;
}

static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);

/*
 * Mark a cpu as interested in a lock.  Returns the CPU's previous
 * lock of interest, in case we got preempted by an interrupt.
 */
static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
{
	struct xen_spinlock *prev;

	prev = __get_cpu_var(lock_spinners);
	__get_cpu_var(lock_spinners) = xl;

	wmb();			/* set lock of interest before count */

	asm(LOCK_PREFIX " incw %0"
	    : "+m" (xl->spinners) : : "memory");

	return prev;
}

/*
 * Mark a cpu as no longer interested in a lock.  Restores previous
 * lock of interest (NULL for none).
 */
static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
{
	asm(LOCK_PREFIX " decw %0"
	    : "+m" (xl->spinners) : : "memory");
	wmb();			/* decrement count before restoring lock */
	__get_cpu_var(lock_spinners) = prev;
}

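/*
 * The wmb()s in spinning_lock()/unspinning_lock() order the per-cpu
 * lock_spinners pointer against the xl->spinners count: the unlocker
 * first sees a nonzero count and then scans lock_spinners for a cpu
 * to kick, so the pointer must be published before the count is
 * raised and cleared only after the count drops.
 */
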
static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	struct xen_spinlock *prev;
	int irq = __get_cpu_var(lock_kicker_irq);
	int ret;

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return 0;

	/* announce we're spinning */
	prev = spinning_lock(xl);

	ADD_STATS(taken_slow, 1);
	ADD_STATS(taken_slow_nested, prev != NULL);

	do {
		/* clear pending */
		xen_clear_irq_pending(irq);

		/* check again make sure it didn't become free while
		   we weren't looking */
		ret = xen_spin_trylock(lock);
		if (ret) {
			ADD_STATS(taken_slow_pickup, 1);

			/*
			 * If we interrupted another spinlock while it
			 * was blocking, make sure it doesn't block
			 * without rechecking the lock.
			 */
			if (prev != NULL)
				xen_set_irq_pending(irq);
			goto out;
		}

		/*
		 * Block until irq becomes pending.  If we're
		 * interrupted at this point (after the trylock but
		 * before entering the block), then the nested lock
		 * handler guarantees that the irq will be left
		 * pending if there's any chance the lock became free;
		 * xen_poll_irq() returns immediately if the irq is
		 * pending.
		 */
		xen_poll_irq(irq);
		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */

	kstat_this_cpu.irqs[irq]++;

out:
	unspinning_lock(xl, prev);
	return ret;
}

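/*
 * In short: the waiter registers itself in lock_spinners, retries the
 * lock once, and then sleeps in the hypervisor via xen_poll_irq()
 * until the holder sends the per-cpu kicker IPI from
 * xen_spin_unlock_slow().  Returning 0 sends the caller back around
 * the spinning fast path in xen_spin_lock().
 */
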
static void xen_spin_lock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	unsigned timeout;
	u8 oldval;
	u64 start_spin;

	ADD_STATS(taken, 1);

	start_spin = spin_time_start();

	do {
		u64 start_spin_fast = spin_time_start();

		timeout = TIMEOUT;

		asm("1: xchgb %1,%0\n"
		    "   testb %1,%1\n"
		    "   jz 3f\n"
		    "2: rep;nop\n"
		    "   cmpb $0,%0\n"
		    "   je 1b\n"
		    "   dec %2\n"
		    "   jnz 2b\n"
		    "3:\n"
		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
		    : "1" (1)
		    : "memory");

		spin_time_accum_fast(start_spin_fast);
	} while (unlikely(oldval != 0 && (TIMEOUT == ~0 || !xen_spin_lock_slow(lock))));

	spin_time_accum(start_spin);
}

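/*
 * The fast path above spins on xchgb for up to TIMEOUT iterations
 * (2^10 by default, tunable through the debugfs "timeout" file
 * below); only then does it fall into xen_spin_lock_slow() and block
 * in the hypervisor.  Setting the timeout to ~0 disables the slow
 * path entirely.
 */
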
static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
{
	int cpu;

	ADD_STATS(released_slow, 1);

	for_each_online_cpu(cpu) {
		/* XXX should mix up next cpu selection */
		if (per_cpu(lock_spinners, cpu) == xl) {
			ADD_STATS(released_slow_kicked, 1);
			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
			break;
		}
	}
}

static void xen_spin_unlock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	ADD_STATS(released, 1);

	smp_wmb();		/* make sure no writes get moved after unlock */
	xl->lock = 0;		/* release lock */

	/* make sure unlock happens before kick */
	barrier();

	if (unlikely(xl->spinners))
		xen_spin_unlock_slow(xl);
}

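/*
 * A waiter that registers in lock_spinners after the xl->spinners
 * check above misses the kick, but that is safe: it re-runs
 * xen_spin_trylock() before blocking and will observe the lock we
 * just freed.
 */
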
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

void __cpuinit xen_init_lock_cpu(int cpu)
{
	int irq;
	const char *name;

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
	}

	printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

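/*
 * The event channel is bound only so xen_poll_irq() and the unlock
 * IPI have something to target; it is left disabled, so dummy_handler
 * (which would BUG) is never actually invoked.
 */
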
void __init xen_init_spinlocks(void)
{
	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
	pv_lock_ops.spin_lock = xen_spin_lock;
	pv_lock_ops.spin_trylock = xen_spin_trylock;
	pv_lock_ops.spin_unlock = xen_spin_unlock;
}

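/*
 * Once these pv_lock_ops hooks are installed, every kernel spinlock
 * operation is routed through the implementations above instead of
 * the native spinlock code.
 */
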
#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_spin_debug;

static int __init xen_spinlock_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("timeout", 0644, d_spin_debug, &lock_timeout);

	debugfs_create_u64("taken", 0444, d_spin_debug, &spinlock_stats.taken);
	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow);
	debugfs_create_u32("taken_slow_nested", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_nested);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_pickup);
	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_spurious);

	debugfs_create_u64("released", 0444, d_spin_debug, &spinlock_stats.released);
	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.released_slow);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.released_slow_kicked);

	debugfs_create_u64("time_spinning", 0444, d_spin_debug,
			   &spinlock_stats.spinning_time);
	debugfs_create_u64("time_total", 0444, d_spin_debug,
			   &spinlock_stats.total_time);

	xen_debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
				     spinlock_stats.histo_spin, HISTO_BUCKETS + 1);
	xen_debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
				     spinlock_stats.histo_spin_fast, HISTO_BUCKETS + 1);

	return 0;
}

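/*
 * A minimal usage sketch, assuming debugfs is mounted at the usual
 * /sys/kernel/debug and the kernel was built with CONFIG_XEN_DEBUG_FS:
 *
 *	echo 1 > /sys/kernel/debug/xen/spinlocks/zero_stats
 *	cat /sys/kernel/debug/xen/spinlocks/taken_slow
 *
 * The histo_* files are ilog2-bucketed histograms (in clocksource
 * units) of time spent spinning and of total acquisition time.
 */
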
fs_initcall(xen_spinlock_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */