/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */

#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/log2.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "debugfs.h"

#ifdef CONFIG_XEN_DEBUG_FS
static struct xen_spinlock_stats
{
	u64 taken;
	u32 taken_slow;
	u32 taken_slow_nested;
	u32 taken_slow_pickup;
	u32 taken_slow_spurious;
	u32 taken_slow_irqenable;

	u64 released;
	u32 released_slow;
	u32 released_slow_kicked;

#define HISTO_BUCKETS	20
	u32 histo_spin_fast[HISTO_BUCKETS+1];
	u32 histo_spin[HISTO_BUCKETS+1];

	u64 spinning_time;
	u64 total_time;
} spinlock_stats;

static u8 zero_stats;
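
/*
 * Number of fast-path spin iterations before we fall into the slow
 * path; exposed below as the writable debugfs file "timeout".
 */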
static unsigned lock_timeout = 1 << 10;
#define TIMEOUT lock_timeout

static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&spinlock_stats, 0, sizeof(spinlock_stats));
		zero_stats = 0;
	}
}

#define ADD_STATS(elem, val)			\
	do { check_zero(); spinlock_stats.elem += (val); } while(0)

static inline u64 spin_time_start(void)
{
	return xen_clocksource_read();
}
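
/*
 * Accumulate a delta (in nanoseconds, as returned by
 * xen_clocksource_read()) into a log2 histogram: bucket i counts
 * deltas whose ilog2 is i; the final bucket catches everything of
 * 2^HISTO_BUCKETS ns and beyond.
 */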
static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index = ilog2(delta);

	check_zero();

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_fast(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin_fast);
	spinlock_stats.spinning_time += delta;
}

static inline void spin_time_accum(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin);
	spinlock_stats.total_time += delta;
}
#else  /* !CONFIG_XEN_DEBUG_FS */
#define TIMEOUT			(1 << 10)
#define ADD_STATS(elem, val)	do { (void)(val); } while(0)

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_fast(u64 start)
{
}
static inline void spin_time_accum(u64 start)
{
}
#endif  /* CONFIG_XEN_DEBUG_FS */

struct xen_spinlock {
	unsigned char lock;		/* 0 -> free; 1 -> locked */
	unsigned short spinners;	/* count of waiting cpus */
};
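
/*
 * Lock acquisition, as implemented below: a cpu first spins on the
 * lock byte with xchgb for up to TIMEOUT iterations.  If the lock is
 * still held it announces itself in lock_spinners, bumps ->spinners
 * and blocks on its per-cpu event channel until the releasing cpu
 * kicks it.  The unlocker only takes its slow path when ->spinners
 * is nonzero.
 */
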
static int xen_spin_is_locked(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	return xl->lock != 0;
}

static int xen_spin_is_contended(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	/* Not strictly true; this is only the count of contended
	   lock-takers entering the slow path. */
	return xl->spinners != 0;
}

static int xen_spin_trylock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	u8 old = 1;

	asm("xchgb %b0,%1"
	    : "+q" (old), "+m" (xl->lock) : : "memory");

	return old == 0;
}
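
/*
 * Per-cpu slow-path state: lock_kicker_irq is the event-channel irq
 * a blocked cpu polls on (and is kicked through), and lock_spinners
 * records which lock, if any, this cpu is currently waiting for.
 */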
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);

/*
 * Mark a cpu as interested in a lock.  Returns the CPU's previous
 * lock of interest, in case we got preempted by an interrupt.
 */
static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
{
	struct xen_spinlock *prev;

	prev = __get_cpu_var(lock_spinners);
	__get_cpu_var(lock_spinners) = xl;

	wmb();			/* set lock of interest before count */

	asm(LOCK_PREFIX " incw %0"
	    : "+m" (xl->spinners) : : "memory");

	return prev;
}

/*
 * Mark a cpu as no longer interested in a lock.  Restores previous
 * lock of interest (NULL for none).
 */
static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
{
	asm(LOCK_PREFIX " decw %0"
	    : "+m" (xl->spinners) : : "memory");
	wmb();			/* decrement count before restoring lock */
	__get_cpu_var(lock_spinners) = prev;
}

static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	struct xen_spinlock *prev;
	int irq = __get_cpu_var(lock_kicker_irq);
	int ret;
	unsigned long flags;

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return 0;

	/* announce we're spinning */
	prev = spinning_lock(xl);

	flags = __raw_local_save_flags();
	if (irq_enable) {
		ADD_STATS(taken_slow_irqenable, 1);
		raw_local_irq_enable();
	}

	ADD_STATS(taken_slow, 1);
	ADD_STATS(taken_slow_nested, prev != NULL);

	do {
		/* clear pending */
		xen_clear_irq_pending(irq);

		/* check again make sure it didn't become free while
		   we weren't looking */
		ret = xen_spin_trylock(lock);
		if (ret) {
			ADD_STATS(taken_slow_pickup, 1);

			/*
			 * If we interrupted another spinlock while it
			 * was blocking, make sure it doesn't block
			 * without rechecking the lock.
			 */
			if (prev != NULL)
				xen_set_irq_pending(irq);
			goto out;
		}

		/*
		 * Block until irq becomes pending.  If we're
		 * interrupted at this point (after the trylock but
		 * before entering the block), then the nested lock
		 * handler guarantees that the irq will be left
		 * pending if there's any chance the lock became free;
		 * xen_poll_irq() returns immediately if the irq is
		 * pending.
		 */
		xen_poll_irq(irq);
		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */

	kstat_this_cpu.irqs[irq]++;

out:
	raw_local_irq_restore(flags);
	unspinning_lock(xl, prev);

	return ret;
}
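
/*
 * Fast path: xchgb 1 into the lock byte; if the old value was 0 we
 * now own the lock.  Otherwise rep;nop until the byte reads 0 or the
 * timeout counter expires, retrying the xchgb whenever the lock
 * looks free.  Only on timeout do we fall into the slow path.
 */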
static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	unsigned timeout;
	u8 oldval;
	u64 start_spin;

	ADD_STATS(taken, 1);

	start_spin = spin_time_start();

	do {
		u64 start_spin_fast = spin_time_start();

		timeout = TIMEOUT;

		asm("1: xchgb %1,%0\n"
		    "   testb %1,%1\n"
		    "   jz 3f\n"
		    "2: rep;nop\n"
		    "   cmpb $0,%0\n"
		    "   je 1b\n"
		    "   dec %2\n"
		    "   jnz 2b\n"
		    "3:\n"
		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
		    : "1" (1)
		    : "memory");

		spin_time_accum_fast(start_spin_fast);

	} while (unlikely(oldval != 0 &&
			  (TIMEOUT == ~0 || !xen_spin_lock_slow(lock, irq_enable))));

	spin_time_accum(start_spin);
}

static void xen_spin_lock(struct raw_spinlock *lock)
{
	__xen_spin_lock(lock, false);
}
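
/*
 * If the caller's saved flags show that interrupts were enabled when
 * the lock was requested, it is safe to re-enable them while we
 * block in the slow path, keeping interrupt latency down during a
 * long wait.
 */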
static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
{
	__xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
}

static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
{
	int cpu;

	ADD_STATS(released_slow, 1);

	for_each_online_cpu(cpu) {
		/* XXX should mix up next cpu selection */
		if (per_cpu(lock_spinners, cpu) == xl) {
			ADD_STATS(released_slow_kicked, 1);
			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
			break;
		}
	}
}

static void xen_spin_unlock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	ADD_STATS(released, 1);

	smp_wmb();		/* make sure no writes get moved after unlock */
	xl->lock = 0;		/* release lock */

	/* make sure unlock happens before kick */
	barrier();

	if (unlikely(xl->spinners))
		xen_spin_unlock_slow(xl);
}
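
/*
 * The kicker irq is never delivered as a real interrupt: it is bound,
 * immediately disabled, and only ever consumed by xen_poll_irq() in
 * the slow path.  Any call of this handler is therefore a bug.
 */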
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

void __cpuinit xen_init_lock_cpu(int cpu)
{
	int irq;
	const char *name;

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
	}

	printk(KERN_DEBUG "cpu %d spinlock event irq %d\n", cpu, irq);
}

void __init xen_init_spinlocks(void)
{
	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
	pv_lock_ops.spin_lock = xen_spin_lock;
	pv_lock_ops.spin_lock_flags = xen_spin_lock_flags;
	pv_lock_ops.spin_trylock = xen_spin_trylock;
	pv_lock_ops.spin_unlock = xen_spin_unlock;
}
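
/*
 * With debugfs mounted in the usual place, the statistics below
 * appear under /sys/kernel/debug/xen/spinlocks/; writing 1 to
 * zero_stats resets all counters at the next update.
 */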
#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_spin_debug;

static int __init xen_spinlock_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("timeout", 0644, d_spin_debug, &lock_timeout);

	debugfs_create_u64("taken", 0444, d_spin_debug, &spinlock_stats.taken);
	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow);
	debugfs_create_u32("taken_slow_nested", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_nested);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_pickup);
	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_spurious);
	debugfs_create_u32("taken_slow_irqenable", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_irqenable);

	debugfs_create_u64("released", 0444, d_spin_debug, &spinlock_stats.released);
	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.released_slow);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.released_slow_kicked);

	debugfs_create_u64("time_spinning", 0444, d_spin_debug,
			   &spinlock_stats.spinning_time);
	debugfs_create_u64("time_total", 0444, d_spin_debug,
			   &spinlock_stats.total_time);

	xen_debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
				     spinlock_stats.histo_spin, HISTO_BUCKETS + 1);
	xen_debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
				     spinlock_stats.histo_spin_fast, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(xen_spinlock_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */