spinlock: lockbreak cleanup
author Nick Piggin <npiggin@suse.de>
Wed, 30 Jan 2008 12:31:20 +0000 (13:31 +0100)
committer Ingo Molnar <mingo@elte.hu>
Wed, 30 Jan 2008 12:31:20 +0000 (13:31 +0100)
The break_lock data structure and code for spinlocks are quite nasty.
Not only does it double the size of a spinlock, but it also changes
locking to a potentially less optimal trylock.
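
For reference, the slow path that break_lock imposes looks roughly like
this (a simplified sketch of the BUILD_LOCK_OPS-generated code in
kernel/spinlock.c; details vary between trees):

        void __lockfunc _spin_lock(spinlock_t *lock)
        {
                for (;;) {
                        preempt_disable();
                        if (likely(_raw_spin_trylock(lock)))
                                break;
                        preempt_enable();
                        /* advertise that we are spinning on this lock */
                        if (!lock->break_lock)
                                lock->break_lock = 1;
                        /* busy-wait outside the lock, then trylock again */
                        while (!spin_can_lock(lock) && lock->break_lock)
                                cpu_relax();
                }
                lock->break_lock = 0;
        }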

Put all of that under CONFIG_GENERIC_LOCKBREAK, and introduce a
__raw_spin_is_contended that uses the lock data itself to determine whether
there are waiters on the lock, to be used if CONFIG_GENERIC_LOCKBREAK is
not set.
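
On an architecture whose lock word encodes waiters -- a ticket lock, for
example -- contention can be read straight out of the lock value.  A
hypothetical implementation (the slock layout below is assumed for
illustration, not taken from this patch):

        /*
         * Hypothetical ticket-lock version: the high byte holds the
         * "next" ticket, the low byte the "owner" ticket; a gap larger
         * than one means another CPU is already waiting.
         */
        static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
        {
                int tmp = *(volatile signed int *)(&lock->slock);

                return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
        }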

Rename need_lockbreak to spin_needbreak, make it use spin_is_contended to
decouple it from the spinlock implementation, and make it typesafe (rwlocks
do not have any need_lockbreak sites -- why do they even get bloated up
with that break_lock then?).
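
The caller pattern that results is the lock-break loop visible in the
jbd hunks below; a minimal sketch, with hypothetical work helpers:

        spin_lock(&journal->j_list_lock);
        while (more_work(journal)) {            /* hypothetical */
                process_one_item(journal);      /* hypothetical */
                if (need_resched() ||
                    spin_needbreak(&journal->j_list_lock)) {
                        spin_unlock(&journal->j_list_lock);
                        cond_resched();
                        spin_lock(&journal->j_list_lock);
                }
        }
        spin_unlock(&journal->j_list_lock);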

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
19 files changed:
arch/arm/Kconfig
arch/ia64/Kconfig
arch/m32r/Kconfig
arch/mips/Kconfig
arch/parisc/Kconfig
arch/powerpc/Kconfig
arch/sparc64/Kconfig
arch/x86/Kconfig
fs/jbd/checkpoint.c
fs/jbd/commit.c
fs/jbd2/checkpoint.c
fs/jbd2/commit.c
include/linux/sched.h
include/linux/spinlock.h
include/linux/spinlock_types.h
include/linux/spinlock_up.h
kernel/sched.c
kernel/spinlock.c
mm/memory.c

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index de211ac3853e90aa16017641efd295e8496cf3e9..77201d3f7479ccc65bf2e454fa310be75580a49b 100644
@@ -91,6 +91,11 @@ config GENERIC_IRQ_PROBE
        bool
        default y
 
+config GENERIC_LOCKBREAK
+       bool
+       default y
+       depends on SMP && PREEMPT
+
 config RWSEM_GENERIC_SPINLOCK
        bool
        default y
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index bef47725d4ad461310e181d98fb3d04aa214b061..4a81b7fb191a527ed7dd69bd1d78165aaec64063 100644
@@ -42,6 +42,11 @@ config MMU
 config SWIOTLB
        bool
 
+config GENERIC_LOCKBREAK
+       bool
+       default y
+       depends on SMP && PREEMPT
+
 config RWSEM_XCHGADD_ALGORITHM
        bool
        default y
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index ab9a264cb1947cb6a938c523e677f4c64873b72e..f7237c5f531e511806133f0223ecd48edb0399ab 100644
@@ -235,6 +235,11 @@ config IRAM_SIZE
 # Define implied options from the CPU selection here
 #
 
+config GENERIC_LOCKBREAK
+       bool
+       default y
+       depends on SMP && PREEMPT
+
 config RWSEM_GENERIC_SPINLOCK
        bool
        depends on M32R
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 6b0f85f02c7966895656fb5a13681041e52f2394..4fad0a34b9974d31f16b5e1e6a8bb6e3c067e710 100644
@@ -694,6 +694,11 @@ source "arch/mips/vr41xx/Kconfig"
 
 endmenu
 
+config GENERIC_LOCKBREAK
+       bool
+       default y
+       depends on SMP && PREEMPT
+
 config RWSEM_GENERIC_SPINLOCK
        bool
        default y
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index b8ef1787a191ce81337590391067b9dda811b89d..2b649c46631c14fb07d408dd3bad2e0869a8dae6 100644
@@ -19,6 +19,11 @@ config MMU
 config STACK_GROWSUP
        def_bool y
 
+config GENERIC_LOCKBREAK
+       bool
+       default y
+       depends on SMP && PREEMPT
+
 config RWSEM_GENERIC_SPINLOCK
        def_bool y
 
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 232c298c933fa3601b9fa9099b3eeee26e40d514..c17a194beb0e6c94560d97c469ccfbd3327686d9 100644
@@ -53,6 +53,11 @@ config RWSEM_XCHGADD_ALGORITHM
        bool
        default y
 
+config GENERIC_LOCKBREAK
+       bool
+       default y
+       depends on SMP && PREEMPT
+
 config ARCH_HAS_ILOG2_U32
        bool
        default y
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 10b212a1f9f5aa046dbe1e2b5702dbcba9a3b7c0..1e25bce0366d130f9ae1e2fda5c215fe3fde5e39 100644
@@ -200,6 +200,11 @@ config US2E_FREQ
          If in doubt, say N.
 
 # Global things across all Sun machines.
+config GENERIC_LOCKBREAK
+       bool
+       default y
+       depends on SMP && PREEMPT
+
 config RWSEM_GENERIC_SPINLOCK
        bool
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 23936301db561eb110529d1e55cf03597bca40d9..db434f8171d3e46041f28035e18de814ef65342d 100644
@@ -19,6 +19,10 @@ config X86_64
 config X86
        def_bool y
 
+config GENERIC_LOCKBREAK
+       def_bool y
+       depends on SMP && PREEMPT
+
 config GENERIC_TIME
        def_bool y
 
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 0f69c416eebc6adef7d05eeaf3283083adb9e700..a5432bbbfb88ab678a63b8cfb55b4d04cf8eae7d 100644
@@ -347,7 +347,8 @@ restart:
                                break;
                        }
                        retry = __process_buffer(journal, jh, bhs,&batch_count);
-                       if (!retry && lock_need_resched(&journal->j_list_lock)){
+                       if (!retry && (need_resched() ||
+                               spin_needbreak(&journal->j_list_lock))) {
                                spin_unlock(&journal->j_list_lock);
                                retry = 1;
                                break;
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 610264b99a8e8bb357ed46dc9a08735443ab2659..31853eb65b4cb0f2bf4f365e21a0456e7362d1b1 100644
@@ -265,7 +265,7 @@ write_out_data:
                        put_bh(bh);
                }
 
-               if (lock_need_resched(&journal->j_list_lock)) {
+               if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
                        spin_unlock(&journal->j_list_lock);
                        goto write_out_data;
                }
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 1b7f282c1ae9e9940ca57009e3d3c6f73054433b..6914598022ce836e10a13aa8be50aab1ec3bdc74 100644
@@ -353,7 +353,8 @@ restart:
                        }
                        retry = __process_buffer(journal, jh, bhs, &batch_count,
                                                 transaction);
-                       if (!retry && lock_need_resched(&journal->j_list_lock)){
+                       if (!retry && (need_resched() ||
+                               spin_needbreak(&journal->j_list_lock))) {
                                spin_unlock(&journal->j_list_lock);
                                retry = 1;
                                break;
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index da8d0eb3b7b9c8933091561e7e44852fd814de66..4f302d2792794008351326bf9d6e128cf8b18a33 100644
@@ -341,7 +341,7 @@ write_out_data:
                        put_bh(bh);
                }
 
-               if (lock_need_resched(&journal->j_list_lock)) {
+               if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
                        spin_unlock(&journal->j_list_lock);
                        goto write_out_data;
                }
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2d0546e884ea0fe443f88a74a01f39bbd5f86cb4..9d4797609aa5e368439671d603da5f7e7bedb979 100644
@@ -1922,23 +1922,16 @@ extern int cond_resched_softirq(void);
 
 /*
  * Does a critical section need to be broken due to another
- * task waiting?:
+ * task waiting?: (technically does not depend on CONFIG_PREEMPT,
+ * but a general need for low latency)
  */
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
-# define need_lockbreak(lock) ((lock)->break_lock)
-#else
-# define need_lockbreak(lock) 0
-#endif
-
-/*
- * Does a critical section need to be broken due to another
- * task waiting or preemption being signalled:
- */
-static inline int lock_need_resched(spinlock_t *lock)
+static inline int spin_needbreak(spinlock_t *lock)
 {
-       if (need_lockbreak(lock) || need_resched())
-               return 1;
+#ifdef CONFIG_PREEMPT
+       return spin_is_contended(lock);
+#else
        return 0;
+#endif
 }
 
 /*
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index c376f3b36c8980c9b434cabc693e7ef9b4ab43f3..124449733c55a64595198aa8dc8eaf6a682189ca 100644
@@ -120,6 +120,12 @@ do {                                                               \
 
 #define spin_is_locked(lock)   __raw_spin_is_locked(&(lock)->raw_lock)
 
+#ifdef CONFIG_GENERIC_LOCKBREAK
+#define spin_is_contended(lock) ((lock)->break_lock)
+#else
+#define spin_is_contended(lock)        __raw_spin_is_contended(&(lock)->raw_lock)
+#endif
+
 /**
  * spin_unlock_wait - wait until the spinlock gets unlocked
  * @lock: the spinlock in question.
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index f6a3a951b79eda3fc409913d10819361fe41faa3..68d88f71f1a2049c2f8be89c47aab85978f21054 100644
@@ -19,7 +19,7 @@
 
 typedef struct {
        raw_spinlock_t raw_lock;
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
+#ifdef CONFIG_GENERIC_LOCKBREAK
        unsigned int break_lock;
 #endif
 #ifdef CONFIG_DEBUG_SPINLOCK
@@ -35,7 +35,7 @@ typedef struct {
 
 typedef struct {
        raw_rwlock_t raw_lock;
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
+#ifdef CONFIG_GENERIC_LOCKBREAK
        unsigned int break_lock;
 #endif
 #ifdef CONFIG_DEBUG_SPINLOCK
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index ea54c4c9a4ecd2c678b6f8d717b0b8bc98d50cfa..938234c4a996ba6e78521ffc60ecc197034fed9d 100644
@@ -64,6 +64,8 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 # define __raw_spin_trylock(lock)      ({ (void)(lock); 1; })
 #endif /* DEBUG_SPINLOCK */
 
+#define __raw_spin_is_contended(lock)  (((void)(lock), 0))
+
 #define __raw_read_can_lock(lock)      (((void)(lock), 1))
 #define __raw_write_can_lock(lock)     (((void)(lock), 1))
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 524285e46fa788e7e0a04612a611965b7650a2d5..ba4c88088f62c53ab8ec414e3d77da65b1cd9f67 100644
@@ -4945,19 +4945,15 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int cond_resched_lock(spinlock_t *lock)
 {
+       int resched = need_resched() && system_state == SYSTEM_RUNNING;
        int ret = 0;
 
-       if (need_lockbreak(lock)) {
+       if (spin_needbreak(lock) || resched) {
                spin_unlock(lock);
-               cpu_relax();
-               ret = 1;
-               spin_lock(lock);
-       }
-       if (need_resched() && system_state == SYSTEM_RUNNING) {
-               spin_release(&lock->dep_map, 1, _THIS_IP_);
-               _raw_spin_unlock(lock);
-               preempt_enable_no_resched();
-               __cond_resched();
+               if (resched && need_resched())
+                       __cond_resched();
+               else
+                       cpu_relax();
                ret = 1;
                spin_lock(lock);
        }
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index cd72424c26625765085e0b2306345d474cb9476d..ae28c82451237a7ee0b0d8653701e7eed5437c6b 100644
@@ -65,8 +65,7 @@ EXPORT_SYMBOL(_write_trylock);
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
-#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \
-       defined(CONFIG_DEBUG_LOCK_ALLOC)
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
 
 void __lockfunc _read_lock(rwlock_t *lock)
 {
diff --git a/mm/memory.c b/mm/memory.c
index 4b0144b24c123681dcd9e95e715ae56b009355d7..673ebbf499c75860286c107bb20406d952524aad 100644
@@ -513,8 +513,7 @@ again:
                if (progress >= 32) {
                        progress = 0;
                        if (need_resched() ||
-                           need_lockbreak(src_ptl) ||
-                           need_lockbreak(dst_ptl))
+                           spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
                                break;
                }
                if (pte_none(*src_pte)) {
@@ -853,7 +852,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
                        tlb_finish_mmu(*tlbp, tlb_start, start);
 
                        if (need_resched() ||
-                               (i_mmap_lock && need_lockbreak(i_mmap_lock))) {
+                               (i_mmap_lock && spin_needbreak(i_mmap_lock))) {
                                if (i_mmap_lock) {
                                        *tlbp = NULL;
                                        goto out;
@@ -1768,8 +1767,7 @@ again:
 
        restart_addr = zap_page_range(vma, start_addr,
                                        end_addr - start_addr, details);
-       need_break = need_resched() ||
-                       need_lockbreak(details->i_mmap_lock);
+       need_break = need_resched() || spin_needbreak(details->i_mmap_lock);
 
        if (restart_addr >= end_addr) {
                /* We have now completed this vma: mark it so */