MIPS: Get rid of atomic_lock.
author    Ralf Baechle <ralf@linux-mips.org>
          Wed, 7 Dec 2005 18:57:52 +0000 (18:57 +0000)
committer Ralf Baechle <ralf@denk.linux-mips.net>
          Tue, 10 Jan 2006 13:39:06 +0000 (13:39 +0000)
It was resulting in build errors for some configurations.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
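In practice the change swaps the shared atomic_lock spinlock for local interrupt masking on the non-LL/SC fallback path, which also removes the header's dependency on a lock defined elsewhere. A minimal sketch of the resulting shape of one helper, assuming the surrounding kernel header context (the LL/SC branch is elided here):

/*
 * Illustrative sketch, not part of the patch: the fallback taken when
 * cpu_has_llsc is false now masks local interrupts around the
 * read-modify-write instead of taking the shared atomic_lock.
 */
static __inline__ void atomic_add(int i, atomic_t * v)
{
	if (cpu_has_llsc) {
		/* LL/SC-based implementation, unchanged by this patch. */
	} else {
		unsigned long flags;

		local_irq_save(flags);		/* was spin_lock_irqsave(&atomic_lock, flags) */
		v->counter += i;
		local_irq_restore(flags);	/* was spin_unlock_irqrestore(&atomic_lock, flags) */
	}
}

On MIPS CPUs that lack LL/SC the kernel does not run SMP, so masking local interrupts is enough to keep the read-modify-write atomic with respect to the only other context that could touch the counter on that CPU.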
include/asm-mips/atomic.h

index 94a95872d7276914fecd98041b712c6c937dbceb..654b97d3e13a405302cdf0e96d04f83977841bf5 100644
 #define _ASM_ATOMIC_H
 
 #include <asm/cpu-features.h>
+#include <asm/interrupt.h>
 #include <asm/war.h>
 
-extern spinlock_t atomic_lock;
-
 typedef struct { volatile int counter; } atomic_t;
 
 #define ATOMIC_INIT(i)    { (i) }
@@ -85,9 +84,9 @@ static __inline__ void atomic_add(int i, atomic_t * v)
        } else {
                unsigned long flags;
 
-               spin_lock_irqsave(&atomic_lock, flags);
+               local_irq_save(flags);
                v->counter += i;
-               spin_unlock_irqrestore(&atomic_lock, flags);
+               local_irq_restore(flags);
        }
 }
 
@@ -127,9 +126,9 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
        } else {
                unsigned long flags;
 
-               spin_lock_irqsave(&atomic_lock, flags);
+               local_irq_save(flags);
                v->counter -= i;
-               spin_unlock_irqrestore(&atomic_lock, flags);
+               local_irq_restore(flags);
        }
 }
 
@@ -173,11 +172,11 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
        } else {
                unsigned long flags;
 
-               spin_lock_irqsave(&atomic_lock, flags);
+               local_irq_save(flags);
                result = v->counter;
                result += i;
                v->counter = result;
-               spin_unlock_irqrestore(&atomic_lock, flags);
+               local_irq_restore(flags);
        }
 
        return result;
@@ -220,11 +219,11 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
        } else {
                unsigned long flags;
 
-               spin_lock_irqsave(&atomic_lock, flags);
+               local_irq_save(flags);
                result = v->counter;
                result -= i;
                v->counter = result;
-               spin_unlock_irqrestore(&atomic_lock, flags);
+               local_irq_restore(flags);
        }
 
        return result;
@@ -277,12 +276,12 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
        } else {
                unsigned long flags;
 
-               spin_lock_irqsave(&atomic_lock, flags);
+               local_irq_save(flags);
                result = v->counter;
                result -= i;
                if (result >= 0)
                        v->counter = result;
-               spin_unlock_irqrestore(&atomic_lock, flags);
+               local_irq_restore(flags);
        }
 
        return result;
@@ -433,9 +432,9 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
        } else {
                unsigned long flags;
 
-               spin_lock_irqsave(&atomic_lock, flags);
+               local_irq_save(flags);
                v->counter += i;
-               spin_unlock_irqrestore(&atomic_lock, flags);
+               local_irq_restore(flags);
        }
 }
 
@@ -475,9 +474,9 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
        } else {
                unsigned long flags;
 
-               spin_lock_irqsave(&atomic_lock, flags);
+               local_irq_save(flags);
                v->counter -= i;
-               spin_unlock_irqrestore(&atomic_lock, flags);
+               local_irq_restore(flags);
        }
 }
 
@@ -521,11 +520,11 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
        } else {
                unsigned long flags;
 
-               spin_lock_irqsave(&atomic_lock, flags);
+               local_irq_save(flags);
                result = v->counter;
                result += i;
                v->counter = result;
-               spin_unlock_irqrestore(&atomic_lock, flags);
+               local_irq_restore(flags);
        }
 
        return result;
@@ -568,11 +567,11 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
        } else {
                unsigned long flags;
 
-               spin_lock_irqsave(&atomic_lock, flags);
+               local_irq_save(flags);
                result = v->counter;
                result -= i;
                v->counter = result;
-               spin_unlock_irqrestore(&atomic_lock, flags);
+               local_irq_restore(flags);
        }
 
        return result;
@@ -625,12 +624,12 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
        } else {
                unsigned long flags;
 
-               spin_lock_irqsave(&atomic_lock, flags);
+               local_irq_save(flags);
                result = v->counter;
                result -= i;
                if (result >= 0)
                        v->counter = result;
-               spin_unlock_irqrestore(&atomic_lock, flags);
+               local_irq_restore(flags);
        }
 
        return result;
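
The interface itself is untouched by this change; a hypothetical caller (the names below are illustrative, not from the patch) behaves the same before and after:

/*
 * Hypothetical caller, for illustration only: the locking change is
 * internal to the fallback path, so users of the atomic API see no
 * difference.
 */
static atomic_t refs = ATOMIC_INIT(1);

static void get_ref(void)
{
	atomic_add(1, &refs);
}

static int put_ref(void)
{
	/*
	 * Stores the decremented value only if it stays >= 0; the return
	 * value is refs.counter - 1 either way.
	 */
	return atomic_sub_if_positive(1, &refs);
}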