IA64: Slim down __clear_bit_unlock
author    Christoph Lameter <clameter@sgi.com>
          Wed, 19 Dec 2007 00:22:46 +0000 (16:22 -0800)
committer Tony Luck <tony.luck@intel.com>
          Wed, 19 Dec 2007 00:22:46 +0000 (16:22 -0800)
__clear_bit_unlock does not need to perform atomic operations on the
variable.  Avoid a cmpxchg and simply do a store with release semantics.
Add a compiler barrier to make sure the compiler does not reorder
memory accesses around the releasing store.
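
As context for the change, a minimal userspace sketch of the two approaches
follows; the function names are hypothetical and GCC's __atomic builtins stand
in for the ia64 cmpxchg4.rel and st4.rel instructions, so this is an
illustration only, not the kernel code (the real implementation is in the
diff below).

#include <stdint.h>

/*
 * Old approach: read-modify-write retried through a compare-and-exchange
 * with release ordering (stand-in for cmpxchg4.rel).
 */
static inline void clear_bit_unlock_cmpxchg(int nr, volatile uint32_t *addr)
{
	volatile uint32_t *m = addr + (nr >> 5);
	uint32_t old, new;

	do {
		old = *m;
		new = old & ~(1u << (nr & 31));
	} while (!__atomic_compare_exchange_n(m, &old, new, 0,
					      __ATOMIC_RELEASE, __ATOMIC_RELAXED));
}

/*
 * New approach: only the lock holder clears the bit, so a plain
 * read-modify-write ending in a single release store is sufficient
 * (stand-in for st4.rel).
 */
static inline void clear_bit_unlock_release_store(int nr, volatile uint32_t *addr)
{
	volatile uint32_t *m = addr + (nr >> 5);
	uint32_t new = *m & ~(1u << (nr & 31));

	__atomic_store_n(m, new, __ATOMIC_RELEASE);
}

int main(void)
{
	volatile uint32_t word = 1;	/* bit 0 held as a "lock" */

	clear_bit_unlock_release_store(0, &word);
	return word;			/* 0 once the bit has been released */
}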

Tony: Use intrinsic rather than inline assembler

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
include/asm-ia64/bitops.h
include/asm-ia64/gcc_intrin.h
include/asm-ia64/intel_intrin.h

index a977affaebeca34b02b1e9bb991f4ad83cd35281..a1b9719f5fbb7ba11071a92ddcd611e6945e2c2e 100644 (file)
@@ -124,10 +124,21 @@ clear_bit_unlock (int nr, volatile void *addr)
 /**
  * __clear_bit_unlock - Non-atomically clear a bit with release
  *
- * This is like clear_bit_unlock, but the implementation may use a non-atomic
- * store (this one uses an atomic, however).
+ * This is like clear_bit_unlock, but the implementation uses a store
+ * with release semantics. See also __raw_spin_unlock().
  */
-#define __clear_bit_unlock clear_bit_unlock
+static __inline__ void
+__clear_bit_unlock(int nr, volatile void *addr)
+{
+       __u32 mask, new;
+       volatile __u32 *m;
+
+       m = (volatile __u32 *)addr + (nr >> 5);
+       mask = ~(1 << (nr & 31));
+       new = *m & mask;
+       barrier();
+       ia64_st4_rel_nta(m, new);
+}
 
 /**
  * __clear_bit - Clears a bit in memory (non-atomic version)
index 4fb4e439b05c35eca06daf2762936939c3e1b333..e58d3298fa109eb4ba733d580e39d3b06580dbcc 100644 (file)
@@ -191,6 +191,11 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
        asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \
 })
 
+#define ia64_st4_rel_nta(m, val)                                       \
+({                                                                     \
+       asm volatile ("st4.rel.nta [%0] = %1\n\t" :: "r"(m), "r"(val)); \
+})
+
 #define ia64_stfs(x, regnum)                                           \
 ({                                                                     \
        register double __f__ asm ("f"#regnum);                         \
index d069b6acddce1e82838001f739fee3de431195be..a520d103d80869d358a5e3d442c5b63a144dbdbc 100644 (file)
 #define ia64_st4_rel           __st4_rel
 #define ia64_st8_rel           __st8_rel
 
+/* FIXME: need st4.rel.nta intrinsic */
+#define ia64_st4_rel_nta       __st4_rel
+
 #define ia64_ld1_acq           __ld1_acq
 #define ia64_ld2_acq           __ld2_acq
 #define ia64_ld4_acq           __ld4_acq