#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on gcc
versions older than 4.1, which mishandle the "+m" constraint. */
-#define ADDR "=m" (*(volatile long *)addr)
-#define BIT_ADDR "=m" (((volatile int *)addr)[nr >> 5])
+#define ADDR "=m" (*(volatile long *) addr)
#else
#define ADDR "+m" (*(volatile long *) addr)
-#define BIT_ADDR "+m" (((volatile int *)addr)[nr >> 5])
#endif
-#define BASE_ADDR "m" (*(volatile int *)addr)
/**
* set_bit - Atomically set a bit in memory
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void set_bit(int nr, volatile void *addr)
+static inline void set_bit(int nr, volatile unsigned long *addr)
{
asm volatile(LOCK_PREFIX "bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static inline void __set_bit(int nr, volatile void *addr)
+static inline void __set_bit(int nr, volatile unsigned long *addr)
{
- asm volatile("bts %1,%0"
- : ADDR
- : "Ir" (nr) : "memory");
+ asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
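/*
* Illustrative contrast (editor's sketch, not part of the patched file):
* the locked form is safe against concurrent updates of the same word;
* the __ form compiles to a plain "bts" and needs external serialization.
*
*	unsigned long word = 0;
*	set_bit(0, &word);	- atomic "lock bts" read-modify-write
*	__set_bit(1, &word);	- non-atomic; caller must serialize
*/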
/**
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*/
-static inline void clear_bit(int nr, volatile void *addr)
+static inline void clear_bit(int nr, volatile unsigned long *addr)
{
- asm volatile(LOCK_PREFIX "btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
+ asm volatile(LOCK_PREFIX "btr %1,%0" : ADDR : "Ir" (nr));
}
/*
* clear_bit_unlock() is atomic and implies release semantics before the
* memory operation. It can be used for an unlock.
*/
-static inline void clear_bit_unlock(unsigned nr, volatile void *addr)
+static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
barrier();
clear_bit(nr, addr);
}
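/*
* Illustrative lock-bit pattern (editor's sketch): clear_bit_unlock()
* pairs with test_and_set_bit_lock(), defined further down; bit 0 of
* the hypothetical "word" serves as the lock bit.
*
*	if (!test_and_set_bit_lock(0, &word)) {
*		... critical section ...
*		clear_bit_unlock(0, &word);
*	}
*/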
-static inline void __clear_bit(int nr, volatile void *addr)
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
- asm volatile("btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
+ asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}
/*
* No memory barrier is required here, because x86 cannot reorder stores past
* older loads. Same principle as spin_unlock.
*/
-static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
+static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
barrier();
__clear_bit(nr, addr);
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(int nr, volatile unsigned long *addr)
{
- asm volatile("btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
+ asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}
/**
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void change_bit(int nr, volatile void *addr)
+static inline void change_bit(int nr, volatile unsigned long *addr)
{
- asm volatile(LOCK_PREFIX "btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
+ asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr));
}
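/*
* Illustrative usage (editor's sketch): atomically toggling the same bit
* twice restores the original word.
*
*	unsigned long state = 0;
*	change_bit(0, &state);	- state == 0x1
*	change_bit(0, &state);	- state == 0x0 again
*/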
/**
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int test_and_set_bit(int nr, volatile void *addr)
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
*
* This is the same as test_and_set_bit on x86.
*/
-static inline int test_and_set_bit_lock(int nr, volatile void *addr)
+static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
{
return test_and_set_bit(nr, addr);
}
* If two instances of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static inline int __test_and_set_bit(int nr, volatile void *addr)
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
- asm volatile("bts %2,%3\n\t"
- "sbb %0,%0"
- : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
+ asm("bts %2,%1\n\t"
+ "sbb %0,%0"
+ : "=r" (oldbit), ADDR
+ : "Ir" (nr));
return oldbit;
}
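/*
* Editor's note on the idiom above: "bts" copies the old bit value into
* the carry flag, and "sbb %0,%0" subtracts the register from itself
* with borrow, producing 0 if CF was clear and -1 (all ones) if it was
* set.  Callers only test the result for zero versus nonzero.
*/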
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int test_and_clear_bit(int nr, volatile void *addr)
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
* If two instances of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static inline int __test_and_clear_bit(int nr, volatile void *addr)
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
- asm volatile("btr %2,%3\n\t"
+ asm volatile("btr %2,%1\n\t"
"sbb %0,%0"
- : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
+ : "=r" (oldbit), ADDR
+ : "Ir" (nr));
return oldbit;
}
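/*
* Illustrative usage (editor's sketch): atomically consume a pending
* flag; PENDING_BIT and handle_pending() are hypothetical names.
*
*	if (test_and_clear_bit(PENDING_BIT, &flags))
*		handle_pending();	- runs once per setting of the bit
*/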
/* WARNING: non-atomic and can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile void *addr)
+static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
- asm volatile("btc %2,%3\n\t"
+ asm volatile("btc %2,%1\n\t"
"sbb %0,%0"
- : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
+ : "=r" (oldbit), ADDR
+ : "Ir" (nr) : "memory");
return oldbit;
}
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int test_and_change_bit(int nr, volatile void *addr)
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
return oldbit;
}
-static inline int constant_test_bit(int nr, const volatile void *addr)
+static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
return ((1UL << (nr % BITS_PER_LONG)) &
(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
}
-static inline int variable_test_bit(int nr, volatile const void *addr)
+static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
{
int oldbit;
- asm volatile("bt %2,%3\n\t"
+ asm volatile("bt %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit)
- : "m" (((volatile const int *)addr)[nr >> 5]),
- "Ir" (nr), BASE_ADDR);
+ : "m" (*(unsigned long *)addr), "Ir" (nr));
return oldbit;
}
static int test_bit(int nr, const volatile unsigned long *addr);
#endif
-#define test_bit(nr,addr) \
- (__builtin_constant_p(nr) ? \
- constant_test_bit((nr),(addr)) : \
- variable_test_bit((nr),(addr)))
+#define test_bit(nr, addr) \
+ (__builtin_constant_p((nr)) \
+ ? constant_test_bit((nr), (addr)) \
+ : variable_test_bit((nr), (addr)))
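+/*
+ * Illustrative dispatch (editor's note): with a compile-time constant bit
+ * number the pure C constant_test_bit() can fold to a mask test, while a
+ * runtime bit number goes through the "bt" instruction:
+ *
+ *	test_bit(5, addr);	- constant_test_bit()
+ *	test_bit(n, addr);	- variable_test_bit()
+ */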
-#undef BASE_ADDR
-#undef BIT_ADDR
/**
* __ffs - find first set bit in word
* @word: The word to search
*/
static inline unsigned long __ffs(unsigned long word)
{
- __asm__("bsf %1,%0"
- :"=r" (word)
- :"rm" (word));
+ asm("bsf %1,%0"
+ : "=r" (word)
+ : "rm" (word));
return word;
}
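/*
* Illustrative result (editor's note): __ffs() is 0-based and, like
* "bsf", undefined for a zero argument, so callers must check first.
*
*	__ffs(0x18) == 3	- lowest set bit of 0b11000
*/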
*/
static inline unsigned long ffz(unsigned long word)
{
- __asm__("bsf %1,%0"
- :"=r" (word)
- :"r" (~word));
+ asm("bsf %1,%0"
+ : "=r" (word)
+ : "r" (~word));
return word;
}
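/*
* Illustrative result (editor's note): ffz() finds the lowest clear bit
* and is undefined if the word is all ones.
*
*	ffz(0x0f) == 4		- lowest zero bit of 0b1111
*/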
*/
static inline unsigned long __fls(unsigned long word)
{
- __asm__("bsr %1,%0"
- :"=r" (word)
- :"rm" (word));
+ asm("bsr %1,%0"
+ : "=r" (word)
+ : "rm" (word));
return word;
}
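/*
* Illustrative result (editor's note): __fls() is 0-based and, matching
* "bsr", undefined for a zero argument.
*
*	__fls(0x18) == 4	- highest set bit of 0b11000
*/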
{
int r;
#ifdef CONFIG_X86_CMOV
- __asm__("bsfl %1,%0\n\t"
- "cmovzl %2,%0"
- : "=r" (r) : "rm" (x), "r" (-1));
+ asm("bsfl %1,%0\n\t"
+ "cmovzl %2,%0"
+ : "=r" (r) : "rm" (x), "r" (-1));
#else
- __asm__("bsfl %1,%0\n\t"
- "jnz 1f\n\t"
- "movl $-1,%0\n"
- "1:" : "=r" (r) : "rm" (x));
+ asm("bsfl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $-1,%0\n"
+ "1:" : "=r" (r) : "rm" (x));
#endif
return r + 1;
}
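/*
* Illustrative results (editor's note): unlike __ffs(), ffs() above is
* 1-based and defined for zero; the cmov/branch substitutes -1 so that
* r + 1 yields 0.
*
*	ffs(0x8) == 4
*	ffs(0)   == 0
*/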
{
int r;
#ifdef CONFIG_X86_CMOV
- __asm__("bsrl %1,%0\n\t"
- "cmovzl %2,%0"
- : "=&r" (r) : "rm" (x), "rm" (-1));
+ asm("bsrl %1,%0\n\t"
+ "cmovzl %2,%0"
+ : "=&r" (r) : "rm" (x), "rm" (-1));
#else
- __asm__("bsrl %1,%0\n\t"
- "jnz 1f\n\t"
- "movl $-1,%0\n"
- "1:" : "=r" (r) : "rm" (x));
+ asm("bsrl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $-1,%0\n"
+ "1:" : "=r" (r) : "rm" (x));
#endif
return r + 1;
}
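/*
* Illustrative results (editor's note): fls() mirrors ffs() at the high
* end, 1-based with a defined result for zero.
*
*	fls(0x8) == 4
*	fls(0)   == 0
*/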
#undef ADDR
-#ifdef CONFIG_X86_32
-# include "bitops_32.h"
-#else
-# include "bitops_64.h"
-#endif
+static inline void set_bit_string(unsigned long *bitmap,
+ unsigned long i, int len)
+{
+ unsigned long end = i + len;
+ while (i < end) {
+ __set_bit(i, bitmap);
+ i++;
+ }
+}
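+/*
+ * Illustrative usage (editor's sketch): non-atomically mark a run of
+ * bits in a hypothetical bitmap.
+ *
+ *	unsigned long map[2] = { 0, 0 };
+ *	set_bit_string(map, 4, 8);	- sets bits 4..11
+ */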
+
+#ifdef __KERNEL__
+
+#include <asm-generic/bitops/sched.h>
+
+#define ARCH_HAS_FAST_MULTIPLIER 1
+#include <asm-generic/bitops/hweight.h>
+
+#endif /* __KERNEL__ */
+
+#include <asm-generic/bitops/fls64.h>
+
+#ifdef __KERNEL__
+
+#include <asm-generic/bitops/ext2-non-atomic.h>
+
+#define ext2_set_bit_atomic(lock, nr, addr) \
+ test_and_set_bit((nr), (unsigned long *)(addr))
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+ test_and_clear_bit((nr), (unsigned long *)(addr))
+
+#include <asm-generic/bitops/minix.h>
+
+#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */