#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_

#include <asm/types.h>
#include <asm/system.h>

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>	/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

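/*
 * Illustrative note (not part of the original header): assuming, say,
 * L1_CACHE_BYTES == 32 and ATOMIC_HASH_SIZE == 4, the address 0x1008
 * hashes to (0x1008 / 32) & 3 == 0, and every word in the same cacheline
 * hashes to the same index, so racing bitops on one cacheline always
 * serialize on the same spinlock.
 */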

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	__raw_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);		\
	__raw_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)
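
/*
 * Usage sketch (illustrative only): every read-modify-write below follows
 * the same pattern:
 *
 *	unsigned long flags;
 *
 *	_atomic_spin_lock_irqsave(addr, flags);
 *	(... modify *addr ...)
 *	_atomic_spin_unlock_irqrestore(addr, flags);
 *
 * The same address must be passed to both macros so that lock and unlock
 * hash to the same spinlock.
 */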

#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * NMI events can occur at any time, including when interrupts have been
 * disabled by *_irqsave().  So you can get NMI events occurring while a
 * *_bit function is holding a spin lock.  If the NMI handler also wants
 * to do bit manipulation (and they do) then you can get a deadlock
 * between the original caller of *_bit() and the NMI handler.
 *
 * by Keith Owens
 */

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p |= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
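
/*
 * Example (illustrative only; "pending_irqs" is a hypothetical bitmap):
 *
 *	static unsigned long pending_irqs[2];
 *
 *	set_bit(37, pending_irqs);
 *
 * Bit 37 lands in pending_irqs[1] on 32-bit (BIT_WORD(37) == 1) and in
 * pending_irqs[0] on 64-bit, which is why @nr is not restricted to a
 * single-word quantity.
 */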

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p &= ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
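
/*
 * Example (illustrative only; "BUSY_BIT" and "state" are hypothetical):
 * releasing a lock-like flag with the barrier helper mentioned above.
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(BUSY_BIT, &state);
 *
 * The barrier makes stores performed while the flag was held visible
 * before other CPUs can observe the cleared bit.
 */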

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered on x86; it may be
 * reordered on architectures other than x86.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p ^= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
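
/*
 * Example (illustrative only; "led_state" is a hypothetical word):
 *
 *	change_bit(0, &led_state);
 *
 * Each call atomically toggles bit 0, so two CPUs toggling concurrently
 * always flip the bit twice rather than losing one of the updates.
 */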

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; it may be
 * reordered on architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old | mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
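
/*
 * Example (illustrative only; "IN_USE_BIT" and "dev_flags" are
 * hypothetical): the atomic read-and-set makes a simple claim primitive.
 *
 *	if (test_and_set_bit(IN_USE_BIT, &dev_flags))
 *		return -EBUSY;	(somebody else already owns the device)
 *
 * Exactly one of several racing CPUs sees the old value as 0 and wins;
 * the owner later releases with clear_bit().
 */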

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; it may be
 * reordered on architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old & ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
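
/*
 * Example (illustrative only; "PENDING_BIT", "events" and handle_event()
 * are hypothetical): consuming a pending event exactly once even when
 * several CPUs race to handle it.
 *
 *	if (test_and_clear_bit(PENDING_BIT, &events))
 *		handle_event();
 *
 * Only the CPU that observed the bit as set calls handle_event().
 */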

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old ^ mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
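
/*
 * Example (illustrative only; "parity" is a hypothetical word): toggle a
 * bit and learn its previous value in one atomic step.
 *
 *	old = test_and_change_bit(0, &parity);
 *
 * If two CPUs execute this concurrently, exactly one sees old == 0 and
 * the other sees old == 1.
 */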

#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */