/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H

#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#include <linux/compiler.h>

#define ATOMIC_INIT(i)		{ (i) }

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)		ACCESS_ONCE((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result; \
\
        asm volatile("// atomic_" #op "\n" \
        "1: ldxr %w0, %2\n" \
        " " #asm_op " %w0, %w0, %w3\n" \
        " stxr %w1, %w0, %2\n" \
        " cbnz %w1, 1b" \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : "Ir" (i)); \
}

#define ATOMIC_OP_RETURN(op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result; \
\
        asm volatile("// atomic_" #op "_return\n" \
        "1: ldxr %w0, %2\n" \
        " " #asm_op " %w0, %w0, %w3\n" \
        " stlxr %w1, %w0, %2\n" \
        " cbnz %w1, 1b" \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : "Ir" (i) \
        : "memory"); \
\
        smp_mb(); \
        return result; \
}

#define ATOMIC_OPS(op, asm_op) \
        ATOMIC_OP(op, asm_op) \
        ATOMIC_OP_RETURN(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
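
/*
 * For illustration (hypothetical caller): ATOMIC_OPS(add, add) above
 * generates atomic_add() and atomic_add_return(). The plain ops give no
 * ordering guarantees, while the *_return variants are fully ordered
 * (stlxr followed by smp_mb()):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_add(2, &nr_users);
 *	if (atomic_sub_return(2, &nr_users) == 0)
 *		do_cleanup();	// hypothetical helper, runs once count hits 0
 */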

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
        unsigned long tmp;
        int oldval;

        smp_mb();

        asm volatile("// atomic_cmpxchg\n"
        "1: ldxr %w1, %2\n"
        " cmp %w1, %w3\n"
        " b.ne 2f\n"
        " stxr %w0, %w4, %2\n"
        " cbnz %w0, 1b\n"
        "2:"
        : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
        : "Ir" (old), "r" (new)
        : "cc");

        smp_mb();
        return oldval;
}
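
/*
 * For illustration (the names below are hypothetical): atomic_cmpxchg()
 * returns the value the counter held before the call, so a caller can tell
 * whether its compare-and-swap actually took effect:
 *
 *	static atomic_t state = ATOMIC_INIT(0);
 *
 *	if (atomic_cmpxchg(&state, 0, 1) == 0) {
 *		// we performed the 0 -> 1 transition
 *	} else {
 *		// someone else got there first; we left state unchanged
 *	}
 */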

#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
                c = old;
        return c;
}
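
/*
 * Note that __atomic_add_unless() returns the value the counter held before
 * the (possibly skipped) addition; the generic atomic_add_unless() and
 * atomic_inc_not_zero() helpers in <linux/atomic.h> are layered on top of it.
 */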

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		(atomic_add_return(1, v))
#define atomic_dec_return(v)		(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

/*
 * 64-bit atomic operations.
 */
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	ACCESS_ONCE((v)->counter)
#define atomic64_set(v, i)	(((v)->counter) = (i))

#define ATOMIC64_OP(op, asm_op) \
static inline void atomic64_##op(long i, atomic64_t *v) \
{ \
        long result; \
        unsigned long tmp; \
\
        asm volatile("// atomic64_" #op "\n" \
        "1: ldxr %0, %2\n" \
        " " #asm_op " %0, %0, %3\n" \
        " stxr %w1, %0, %2\n" \
        " cbnz %w1, 1b" \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : "Ir" (i)); \
}

#define ATOMIC64_OP_RETURN(op, asm_op) \
static inline long atomic64_##op##_return(long i, atomic64_t *v) \
{ \
        long result; \
        unsigned long tmp; \
\
        asm volatile("// atomic64_" #op "_return\n" \
        "1: ldxr %0, %2\n" \
        " " #asm_op " %0, %0, %3\n" \
        " stlxr %w1, %0, %2\n" \
        " cbnz %w1, 1b" \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : "Ir" (i) \
        : "memory"); \
\
        smp_mb(); \
        return result; \
}

#define ATOMIC64_OPS(op, asm_op) \
        ATOMIC64_OP(op, asm_op) \
        ATOMIC64_OP_RETURN(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
        long oldval;
        unsigned long res;

        smp_mb();

        asm volatile("// atomic64_cmpxchg\n"
        "1: ldxr %1, %2\n"
        " cmp %1, %3\n"
        " b.ne 2f\n"
        " stxr %w0, %4, %2\n"
        " cbnz %w0, 1b\n"
        "2:"
        : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
        : "Ir" (old), "r" (new)
        : "cc");

        smp_mb();
        return oldval;
}

#define atomic64_xchg(v, new)	(xchg(&((v)->counter), new))

static inline long atomic64_dec_if_positive(atomic64_t *v)
{
        long result;
        unsigned long tmp;

        asm volatile("// atomic64_dec_if_positive\n"
        "1: ldxr %0, %2\n"
        " subs %0, %0, #1\n"
        " b.mi 2f\n"
        " stlxr %w1, %0, %2\n"
        " cbnz %w1, 1b\n"
        " dmb ish\n"
        "2:"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        :
        : "cc", "memory");

        return result;
}
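
/*
 * For illustration (hypothetical caller): atomic64_dec_if_positive() only
 * stores the decremented value when it would not go negative, and returns the
 * decremented value either way, so a negative return means the counter was
 * left untouched:
 *
 *	if (atomic64_dec_if_positive(&budget) < 0)
 *		return -EBUSY;	// budget already exhausted, nothing consumed
 */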

static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long c, old;

        c = atomic64_read(v);
        while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
                c = old;

        return c != u;
}
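
/*
 * Unlike __atomic_add_unless() above, atomic64_add_unless() returns a
 * boolean: non-zero if the addition was performed, zero if the counter
 * already held 'u'. atomic64_inc_not_zero() below relies on this.
 */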

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif