
FXAtomic.h
1 /********************************************************************************
2 * *
3 * A t o m i c O p e r a t i o n s *
4 * *
5 *********************************************************************************
6 * Copyright (C) 2006,2024 by Jeroen van der Zijp. All Rights Reserved. *
7 *********************************************************************************
8 * This library is free software; you can redistribute it and/or modify *
9 * it under the terms of the GNU Lesser General Public License as published by *
10 * the Free Software Foundation; either version 3 of the License, or *
11 * (at your option) any later version. *
12 * *
13 * This library is distributed in the hope that it will be useful, *
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
16 * GNU Lesser General Public License for more details. *
17 * *
18 * You should have received a copy of the GNU Lesser General Public License *
19 * along with this program. If not, see <http://www.gnu.org/licenses/> *
20 ********************************************************************************/
21 #ifndef FXATOMIC_H
22 #define FXATOMIC_H
23 
24 
25 namespace FX {
26 
27 
29 static inline void atomicThreadFence(){
30 #if defined(_WIN32) && (_MSC_VER >= 1500)
31  _ReadWriteBarrier();
32 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
33  __atomic_thread_fence(__ATOMIC_SEQ_CST);
34 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
35  __sync_synchronize();
36 #else
37 #warning "atomicThreadFence(): not implemented."
38 #endif
39  }
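As a usage sketch, assuming fx.h (the FOX master include, which pulls in FXAtomic.h) and the illustrative names publish/payload/ready, the fence orders an ordinary store ahead of the flag store that announces it:

  #include "fx.h"
  using namespace FX;

  static FXint payload=0;              // data written before the flag
  static volatile FXint ready=0;       // consumers poll this flag

  void publish(FXint value){
    payload=value;                     // plain store
    atomicThreadFence();               // make the store above visible first
    ready=1;                           // a reader that sees 1 may read payload
    }

A consumer that observes ready==1 would issue a matching atomicThreadFence() before reading payload.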
40 
41 
43 
44 
46 static inline FXint atomicSet(volatile FXint* ptr,FXint v){
47 #if defined(_WIN32) && (_MSC_VER >= 1500)
48  return _InterlockedExchange((volatile long*)ptr,(long)v);
49 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
50  return __atomic_exchange_n(ptr,v,__ATOMIC_SEQ_CST);
51 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
52  return __sync_lock_test_and_set(ptr,v);
53 #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
54  FXint ret=v;
55  __asm__ __volatile__("xchgl %0,(%1)\n\t" : "=r"(ret) : "r"(ptr), "0"(ret) : "memory", "cc");
56  return ret;
57 #else
58 #warning "atomicSet(volatile FXint*,FXint): not implemented."
59  FXint ret=*ptr; *ptr=v;
60  return ret;
61 #endif
62  }
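A small sketch of how the returned previous value is typically used, here as a run-exactly-once guard (cleanedUp and cleanupOnce are made-up names; fx.h is assumed to be included):

  static volatile FXint cleanedUp=0;

  void cleanupOnce(){
    if(atomicSet(&cleanedUp,1)==0){    // only the first caller sees the old value 0
      // ... release shared resources exactly once ...
      }
    }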
63 
64 
66 static inline FXint atomicCas(volatile FXint* ptr,FXint expect,FXint v){
67 #if defined(_WIN32) && (_MSC_VER >= 1500)
68  return _InterlockedCompareExchange((volatile long*)ptr,(long)v,(long)expect);
69 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
70  FXint ex=expect;
71  __atomic_compare_exchange_n(ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
72  return ex;
73 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
74  return __sync_val_compare_and_swap(ptr,expect,v);
75 #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
76  FXint ret;
77  __asm__ __volatile__("lock\n\t"
78  "cmpxchgl %2,(%1)\n\t" : "=a"(ret) : "r"(ptr), "r"(v), "a"(expect) : "memory", "cc");
79  return ret;
80 #else
81 #warning "atomicCas(volatile FXint*,FXint,FXint): not implemented."
82  FXint ret=*ptr;
83  if(*ptr==expect){ *ptr=v; }
84  return ret;
85 #endif
86  }
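atomicCas returns the value it actually found at ptr, so the usual pattern is a retry loop that recomputes the update from the latest snapshot. A sketch under that assumption (saturatingAdd is an illustrative name; fx.h assumed):

  FXint saturatingAdd(volatile FXint* ptr,FXint v,FXint limit){
    FXint old,next;
    do{
      old=*ptr;                                // snapshot the current value
      next=(old+v<limit) ? old+v : limit;      // compute the desired new value
      }
    while(atomicCas(ptr,old,next)!=old);       // retry if another thread got in between
    return next;
    }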
87 
88 
90 static inline FXbool atomicBoolCas(volatile FXint* ptr,FXint expect,FXint v){
91 #if defined(_WIN32) && (_MSC_VER >= 1500)
92  return (_InterlockedCompareExchange((volatile long*)ptr,(long)v,(long)expect)==(long)expect);
93 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_INT_LOCK_FREE == 2)
94  FXint ex=expect;
95  return __atomic_compare_exchange_n(ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
96 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
97  return __sync_bool_compare_and_swap(ptr,expect,v);
98 #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
99  FXbool ret;
100  __asm__ __volatile__ ("lock\n\t"
101  "cmpxchgl %2,(%1)\n\t"
102  "sete %%al\n\t"
103  "andl $1, %%eax\n\t" : "=a"(ret) : "r"(ptr), "r"(v), "a"(expect) : "memory", "cc");
104  return ret;
105 #else
106 #warning "atomicBoolCas(volatile FXint*,FXint,FXint): not implemented."
107  if(*ptr==expect){ *ptr=v; return true; }
108  return false;
109 #endif
110  }
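atomicBoolCas is the natural primitive for a tiny test-and-set lock. The sketch below is illustrative only, since FOX already ships proper lock classes (FXSpinLock, FXMutex); fx.h assumed:

  struct TinyLock {
    volatile FXint flag;
    TinyLock():flag(0){}
    void lock(){ while(!atomicBoolCas(&flag,0,1)){ } }   // spin until we flip 0 -> 1
    void unlock(){ atomicSet(&flag,0); }                 // release the lock
    };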
111 
112 
114 static inline FXint atomicAdd(volatile FXint* ptr,FXint v){
115 #if defined(_WIN32) && (_MSC_VER >= 1500)
116  return _InterlockedExchangeAdd((volatile long*)ptr,(long)v);
117 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
118  return __atomic_fetch_add(ptr,v,__ATOMIC_SEQ_CST);
119 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
120  return __sync_fetch_and_add(ptr,v);
121 #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
122  FXint ret=v;
123  __asm__ __volatile__ ("lock\n\t"
124  "xaddl %0,(%1)\n\t" : "=r"(ret) : "r"(ptr), "0"(ret) : "memory", "cc");
125  return ret;
126 #else
127 #warning "atomicAdd(volatile FXint*,FXint): not implemented."
128  FXint ret=*ptr; *ptr+=v;
129  return ret;
130 #endif
131  }
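Because atomicAdd returns the value held before the addition, it maps directly onto intrusive reference counting. A minimal sketch (Counted is a made-up type; fx.h assumed):

  struct Counted {
    volatile FXint refs;
    Counted():refs(1){}
    void ref(){ atomicAdd(&refs,1); }
    FXbool unref(){ return atomicAdd(&refs,-1)==1; }     // true when this call dropped the last reference
    };

The caller would delete the object when unref() returns true.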
132 
133 
135 static inline FXint atomicAnd(volatile FXint* ptr,FXint v){
136 #if defined(_WIN32) && (_MSC_VER >= 1500)
137  return _InterlockedAnd((volatile long*)ptr,(long)v);
138 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
139  return __atomic_fetch_and(ptr,v,__ATOMIC_SEQ_CST);
140 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
141  return __sync_fetch_and_and(ptr,v);
142 #else
143 #warning "atomicAnd(volatile FXint*,FXint): not implemented."
144  FXint ret=*ptr; *ptr&=v;
145  return ret;
146 #endif
147  }
148 
149 
151 static inline FXint atomicOr(volatile FXint* ptr,FXint v){
152 #if defined(_WIN32) && (_MSC_VER >= 1500)
153  return _InterlockedOr((volatile long*)ptr,(long)v);
154 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
155  return __atomic_fetch_or(ptr,v,__ATOMIC_SEQ_CST);
156 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
157  return __sync_fetch_and_or(ptr,v);
158 #else
159 #warning "atomicOr(volatile FXint*,FXint): not implemented."
160  FXint ret=*ptr; *ptr|=v;
161  return ret;
162 #endif
163  }
164 
165 
167 static inline FXint atomicXor(volatile FXint* ptr,FXint v){
168 #if defined(_WIN32) && (_MSC_VER >= 1500)
169  return _InterlockedXor((volatile long*)ptr,(long)v);
170 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
171  return __atomic_fetch_xor(ptr,v,__ATOMIC_SEQ_CST);
172 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
173  return __sync_fetch_and_xor(ptr,v);
174 #else
175 #warning "atomicXor(volatile FXint*,FXint): not implemented."
176  FXint ret=*ptr; *ptr^=v;
177  return ret;
178 #endif
179  }
180 
181 
183 static inline FXint atomicMin(volatile FXint* ptr,FXint v){
184  FXint old;
185  while(v<(old=*ptr) && !atomicBoolCas(ptr,old,v)){ }
186  return old;
187  }
188 
189 
191 static inline FXint atomicMax(volatile FXint* ptr,FXint v){
192  FXint old;
193  while((old=*ptr)<v && !atomicBoolCas(ptr,old,v)){ }
194  return old;
195  }
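atomicMin and atomicMax simply loop on atomicBoolCas until the stored value is no worse than v; a typical use is recording an extreme such as a high-water mark (names illustrative; fx.h assumed):

  static volatile FXint highWater=0;

  void noteQueueDepth(FXint depth){
    atomicMax(&highWater,depth);       // keeps the largest depth ever observed
    }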
196 
197 
199 
200 
202 static inline FXuint atomicSet(volatile FXuint* ptr,FXuint v){
203 #if defined(_WIN32) && (_MSC_VER >= 1500)
204  return _InterlockedExchange((volatile long*)ptr,(long)v);
205 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
206  return __atomic_exchange_n(ptr,v,__ATOMIC_SEQ_CST);
207 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
208  return __sync_lock_test_and_set(ptr,v);
209 #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
210  FXuint ret=v;
211  __asm__ __volatile__("xchgl %0,(%1)\n\t" : "=r"(ret) : "r"(ptr), "0"(ret) : "memory", "cc");
212  return ret;
213 #else
214 #warning "atomicSet(volatile FXuint*,FXuint): not implemented."
215  FXuint ret=*ptr; *ptr=v; return ret;
216 #endif
217  }
218 
219 
221 static inline FXuint atomicCas(volatile FXuint* ptr,FXuint expect,FXuint v){
222 #if defined(_WIN32) && (_MSC_VER >= 1500)
223  return _InterlockedCompareExchange((volatile long*)ptr,(long)v,(long)expect);
224 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
225  FXuint ex=expect;
226  __atomic_compare_exchange_n(ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
227  return ex;
228 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
229  return __sync_val_compare_and_swap(ptr,expect,v);
230 #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
231  FXuint ret;
232  __asm__ __volatile__("lock\n\t"
233  "cmpxchgl %2,(%1)\n\t" : "=a"(ret) : "r"(ptr), "r"(v), "a"(expect) : "memory", "cc");
234  return ret;
235 #else
236 #warning "atomicCas(volatile FXuint*,FXuint,FXuint): not implemented."
237  FXuint ret=*ptr;
238  if(*ptr==expect){ *ptr=v; }
239  return ret;
240 #endif
241  }
242 
243 
245 static inline FXbool atomicBoolCas(volatile FXuint* ptr,FXuint expect,FXuint v){
246 #if defined(_WIN32) && (_MSC_VER >= 1500)
247  return (_InterlockedCompareExchange((volatile long*)ptr,(long)v,(long)expect)==(long)expect);
248 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_INT_LOCK_FREE == 2)
249  FXuint ex=expect;
250  return __atomic_compare_exchange_n(ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
251 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
252  return __sync_bool_compare_and_swap(ptr,expect,v);
253 #else
254 #warning "atomicBoolCas(volatile FXuint*,FXuint,FXuint): not implemented."
255  if(*ptr==expect){ *ptr=v; return true; }
256  return false;
257 #endif
258  }
259 
260 
262 static inline FXuint atomicAdd(volatile FXuint* ptr,FXuint v){
263 #if defined(_WIN32) && (_MSC_VER >= 1500)
264  return _InterlockedExchangeAdd((volatile long*)ptr,(long)v);
265 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
266  return __atomic_fetch_add(ptr,v,__ATOMIC_SEQ_CST);
267 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
268  return __sync_fetch_and_add(ptr,v);
269 #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
270  FXuint ret=v;
271  __asm__ __volatile__ ("lock\n\t"
272  "xaddl %0,(%1)\n\t" : "=r"(ret) : "r"(ptr), "0"(ret) : "memory", "cc");
273  return ret;
274 #else
275 #warning "atomicAdd(volatile FXuint*,FXuint): not implemented."
276  FXuint ret=*ptr; *ptr+=v;
277  return ret;
278 #endif
279  }
280 
281 
283 static inline FXuint atomicAnd(volatile FXuint* ptr,FXuint v){
284 #if defined(_WIN32) && (_MSC_VER >= 1500)
285  return _InterlockedAnd((volatile long*)ptr,(long)v);
286 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
287  return __atomic_fetch_and(ptr,v,__ATOMIC_SEQ_CST);
288 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
289  return __sync_fetch_and_and(ptr,v);
290 #else
291 #warning "atomicAnd(volatile FXuint*,FXuint): not implemented."
292  FXuint ret=*ptr; *ptr&=v;
293  return ret;
294 #endif
295  }
296 
297 
299 static inline FXuint atomicOr(volatile FXuint* ptr,FXuint v){
300 #if defined(_WIN32) && (_MSC_VER >= 1500)
301  return _InterlockedOr((volatile long*)ptr,(long)v);
302 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
303  return __atomic_fetch_or(ptr,v,__ATOMIC_SEQ_CST);
304 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
305  return __sync_fetch_and_or(ptr,v);
306 #else
307 #warning "atomicOr(volatile FXuint*,FXuint): not implemented."
308  FXuint ret=*ptr; *ptr|=v;
309  return ret;
310 #endif
311  }
312 
313 
315 static inline FXuint atomicXor(volatile FXuint* ptr,FXuint v){
316 #if defined(_WIN32) && (_MSC_VER >= 1500)
317  return _InterlockedXor((volatile long*)ptr,(long)v);
318 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
319  return __atomic_fetch_xor(ptr,v,__ATOMIC_SEQ_CST);
320 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
321  return __sync_fetch_and_xor(ptr,v);
322 #else
323 #warning "atomicXor(volatile FXuint*,FXuint): not implemented."
324  FXuint ret=*ptr; *ptr^=v;
325  return ret;
326 #endif
327  }
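The unsigned overloads behave exactly like the signed ones; a common use is a shared flag word, where the value returned by the fetch-and-or tells you whether a bit was already set. A sketch with made-up flag names (fx.h assumed):

  static const FXuint FLAG_DIRTY=1;
  static const FXuint FLAG_CLOSING=2;
  static volatile FXuint flags=0;

  FXbool markDirty(){
    return (atomicOr(&flags,FLAG_DIRTY)&FLAG_DIRTY)==0;  // true if this call set the bit
    }

  void clearDirty(){
    atomicAnd(&flags,~FLAG_DIRTY);
    }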
328 
329 
331 static inline FXuint atomicMin(volatile FXuint* ptr,FXuint v){
332  FXuint old;
333  while(v<(old=*ptr) && !atomicBoolCas(ptr,old,v)){ }
334  return old;
335  }
336 
337 
339 static inline FXuint atomicMax(volatile FXuint* ptr,FXuint v){
340  FXuint old;
341  while((old=*ptr)<v && !atomicBoolCas(ptr,old,v)){ }
342  return old;
343  }
344 
345 
347 
348 
350 static inline FXlong atomicSet(volatile FXlong* ptr,FXlong v){
351 #if defined(_WIN32) && (_MSC_VER >= 1800)
352  return _InterlockedExchange64(ptr,v);
353 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
354  return __atomic_exchange_n(ptr,v,__ATOMIC_SEQ_CST);
355 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
356  return __sync_lock_test_and_set(ptr,v);
357 #elif (defined(__GNUC__) && defined(__i386__) && (defined(__PIC__) || defined(__PIE__)))
358  FXlong ret;
359  __asm__ __volatile__ ("xchgl %%esi, %%ebx\n\t"
360  "1:\n\t"
361  "lock\n\t"
362  "cmpxchg8b (%1)\n\t"
363  "jnz 1b\n\t"
364  "xchgl %%esi, %%ebx\n\t" : "=A"(ret) : "D"(ptr), "S"((FXuint)v), "c"((FXuint)(v>>32)), "A"(*ptr) : "memory", "cc");
365  return ret;
366 #elif (defined(__GNUC__) && defined(__i386__))
367  FXlong ret;
368  __asm__ __volatile__ ("1:\n\t"
369  "lock\n\t"
370  "cmpxchg8b (%1)\n\t"
371  "jnz 1b\n\t" : "=A"(ret) : "D"(ptr), "b"((FXuint)v), "c"((FXuint)(v>>32)), "A"(*ptr) : "memory", "cc");
372  return ret;
373 #elif (defined(__GNUC__) && defined(__x86_64__))
374  FXlong ret;
375  __asm__ __volatile__("xchgq %0,(%1)\n\t" : "=r"(ret) : "r"(ptr), "0"(v) : "memory", "cc");
376  return ret;
377 #else
378 #warning "atomicSet(volatile FXlong*,FXlong): not implemented."
379  FXlong ret=*ptr; *ptr=v;
380  return ret;
381 #endif
382  }
383 
384 
386 static inline FXlong atomicCas(volatile FXlong* ptr,FXlong expect,FXlong v){
387 #if defined(_WIN32) && (_MSC_VER >= 1800)
388  return _InterlockedCompareExchange64(ptr,v,expect);
389 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
390  FXlong ex=expect;
391  __atomic_compare_exchange_n(ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
392  return ex;
393 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
394  return __sync_val_compare_and_swap(ptr,expect,v);
395 #elif (defined(__GNUC__) && defined(__i386__) && (defined(__PIC__) || defined(__PIE__)))
396  FXlong ret;
397  __asm__ __volatile__ ("xchgl %%esi, %%ebx\n\t"
398  "lock\n\t"
399  "cmpxchg8b (%1)\n\t"
400  "movl %%esi, %%ebx\n\t" : "=A"(ret) : "D"(ptr), "S"((FXuint)v), "c"((FXuint)(v>>32)), "A"(expect) : "memory", "cc");
401  return ret;
402 #elif (defined(__GNUC__) && defined(__i386__))
403  FXlong ret;
404  __asm__ __volatile__ ("lock\n\t"
405  "cmpxchg8b (%1)\n\t" : "=A"(ret) : "D"(ptr), "b"((FXuint)v), "c"((FXuint)(v>>32)), "A"(expect) : "memory", "cc");
406  return ret;
407 #elif (defined(__GNUC__) && defined(__x86_64__))
408  FXlong ret;
409  __asm__ __volatile__("lock\n\t"
410  "cmpxchgq %2,(%1)\n\t" : "=a"(ret) : "r"(ptr), "r"(v), "a"(expect) : "memory", "cc");
411  return ret;
412 #else
413 #warning "atomicCas(volatile FXlong*,FXlong,FXlong): not implemented."
414  FXlong ret=*ptr;
415  if(*ptr==expect){ *ptr=v; }
416  return ret;
417 #endif
418  }
419 
420 
422 static inline FXbool atomicBoolCas(volatile FXlong* ptr,FXlong expect,FXlong v){
423 #if defined(_WIN32) && (_MSC_VER >= 1800)
424  return (_InterlockedCompareExchange64(ptr,v,expect)==expect);
425 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
426  FXlong ex=expect;
427  return __atomic_compare_exchange_n(ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
428 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
429  return __sync_bool_compare_and_swap(ptr,expect,v);
430 #elif (defined(__GNUC__) && defined(__i386__) && (defined(__PIC__) || defined(__PIE__)))
431  FXbool ret;
432  __asm__ __volatile__ ("xchgl %%esi, %%ebx\n\t"
433  "lock\n\t"
434  "cmpxchg8b (%1)\n\t"
435  "setz %%al\n\t"
436  "andl $1, %%eax\n\t"
437  "xchgl %%esi, %%ebx\n\t" : "=A"(ret) : "D"(ptr), "S"((FXuint)v), "c"((FXuint)(v>>32)), "A"(expect) : "memory", "cc");
438  return ret;
439 #elif (defined(__GNUC__) && defined(__i386__))
440  FXbool ret;
441  __asm__ __volatile__ ("lock\n\t"
442  "cmpxchg8b (%1)\n\t"
443  "setz %%al\n\t"
444  "andl $1, %%eax\n\t" : "=a"(ret) : "D"(ptr), "b"((FXuint)v), "c"((FXuint)(v>>32)), "A"(expect) : "memory", "cc");
445  return ret;
446 #elif (defined(__GNUC__) && defined(__x86_64__))
447  FXbool ret;
448  __asm__ __volatile__ ("lock\n\t"
449  "cmpxchgq %2,(%1)\n\t"
450  "sete %%al\n\t"
451  "andq $1, %%rax\n\t" : "=a"(ret) : "r"(ptr), "r"(v), "a"(expect) : "memory", "cc");
452  return ret;
453 #else
454 #warning "atomicBoolCas(volatile FXlong*,FXlong,FXlong): not implemented."
455  if(*ptr==expect){ *ptr=v; return true; }
456  return false;
457 #endif
458  }
459 
460 
462 static inline FXlong atomicAdd(volatile FXlong* ptr,FXlong v){
463 #if defined(_WIN32) && (_MSC_VER >= 1800)
464  return _InterlockedExchangeAdd64(ptr,v);
465 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
466  return __atomic_fetch_add(ptr,v,__ATOMIC_SEQ_CST);
467 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
468  return __sync_fetch_and_add(ptr,v);
469 #elif (defined(__GNUC__) && defined(__i386__) && (defined(__PIC__) || defined(__PIE__)))
470  FXuint inclo=v;
471  FXuint inchi=(v>>32);
472  FXlong ret;
473  __asm __volatile("movl %%ebx, %%esi\n\t"
474  "1:\n\t"
475  "movl %2, %%ebx\n\t"
476  "movl %3, %%ecx\n\t"
477  "addl %%eax, %%ebx\n\t"
478  "adcl %%edx, %%ecx\n\t"
479  "lock\n\t"
480  "cmpxchg8b (%1)\n\t"
481  "jnz 1b\n\t"
482  "movl %%esi, %%ebx\n\t" : "=A"(ret) : "D"(ptr), "m"(inclo), "m"(inchi), "A"(*ptr) : "esi", "memory", "cc");
483  return ret;
484 #elif (defined(__GNUC__) && defined(__i386__))
485  FXuint inclo=v;
486  FXuint inchi=(v>>32);
487  FXlong ret;
488  __asm __volatile("1:\n\t"
489  "movl %2, %%ebx\n\t"
490  "movl %3, %%ecx\n\t"
491  "addl %%eax, %%ebx\n\t"
492  "adcl %%edx, %%ecx\n\t"
493  "lock\n\t"
494  "cmpxchg8b (%1)\n\t"
495  "jnz 1b\n\t" : "=A"(ret) : "D"(ptr), "m"(inclo), "m"(inchi), "A"(*ptr) : "memory", "cc");
496  return ret;
497 #elif (defined(__GNUC__) && defined(__x86_64__))
498  FXlong ret;
499  __asm__ __volatile__ ("lock\n\t"
500  "xaddq %0,(%1)\n\t" : "=r"(ret) : "r"(ptr), "0"(v) : "memory", "cc");
501  return ret;
502 #else
503 #warning "atomicAdd(volatile FXlong*,FXlong): not implemented."
504  FXlong ret=*ptr; *ptr+=v;
505  return ret;
506 #endif
507  }
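On 32-bit targets a plain FXlong increment is not a single instruction, so shared 64-bit statistics should go through atomicAdd; note that atomicAdd(ptr,0) doubles as an atomic 64-bit read. Sketch with illustrative names (fx.h assumed):

  static volatile FXlong bytesReceived=0;

  void countBytes(FXlong n){
    atomicAdd(&bytesReceived,n);
    }

  FXlong snapshotBytes(){
    return atomicAdd(&bytesReceived,0);  // atomic read, even where 64-bit loads can tear
    }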
508 
509 
511 static inline FXlong atomicAnd(volatile FXlong* ptr,FXlong v){
512 #if defined(_WIN32) && (_MSC_VER >= 1800)
513  return _InterlockedAnd64(ptr,v);
514 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
515  return __atomic_fetch_and(ptr,v,__ATOMIC_SEQ_CST);
516 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
517  return __sync_fetch_and_and(ptr,v);
518 #else
519 #warning "atomicAnd(volatile FXlong*,FXlong): not implemented."
520  FXlong ret=*ptr; *ptr&=v;
521  return ret;
522 #endif
523  }
524 
525 
527 static inline FXlong atomicOr(volatile FXlong* ptr,FXlong v){
528 #if defined(_WIN32) && (_MSC_VER >= 1800)
529  return _InterlockedOr64(ptr,v);
530 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
531  return __atomic_fetch_or(ptr,v,__ATOMIC_SEQ_CST);
532 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
533  return __sync_fetch_and_or(ptr,v);
534 #else
535 #warning "atomicOr(volatile FXlong*,FXlong): not implemented."
536  FXlong ret=*ptr; *ptr|=v;
537  return ret;
538 #endif
539  }
540 
541 
543 static inline FXlong atomicXor(volatile FXlong* ptr,FXlong v){
544 #if defined(_WIN32) && (_MSC_VER >= 1800)
545  return _InterlockedXor64(ptr,v);
546 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
547  return __atomic_fetch_xor(ptr,v,__ATOMIC_SEQ_CST);
548 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
549  return __sync_fetch_and_xor(ptr,v);
550 #else
551 #warning "atomicXor(volatile FXlong*,FXlong): not implemented."
552  FXlong ret=*ptr; *ptr^=v;
553  return ret;
554 #endif
555  }
556 
557 
559 static inline FXlong atomicMin(volatile FXlong* ptr,FXlong v){
560  FXlong old;
561  while(v<(old=*ptr) && !atomicBoolCas(ptr,old,v)){ }
562  return old;
563  }
564 
565 
567 static inline FXlong atomicMax(volatile FXlong* ptr,FXlong v){
568  FXlong old;
569  while((old=*ptr)<v && !atomicBoolCas(ptr,old,v)){ }
570  return old;
571  }
572 
573 
575 
576 
578 static inline FXulong atomicSet(volatile FXulong* ptr,FXulong v){
579 #if defined(_WIN32) && (_MSC_VER >= 1800)
580  return _InterlockedExchange64((volatile FXlong*)ptr,(FXlong)v);
581 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
582  return __atomic_exchange_n(ptr,v,__ATOMIC_SEQ_CST);
583 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
584  return __sync_lock_test_and_set(ptr,v);
585 #elif (defined(__GNUC__) && defined(__i386__) && (defined(__PIC__) || defined(__PIE__)))
586  FXulong ret;
587  __asm__ __volatile__ ("xchgl %%esi, %%ebx\n\t"
588  "1:\n\t"
589  "lock\n\t"
590  "cmpxchg8b (%1)\n\t"
591  "jnz 1b\n\t"
592  "xchgl %%esi, %%ebx\n\t" : "=A"(ret) : "D"(ptr), "S"((FXuint)v), "c"((FXuint)(v>>32)), "A"(*ptr) : "memory", "cc");
593  return ret;
594 #elif (defined(__GNUC__) && defined(__i386__))
595  FXulong ret;
596  __asm__ __volatile__ ("1:\n\t"
597  "lock\n\t"
598  "cmpxchg8b (%1)\n\t"
599  "jnz 1b\n\t" : "=A"(ret) : "D"(ptr), "b"((FXuint)v), "c"((FXuint)(v>>32)), "A"(*ptr) : "memory", "cc");
600  return ret;
601 #elif (defined(__GNUC__) && defined(__x86_64__))
602  FXulong ret;
603  __asm__ __volatile__("xchgq %0,(%1)\n\t" : "=r"(ret) : "r"(ptr), "0"(v) : "memory", "cc");
604  return ret;
605 #else
606 #warning "atomicSet(volatile FXulong*,FXulong): not implemented."
607  FXulong ret=*ptr; *ptr=v;
608  return ret;
609 #endif
610  }
611 
612 
614 static inline FXulong atomicCas(volatile FXulong* ptr,FXulong expect,FXulong v){
615 #if defined(_WIN32) && (_MSC_VER >= 1800)
616  return _InterlockedCompareExchange64((volatile FXlong*)ptr,(FXlong)v,(FXlong)expect);
617 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
618  FXulong ex=expect;
619  __atomic_compare_exchange_n(ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
620  return ex;
621 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
622  return __sync_val_compare_and_swap(ptr,expect,v);
623 #elif (defined(__GNUC__) && defined(__i386__) && (defined(__PIC__) || defined(__PIE__)))
624  FXulong ret;
625  __asm__ __volatile__ ("xchgl %%esi, %%ebx\n\t"
626  "lock\n\t"
627  "cmpxchg8b (%1)\n\t"
628  "movl %%esi, %%ebx\n\t" : "=A"(ret) : "D"(ptr), "S"((FXuint)v), "c"((FXuint)(v>>32)), "A"(expect) : "memory", "cc");
629  return ret;
630 #elif (defined(__GNUC__) && defined(__i386__))
631  FXulong ret;
632  __asm__ __volatile__ ("lock\n\t"
633  "cmpxchg8b (%1)\n\t" : "=A"(ret) : "D"(ptr), "b"((FXuint)v), "c"((FXuint)(v>>32)), "A"(expect) : "memory", "cc");
634  return ret;
635 #elif (defined(__GNUC__) && defined(__x86_64__))
636  FXulong ret;
637  __asm__ __volatile__("lock\n\t"
638  "cmpxchgq %2,(%1)\n\t" : "=a"(ret) : "r"(ptr), "r"(v), "a"(expect) : "memory", "cc");
639  return ret;
640 #else
641 #warning "atomicCas(volatile FXulong*,FXulong,FXulong): not implemented."
642  FXulong ret=*ptr;
643  if(*ptr==expect){ *ptr=v; }
644  return ret;
645 #endif
646  }
647 
648 
650 static inline FXbool atomicBoolCas(volatile FXulong* ptr,FXulong expect,FXulong v){
651 #if defined(_WIN32) && (_MSC_VER >= 1800)
652  return (_InterlockedCompareExchange64((volatile FXlong*)ptr,(FXlong)v,(FXlong)expect)==(FXlong)expect);
653 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
654  FXulong ex=expect;
655  return __atomic_compare_exchange_n(ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
656 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
657  return __sync_bool_compare_and_swap(ptr,expect,v);
658 #elif (defined(__GNUC__) && defined(__i386__) && (defined(__PIC__) || defined(__PIE__)))
659  FXbool ret;
660  __asm__ __volatile__ ("xchgl %%esi, %%ebx\n\t"
661  "lock\n\t"
662  "cmpxchg8b (%1)\n\t"
663  "setz %%al\n\t"
664  "andl $1, %%eax\n\t"
665  "xchgl %%esi, %%ebx\n\t" : "=A"(ret) : "D"(ptr), "S"((FXuint)v), "c"((FXuint)(v>>32)), "A"(expect) : "memory", "cc");
666  return ret;
667 #elif (defined(__GNUC__) && defined(__i386__))
668  FXbool ret;
669  __asm__ __volatile__ ("lock\n\t"
670  "cmpxchg8b (%1)\n\t"
671  "setz %%al\n\t"
672  "andl $1, %%eax\n\t" : "=a"(ret) : "D"(ptr), "b"((FXuint)v), "c"((FXuint)(v>>32)), "A"(expect) : "memory", "cc");
673  return ret;
674 #elif (defined(__GNUC__) && defined(__x86_64__))
675  FXbool ret;
676  __asm__ __volatile__ ("lock\n\t"
677  "cmpxchgq %2,(%1)\n\t"
678  "sete %%al\n\t"
679  "andq $1, %%rax\n\t" : "=a"(ret) : "r"(ptr), "r"(v), "a"(expect) : "memory", "cc");
680  return ret;
681 #else
682 #warning "atomicBoolCas(volatile FXulong*,FXulong,FXulong): not implemented."
683  if(*ptr==expect){ *ptr=v; return true; }
684  return false;
685 #endif
686  }
687 
688 
690 static inline FXulong atomicAdd(volatile FXulong* ptr,FXulong v){
691 #if defined(_WIN32) && (_MSC_VER >= 1800)
692  return _InterlockedExchangeAdd64((volatile FXlong*)ptr,(FXlong)v);
693 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
694  return __atomic_fetch_add(ptr,v,__ATOMIC_SEQ_CST);
695 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
696  return __sync_fetch_and_add(ptr,v);
697 #elif (defined(__GNUC__) && defined(__i386__) && (defined(__PIC__) || defined(__PIE__)))
698  FXuint inclo=v;
699  FXuint inchi=(v>>32);
700  FXulong ret;
701  __asm __volatile("movl %%ebx, %%esi\n\t"
702  "1:\n\t"
703  "movl %2, %%ebx\n\t"
704  "movl %3, %%ecx\n\t"
705  "addl %%eax, %%ebx\n\t"
706  "adcl %%edx, %%ecx\n\t"
707  "lock\n\t"
708  "cmpxchg8b (%1)\n\t"
709  "jnz 1b\n\t"
710  "movl %%esi, %%ebx\n\t" : "=A"(ret) : "D"(ptr), "m"(inclo), "m"(inchi), "A"(*ptr) : "esi", "memory", "cc");
711  return ret;
712 #elif (defined(__GNUC__) && defined(__i386__))
713  FXuint inclo=v;
714  FXuint inchi=(v>>32);
715  FXulong ret;
716  __asm __volatile("1:\n\t"
717  "movl %2, %%ebx\n\t"
718  "movl %3, %%ecx\n\t"
719  "addl %%eax, %%ebx\n\t"
720  "adcl %%edx, %%ecx\n\t"
721  "lock\n\t"
722  "cmpxchg8b (%1)\n\t"
723  "jnz 1b\n\t" : "=A"(ret) : "D"(ptr), "m"(inclo), "m"(inchi), "A"(*ptr) : "memory", "cc");
724  return ret;
725 #elif (defined(__GNUC__) && defined(__x86_64__))
726  FXulong ret;
727  __asm__ __volatile__ ("lock\n\t"
728  "xaddq %0,(%1)\n\t" : "=r"(ret) : "r"(ptr), "0"(v) : "memory", "cc");
729  return ret;
730 #else
731 #warning "atomicAdd(volatile FXulong*,FXulong): not implemented."
732  FXulong ret=*ptr; *ptr+=v;
733  return ret;
734 #endif
735  }
736 
737 
739 static inline FXulong atomicAnd(volatile FXulong* ptr,FXulong v){
740 #if defined(_WIN32) && (_MSC_VER >= 1800)
741  return _InterlockedAnd64((volatile FXlong*)ptr,(FXlong)v);
742 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
743  return __atomic_fetch_and(ptr,v,__ATOMIC_SEQ_CST);
744 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
745  return __sync_fetch_and_and(ptr,v);
746 #else
747 #warning "atomicAnd(volatile FXulong*,FXulong): not implemented."
748  FXulong ret=*ptr; *ptr&=v;
749  return ret;
750 #endif
751  }
752 
753 
755 static inline FXulong atomicOr(volatile FXulong* ptr,FXulong v){
756 #if defined(_WIN32) && (_MSC_VER >= 1800)
757  return _InterlockedOr64((volatile FXlong*)ptr,(FXlong)v);
758 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
759  return __atomic_fetch_or(ptr,v,__ATOMIC_SEQ_CST);
760 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
761  return __sync_fetch_and_or(ptr,v);
762 #else
763 #warning "atomicOr(volatile FXulong*,FXulong): not implemented."
764  FXulong ret=*ptr; *ptr|=v;
765  return ret;
766 #endif
767  }
768 
769 
771 static inline FXulong atomicXor(volatile FXulong* ptr,FXulong v){
772 #if defined(_WIN32) && (_MSC_VER >= 1800)
773  return _InterlockedXor64((volatile FXlong*)ptr,(FXlong)v);
774 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
775  return __atomic_fetch_xor(ptr,v,__ATOMIC_SEQ_CST);
776 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
777  return __sync_fetch_and_xor(ptr,v);
778 #else
779 #warning "atomicXor(volatile FXulong*,FXulong): not implemented."
780  FXulong ret=*ptr; *ptr^=v;
781  return ret;
782 #endif
783  }
784 
785 
787 static inline FXulong atomicMin(volatile FXulong* ptr,FXulong v){
788  FXulong old;
789  while(v<(old=*ptr) && !atomicBoolCas(ptr,old,v)){ }
790  return old;
791  }
792 
793 
795 static inline FXulong atomicMax(volatile FXulong* ptr,FXulong v){
796  FXulong old;
797  while((old=*ptr)<v && !atomicBoolCas(ptr,old,v)){ }
798  return old;
799  }
800 
801 
803 
804 
806 static inline FXptr atomicSet(volatile FXptr* ptr,FXptr v){
807 #if defined(_WIN32) && (_MSC_VER >= 1800)
808  return (FXptr)_InterlockedExchangePointer(ptr, v);
809 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_POINTER_LOCK_FREE == 2)
810  return __atomic_exchange_n(ptr,v,__ATOMIC_SEQ_CST);
811 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
812  return (FXptr)__sync_lock_test_and_set(ptr,v);
813 #elif (defined(__GNUC__) && defined(__i386__))
814  FXptr ret=v;
815  __asm__ __volatile__("xchgl %0,(%1)\n\t" : "=r"(ret) : "r"(ptr), "0"(ret) : "memory", "cc");
816  return ret;
817 #elif (defined(__GNUC__) && defined(__x86_64__))
818  FXptr ret=v;
819  __asm__ __volatile__("xchgq %0,(%1)\n\t" : "=r"(ret) : "r"(ptr), "0"(ret) : "memory", "cc");
820  return ret;
821 #else
822 #warning "atomicSet(volatile FXptr*,FXptr): not implemented."
823  FXptr ret=*ptr; *ptr=v;
824  return ret;
825 #endif
826  }
827 
828 
830 static inline FXptr atomicCas(volatile FXptr* ptr,FXptr expect,FXptr v){
831 #if defined(_WIN32) && (_MSC_VER >= 1800)
832  return _InterlockedCompareExchangePointer((void**)ptr,v,expect);
833 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_POINTER_LOCK_FREE == 2)
834  FXptr ex=expect;
835  __atomic_compare_exchange_n(ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
836  return ex;
837 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
838  return __sync_val_compare_and_swap(ptr,expect,v);
839 #elif (defined(__GNUC__) && defined(__i386__))
840  FXptr ret;
841  __asm__ __volatile__("lock\n\t"
842  "cmpxchgl %2,(%1)\n\t" : "=a"(ret) : "r"(ptr), "r"(v), "a"(expect) : "memory", "cc");
843  return ret;
844 #elif (defined(__GNUC__) && defined(__x86_64__))
845  FXptr ret;
846  __asm__ __volatile__("lock\n\t"
847  "cmpxchgq %2,(%1)\n\t" : "=a"(ret) : "r"(ptr), "r"(v), "a"(expect) : "memory", "cc");
848  return ret;
849 #else
850 #warning "atomicCas(volatile FXptr*,FXptr,FXptr): not implemented."
851  FXptr ret=*ptr;
852  if(*ptr==expect){ *ptr=v; }
853  return ret;
854 #endif
855  }
856 
857 
859 static inline FXbool atomicBoolCas(volatile FXptr* ptr,FXptr expect,FXptr v){
860 #if defined(_WIN32) && (_MSC_VER >= 1800)
861  return (_InterlockedCompareExchangePointer((void**)ptr,v,expect)==expect);
862 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_POINTER_LOCK_FREE == 2)
863  FXptr ex=expect;
864  return __atomic_compare_exchange_n(ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
865 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
866  return __sync_bool_compare_and_swap(ptr,expect,v);
867 #elif (defined(__GNUC__) && defined(__i386__))
868  FXbool ret;
869  __asm__ __volatile__ ("lock\n\t"
870  "cmpxchgl %2,(%1)\n\t"
871  "sete %%al\n\t"
872  "andl $1, %%eax\n\t" : "=a"(ret) : "r"(ptr), "r"(v), "a"(expect) : "memory", "cc");
873  return ret;
874 #elif (defined(__GNUC__) && defined(__x86_64__))
875  FXbool ret;
876  __asm__ __volatile__ ("lock\n\t"
877  "cmpxchgq %2,(%1)\n\t"
878  "sete %%al\n\t"
879  "andq $1, %%rax\n\t" : "=a"(ret) : "r"(ptr), "r"(v), "a"(expect) : "memory", "cc");
880  return ret;
881 #else
882 #warning "atomicBoolCas(volatile FXptr*,FXptr,FXptr): not implemented."
883  if(*ptr==expect){ *ptr=v; return true; }
884  return false;
885 #endif
886  }
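Pointer-sized CAS is the usual building block for lock-free publication. The sketch below lazily installs a shared object and lets the losing thread discard its copy (SymbolTable and getTable are made-up names; fx.h assumed):

  struct SymbolTable { /* ... */ };            // stand-in payload type

  static volatile FXptr theTable=nullptr;

  SymbolTable* getTable(){
    FXptr obj=theTable;
    if(obj==nullptr){
      FXptr fresh=new SymbolTable();
      if(atomicBoolCas(&theTable,(FXptr)nullptr,fresh)){
        obj=fresh;                             // we won the race and published our object
        }
      else{
        delete (SymbolTable*)fresh;            // somebody else published first
        obj=theTable;
        }
      }
    return (SymbolTable*)obj;
    }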
887 
888 
890 static inline FXbool atomicBoolDCas(volatile FXptr* ptr,FXptr cmpa,FXptr cmpb,FXptr a,FXptr b){
891 #if (defined(_WIN32) && (_MSC_VER >= 1500) && defined(_WIN64))
892  FXlong duet[2]={(FXlong)cmpa,(FXlong)cmpb};
893  return !!(_InterlockedCompareExchange128((volatile FXlong*)ptr,(FXlong)b,(FXlong)a,duet));
894 #elif (defined(_WIN32) && (_MSC_VER >= 1500))
895  __int64 ab=(((__int64)(FXuval)a)|((__int64)(FXuval)b)<<32);
896  __int64 compab=(((__int64)(FXuval)cmpa)|((__int64)(FXuval)cmpb)<<32);
897  return (InterlockedCompareExchange64((__int64 volatile *)ptr,ab,compab)==compab);
898 //#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && defined(ILP32)
899 // FXulong ex=(((FXulong)(FXuval)cmpa)|((FXulong)(FXuval)cmpb)<<32);
900 // FXulong v=(((FXulong)(FXuval)a)|((FXulong)(FXuval)b)<<32);
901 // return __atomic_compare_exchange_n((volatile TLong*)ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
902 //#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (defined(LLP64) || defined(LP64))
903 // __int128 ex=(((__int128)(FXuval)cmpa)|((__int128)(FXuval)cmpb)<<32);
904 // __int128 v=(((__int128)(FXuval)a)|((__int128)(FXuval)b)<<32);
905 // return __atomic_compare_exchange_n((volatile __int128*)ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
906 //#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
907 // __int128 ex=(((__int128)(FXuval)cmpa)|((__int128)(FXuval)cmpb)<<32);
908 // __int128 v=(((__int128)(FXuval)a)|((__int128)(FXuval)b)<<32);
909 // return __sync_bool_compare_and_swap((volatile __int128*)ptr,ex,v);
910 #elif (defined(__GNUC__) && defined(__i386__) && (defined(__PIC__) || defined(__PIE__)))
911  FXbool ret;
912  __asm__ __volatile__ ("xchgl %%esi, %%ebx\n\t"
913  "lock\n\t"
914  "cmpxchg8b (%1)\n\t"
915  "setz %%al\n\t"
916  "andl $1, %%eax\n\t"
917  "xchgl %%esi, %%ebx\n\t" : "=a"(ret) : "D"(ptr), "a"(cmpa), "d"(cmpb), "S"(a), "c"(b) : "memory", "cc");
918  return ret;
919 #elif (defined(__GNUC__) && defined(__i386__))
920  FXbool ret;
921  __asm__ __volatile__ ("lock\n\t"
922  "cmpxchg8b (%1)\n\t"
923  "setz %%al\n\t"
924  "andl $1, %%eax\n\t" : "=a"(ret) : "D"(ptr), "a"(cmpa), "d"(cmpb), "b"(a), "c"(b) : "memory", "cc");
925  return ret;
926 #elif (defined(__GNUC__) && defined(__x86_64__))
927  FXbool ret;
928  __asm__ __volatile__ ("lock\n\t"
929  "cmpxchg16b (%1)\n\t"
930  "setz %%al\n\t"
931  "andq $1, %%rax\n\t" : "=a"(ret) : "r"(ptr), "a"(cmpa), "d"(cmpb), "b"(a), "c"(b) : "memory", "cc");
932  return ret;
933 #else
934 #warning "atomicBoolDCas(volatile FXptr*,FXptr,FXptr,FXptr,FXptr): not implemented."
935  if(ptr[0]==cmpa && ptr[1]==cmpb){ ptr[0]=a; ptr[1]=b; return true; }
936  return false;
937 #endif
938  }
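atomicBoolDCas compares and swaps two adjacent pointer-sized slots in one shot, which is the classic way to pair a pointer with a version counter and sidestep the ABA problem. A minimal sketch, assuming made-up names (TaggedHead, bumpHead) and that the pair is suitably aligned for cmpxchg8b/cmpxchg16b (fx.h assumed):

  struct TaggedHead {
    FXptr node;                                // slot ptr[0]
    FXptr tag;                                 // slot ptr[1], used as a version counter
    };

  FXbool bumpHead(volatile TaggedHead* h,FXptr oldNode,FXptr oldTag,FXptr newNode){
    FXptr newTag=(FXptr)((FXuval)oldTag+1);    // advance the version on every successful swap
    return atomicBoolDCas(&h->node,oldNode,oldTag,newNode,newTag);
    }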
939 
940 
942 static inline FXptr atomicAdd(volatile FXptr* ptr,FXival v){
943 #if defined(_WIN32) && (_MSC_VER >= 1800) && defined(_WIN64)
944  return (FXptr)_InterlockedExchangeAdd64((volatile FXlong*)ptr,(FXlong)v);
945 #elif defined(_WIN32) && (_MSC_VER >= 1800)
946  return (FXptr)InterlockedExchangeAdd((volatile long*)ptr,(long)v);
947 #elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_POINTER_LOCK_FREE == 2)
948  return __atomic_fetch_add(ptr,v,__ATOMIC_SEQ_CST);
949 #elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
950  return __sync_fetch_and_add(ptr,v);
951 #elif (defined(__GNUC__) && defined(__i386__))
952  FXptr ret=(FXptr)v;
953  __asm__ __volatile__ ("lock\n\t"
954  "xaddl %0,(%1)\n\t" : "=r"(ret) : "r"(ptr), "0"(ret) : "memory", "cc");
955  return ret;
956 #elif (defined(__GNUC__) && defined(__x86_64__))
957  FXptr ret=(FXptr)v;
958  __asm__ __volatile__ ("lock\n\t"
959  "xaddq %0,(%1)\n\t" : "=r"(ret) : "r"(ptr), "0" (ret) : "memory", "cc");
960  return ret;
961 #else
962 #warning "atomicAdd(volatile FXptr*,FXival): not implemented."
963  FXptr ret=*ptr; *ptr+=v;
964  return ret;
965 #endif
966  }
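atomicAdd on an FXptr advances the pointer by a byte count and hands back the old position, which is all a trivial bump allocator needs. Illustrative sketch with made-up names and no bounds checking (fx.h assumed):

  static FXchar arena[4096];                   // shared scratch region
  static volatile FXptr cursor=arena;          // next free byte

  FXptr claim(FXival bytes){
    return atomicAdd(&cursor,bytes);           // start address of the claimed block
    }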
967 
968 
970 
971 
973 template <typename EType>
974 static inline EType* atomicSet(EType *volatile *ptr,EType* v){
975  return (EType*)atomicSet((volatile FXptr*)ptr,(FXptr)v);
976  }
977 
978 
980 template <typename EType>
981 static inline EType* atomicAdd(EType *volatile *ptr,FXival v){
982  return (EType*)atomicAdd((volatile FXptr*)ptr,v*((FXival)sizeof(EType)));
983  }
984 
985 
987 template <typename EType>
988 static inline EType* atomicCas(EType *volatile *ptr,EType* expect,EType* v){
989  return (EType*)atomicCas((volatile FXptr*)ptr,(FXptr)expect,(FXptr)v);
990  }
991 
992 
994 template <typename EType>
995 static inline FXbool atomicBoolCas(EType *volatile *ptr,EType* expect,EType* v){
996  return atomicBoolCas((volatile FXptr*)ptr,(FXptr)expect,(FXptr)v);
997  }
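The typed overloads let calling code work with real node pointers instead of casting to FXptr by hand. Below is a sketch of a Treiber-style push using them (Node and push are made-up names; pop is omitted because it would additionally need ABA protection, for instance via atomicBoolDCas; fx.h assumed):

  struct Node {
    Node* next;
    FXint value;
    };

  static Node *volatile listTop=nullptr;

  void push(Node* n){
    Node* old;
    do{
      old=listTop;                             // snapshot the current head
      n->next=old;
      }
    while(!atomicBoolCas(&listTop,old,n));     // retry until the head is still 'old'
    }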
998 
999 
1001 template <typename EType>
1002 static inline FXbool atomicBoolDCas(volatile EType* ptr,EType cmpa,EType cmpb,EType a,EType b){
1003  return atomicBoolDCas((volatile FXptr*)ptr,(FXptr)cmpa,(FXptr)cmpb,(FXptr)a,(FXptr)b);
1004  }
1005 
1006 }
1007 
1008 #endif