/// Atomic thread fence; no memory operation may be reordered across it
static inline void atomicThreadFence(){
#if defined(_WIN32) && (_MSC_VER >= 1500)
  MemoryBarrier();                                      // Full hardware fence on MSVC
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
  __atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  __sync_synchronize();
#else
#warning "atomicThreadFence(): not implemented."
#endif
  }

/// Atomically set *ptr to v; return the previous contents of *ptr
static inline FXint atomicSet(volatile FXint* ptr,FXint v){
#if defined(_WIN32) && (_MSC_VER >= 1500)
  return _InterlockedExchange((volatile long*)ptr,(long)v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
  return __atomic_exchange_n(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_lock_test_and_set(ptr,v);
#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
  FXint ret=v;
  __asm__ __volatile__("xchgl %0,(%1)\n\t" : "=r"(ret) : "r"(ptr),"0"(ret) : "memory","cc");
  return ret;
#else
#warning "atomicSet(volatile FXint*,FXint): not implemented."
  FXint ret=*ptr; *ptr=v;
  return ret;
#endif
  }
/// Atomically compare *ptr against expect, swap in v if equal; return the previous contents of *ptr
static inline FXint atomicCas(volatile FXint* ptr,FXint expect,FXint v){
#if defined(_WIN32) && (_MSC_VER >= 1500)
  return _InterlockedCompareExchange((volatile long*)ptr,(long)v,(long)expect);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
  FXint ex=expect;
  __atomic_compare_exchange_n(ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
  return ex;
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_val_compare_and_swap(ptr,expect,v);
#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
  FXint ret;
  __asm__ __volatile__("lock\n\t"
                       "cmpxchgl %2,(%1)\n\t" : "=a"(ret) : "r"(ptr),"r"(v),"a"(expect) : "memory","cc");
  return ret;
#else
#warning "atomicCas(volatile FXint*,FXint,FXint): not implemented."
  FXint ret=*ptr;
  if(*ptr==expect){ *ptr=v; }
  return ret;
#endif
  }

/// Atomically compare *ptr against expect, swap in v if equal; return true on success
static inline FXbool atomicBoolCas(volatile FXint* ptr,FXint expect,FXint v){
#if defined(_WIN32) && (_MSC_VER >= 1500)
  return (_InterlockedCompareExchange((volatile long*)ptr,(long)v,(long)expect)==(long)expect);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_INT_LOCK_FREE == 2)
  FXint ex=expect;
  return __atomic_compare_exchange_n(ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_bool_compare_and_swap(ptr,expect,v);
#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
  FXbool ret;
  __asm__ __volatile__("lock\n\t"
                       "cmpxchgl %2,(%1)\n\t"
                       "setz %%al\n\t"
                       "andl $1, %%eax\n\t" : "=a"(ret) : "r"(ptr),"r"(v),"a"(expect) : "memory","cc");
  return ret;
#else
#warning "atomicBoolCas(volatile FXint*,FXint,FXint): not implemented."
  if(*ptr==expect){ *ptr=v; return true; }
  return false;
#endif
  }
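
// Usage sketch (not part of this header): a minimal test-and-set spin lock built on
// atomicSet() and atomicThreadFence() as declared above.  spinLock() and spinUnlock()
// are hypothetical names introduced for illustration, assuming the lock word starts at 0.
static inline void spinLock(volatile FXint* lock){
  while(atomicSet(lock,1)){ }           // Keep exchanging 1 in until the old value was 0
  }

static inline void spinUnlock(volatile FXint* lock){
  atomicThreadFence();                  // Make prior writes visible before releasing
  *lock=0;                              // Release the lock
  }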
/// Atomically add v to *ptr; return the previous contents of *ptr
static inline FXint atomicAdd(volatile FXint* ptr,FXint v){
#if defined(_WIN32) && (_MSC_VER >= 1500)
  return _InterlockedExchangeAdd((volatile long*)ptr,(long)v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
  return __atomic_fetch_add(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_fetch_and_add(ptr,v);
#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
  FXint ret=v;
  __asm__ __volatile__("lock\n\t"
                       "xaddl %0,(%1)\n\t" : "=r"(ret) : "r"(ptr),"0"(ret) : "memory","cc");
  return ret;
#else
#warning "atomicAdd(volatile FXint*,FXint): not implemented."
  FXint ret=*ptr; *ptr+=v;
  return ret;
#endif
  }

/// Atomically bitwise-AND v into *ptr; return the previous contents of *ptr
static inline FXint atomicAnd(volatile FXint* ptr,FXint v){
#if defined(_WIN32) && (_MSC_VER >= 1500)
  return _InterlockedAnd((volatile long*)ptr,(long)v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
  return __atomic_fetch_and(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_fetch_and_and(ptr,v);
#else
#warning "atomicAnd(volatile FXint*,FXint): not implemented."
  FXint ret=*ptr; *ptr&=v;
  return ret;
#endif
  }

/// Atomically bitwise-OR v into *ptr; return the previous contents of *ptr
static inline FXint atomicOr(volatile FXint* ptr,FXint v){
#if defined(_WIN32) && (_MSC_VER >= 1500)
  return _InterlockedOr((volatile long*)ptr,(long)v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
  return __atomic_fetch_or(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_fetch_and_or(ptr,v);
#else
#warning "atomicOr(volatile FXint*,FXint): not implemented."
  FXint ret=*ptr; *ptr|=v;
  return ret;
#endif
  }

/// Atomically bitwise-XOR v into *ptr; return the previous contents of *ptr
static inline FXint atomicXor(volatile FXint* ptr,FXint v){
#if defined(_WIN32) && (_MSC_VER >= 1500)
  return _InterlockedXor((volatile long*)ptr,(long)v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
  return __atomic_fetch_xor(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_fetch_and_xor(ptr,v);
#else
#warning "atomicXor(volatile FXint*,FXint): not implemented."
  FXint ret=*ptr; *ptr^=v;
  return ret;
#endif
  }

/// Atomically update *ptr to the smaller of *ptr and v; return the previous contents of *ptr
static inline FXint atomicMin(volatile FXint* ptr,FXint v){
  FXint old;
  while(v<(old=*ptr) && !atomicBoolCas(ptr,old,v)){ }
  return old;
  }

/// Atomically update *ptr to the larger of *ptr and v; return the previous contents of *ptr
static inline FXint atomicMax(volatile FXint* ptr,FXint v){
  FXint old;
  while((old=*ptr)<v && !atomicBoolCas(ptr,old,v)){ }
  return old;
  }
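
// Usage sketch (not part of this header): the same compare-and-swap retry loop that
// atomicMin()/atomicMax() use above, applied to a counter that must never exceed a cap.
// atomicAddClamped() is a hypothetical name introduced here for illustration only.
static inline FXint atomicAddClamped(volatile FXint* counter,FXint cap){
  FXint old;
  do{
    old=*counter;
    if(cap<=old) return old;                    // Already at the cap; leave it alone
    }
  while(!atomicBoolCas(counter,old,old+1));     // Retry if another thread changed it first
  return old+1;
  }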
/// Atomically set *ptr to v; return the previous contents of *ptr
static inline FXuint atomicSet(volatile FXuint* ptr,FXuint v){
#if defined(_WIN32) && (_MSC_VER >= 1500)
  return _InterlockedExchange((volatile long*)ptr,(long)v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
  return __atomic_exchange_n(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_lock_test_and_set(ptr,v);
#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
  FXuint ret=v;
  __asm__ __volatile__("xchgl %0,(%1)\n\t" : "=r"(ret) : "r"(ptr),"0"(ret) : "memory","cc");
  return ret;
#else
#warning "atomicSet(volatile FXuint*,FXuint): not implemented."
  FXuint ret=*ptr; *ptr=v;
  return ret;
#endif
  }

/// Atomically compare *ptr against expect, swap in v if equal; return the previous contents of *ptr
static inline FXuint atomicCas(volatile FXuint* ptr,FXuint expect,FXuint v){
#if defined(_WIN32) && (_MSC_VER >= 1500)
  return _InterlockedCompareExchange((volatile long*)ptr,(long)v,(long)expect);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
  FXuint ex=expect;
  __atomic_compare_exchange_n(ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
  return ex;
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_val_compare_and_swap(ptr,expect,v);
#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
  FXuint ret;
  __asm__ __volatile__("lock\n\t"
                       "cmpxchgl %2,(%1)\n\t" : "=a"(ret) : "r"(ptr),"r"(v),"a"(expect) : "memory","cc");
  return ret;
#else
#warning "atomicCas(volatile FXuint*,FXuint,FXuint): not implemented."
  FXuint ret=*ptr;
  if(*ptr==expect){ *ptr=v; }
  return ret;
#endif
  }

/// Atomically compare *ptr against expect, swap in v if equal; return true on success
static inline FXbool atomicBoolCas(volatile FXuint* ptr,FXuint expect,FXuint v){
#if defined(_WIN32) && (_MSC_VER >= 1500)
  return (_InterlockedCompareExchange((volatile long*)ptr,(long)v,(long)expect)==(long)expect);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_INT_LOCK_FREE == 2)
  FXuint ex=expect;
  return __atomic_compare_exchange_n(ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_bool_compare_and_swap(ptr,expect,v);
#else
#warning "atomicBoolCas(volatile FXuint*,FXuint,FXuint): not implemented."
  if(*ptr==expect){ *ptr=v; return true; }
  return false;
#endif
  }

/// Atomically add v to *ptr; return the previous contents of *ptr
static inline FXuint atomicAdd(volatile FXuint* ptr,FXuint v){
#if defined(_WIN32) && (_MSC_VER >= 1500)
  return _InterlockedExchangeAdd((volatile long*)ptr,(long)v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
  return __atomic_fetch_add(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_fetch_and_add(ptr,v);
#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
  FXuint ret=v;
  __asm__ __volatile__("lock\n\t"
                       "xaddl %0,(%1)\n\t" : "=r"(ret) : "r"(ptr),"0"(ret) : "memory","cc");
  return ret;
#else
#warning "atomicAdd(volatile FXuint*,FXuint): not implemented."
  FXuint ret=*ptr; *ptr+=v;
  return ret;
#endif
  }

/// Atomically bitwise-AND v into *ptr; return the previous contents of *ptr
static inline FXuint atomicAnd(volatile FXuint* ptr,FXuint v){
#if defined(_WIN32) && (_MSC_VER >= 1500)
  return _InterlockedAnd((volatile long*)ptr,(long)v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
  return __atomic_fetch_and(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_fetch_and_and(ptr,v);
#else
#warning "atomicAnd(volatile FXuint*,FXuint): not implemented."
  FXuint ret=*ptr; *ptr&=v;
  return ret;
#endif
  }

/// Atomically bitwise-OR v into *ptr; return the previous contents of *ptr
static inline FXuint atomicOr(volatile FXuint* ptr,FXuint v){
#if defined(_WIN32) && (_MSC_VER >= 1500)
  return _InterlockedOr((volatile long*)ptr,(long)v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
  return __atomic_fetch_or(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_fetch_and_or(ptr,v);
#else
#warning "atomicOr(volatile FXuint*,FXuint): not implemented."
  FXuint ret=*ptr; *ptr|=v;
  return ret;
#endif
  }

/// Atomically bitwise-XOR v into *ptr; return the previous contents of *ptr
static inline FXuint atomicXor(volatile FXuint* ptr,FXuint v){
#if defined(_WIN32) && (_MSC_VER >= 1500)
  return _InterlockedXor((volatile long*)ptr,(long)v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST)
  return __atomic_fetch_xor(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_fetch_and_xor(ptr,v);
#else
#warning "atomicXor(volatile FXuint*,FXuint): not implemented."
  FXuint ret=*ptr; *ptr^=v;
  return ret;
#endif
  }

/// Atomically update *ptr to the smaller of *ptr and v; return the previous contents of *ptr
static inline FXuint atomicMin(volatile FXuint* ptr,FXuint v){
  FXuint old;
  while(v<(old=*ptr) && !atomicBoolCas(ptr,old,v)){ }
  return old;
  }

/// Atomically update *ptr to the larger of *ptr and v; return the previous contents of *ptr
static inline FXuint atomicMax(volatile FXuint* ptr,FXuint v){
  FXuint old;
  while((old=*ptr)<v && !atomicBoolCas(ptr,old,v)){ }
  return old;
  }
/// Atomically set *ptr to v; return the previous contents of *ptr
static inline FXlong atomicSet(volatile FXlong* ptr,FXlong v){
#if defined(_WIN32) && (_MSC_VER >= 1800)
  return _InterlockedExchange64(ptr,v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
  return __atomic_exchange_n(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_lock_test_and_set(ptr,v);
#elif (defined(__GNUC__) && defined(__i386__) && (defined(__PIC__) || defined(__PIE__)))
  FXlong ret;
  __asm__ __volatile__("xchgl %%esi, %%ebx\n\t"        // Preserve PIC register EBX in ESI
                       "1:\n\t"
                       "lock\n\t"
                       "cmpxchg8b (%1)\n\t"            // Retry until the exchange sticks
                       "jnz 1b\n\t"
                       "xchgl %%esi, %%ebx\n\t" : "=A"(ret) : "D"(ptr),"S"((FXuint)v),"c"((FXuint)(v>>32)),"A"(*ptr) : "memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__i386__))
  FXlong ret;
  __asm__ __volatile__("1:\n\t"
                       "lock\n\t"
                       "cmpxchg8b (%1)\n\t"            // Retry until the exchange sticks
                       "jnz 1b\n\t" : "=A"(ret) : "D"(ptr),"b"((FXuint)v),"c"((FXuint)(v>>32)),"A"(*ptr) : "memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__x86_64__))
  FXlong ret;
  __asm__ __volatile__("xchgq %0,(%1)\n\t" : "=r"(ret) : "r"(ptr),"0"(v) : "memory","cc");
  return ret;
#else
#warning "atomicSet(volatile FXlong*,FXlong): not implemented."
  FXlong ret=*ptr; *ptr=v;
  return ret;
#endif
  }

/// Atomically compare *ptr against expect, swap in v if equal; return the previous contents of *ptr
static inline FXlong atomicCas(volatile FXlong* ptr,FXlong expect,FXlong v){
#if defined(_WIN32) && (_MSC_VER >= 1800)
  return _InterlockedCompareExchange64(ptr,v,expect);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
  FXlong ex=expect;
  __atomic_compare_exchange_n(ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
  return ex;
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_val_compare_and_swap(ptr,expect,v);
#elif (defined(__GNUC__) && defined(__i386__) && (defined(__PIC__) || defined(__PIE__)))
  FXlong ret;
  __asm__ __volatile__("xchgl %%esi, %%ebx\n\t"        // Preserve PIC register EBX in ESI
                       "lock\n\t"
                       "cmpxchg8b (%1)\n\t"
                       "movl %%esi, %%ebx\n\t" : "=A"(ret) : "D"(ptr),"S"((FXuint)v),"c"((FXuint)(v>>32)),"A"(expect) : "memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__i386__))
  FXlong ret;
  __asm__ __volatile__("lock\n\t"
                       "cmpxchg8b (%1)\n\t" : "=A"(ret) : "D"(ptr),"b"((FXuint)v),"c"((FXuint)(v>>32)),"A"(expect) : "memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__x86_64__))
  FXlong ret;
  __asm__ __volatile__("lock\n\t"
                       "cmpxchgq %2,(%1)\n\t" : "=a"(ret) : "r"(ptr),"r"(v),"a"(expect) : "memory","cc");
  return ret;
#else
#warning "atomicCas(volatile FXlong*,FXlong,FXlong): not implemented."
  FXlong ret=*ptr;
  if(*ptr==expect){ *ptr=v; }
  return ret;
#endif
  }

/// Atomically compare *ptr against expect, swap in v if equal; return true on success
static inline FXbool atomicBoolCas(volatile FXlong* ptr,FXlong expect,FXlong v){
#if defined(_WIN32) && (_MSC_VER >= 1800)
  return (_InterlockedCompareExchange64(ptr,v,expect)==expect);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
  FXlong ex=expect;
  return __atomic_compare_exchange_n(ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_bool_compare_and_swap(ptr,expect,v);
#elif (defined(__GNUC__) && defined(__i386__) && (defined(__PIC__) || defined(__PIE__)))
  FXbool ret;
  __asm__ __volatile__("xchgl %%esi, %%ebx\n\t"        // Preserve PIC register EBX in ESI
                       "lock\n\t"
                       "cmpxchg8b (%1)\n\t"
                       "setz %%al\n\t"                 // AL=1 if the swap succeeded
                       "xchgl %%esi, %%ebx\n\t" : "=a"(ret) : "D"(ptr),"S"((FXuint)v),"c"((FXuint)(v>>32)),"A"(expect) : "memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__i386__))
  FXbool ret;
  __asm__ __volatile__("lock\n\t"
                       "cmpxchg8b (%1)\n\t"
                       "setz %%al\n\t"                 // AL=1 if the swap succeeded
                       "andl $1, %%eax\n\t" : "=a"(ret) : "D"(ptr),"b"((FXuint)v),"c"((FXuint)(v>>32)),"A"(expect) : "memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__x86_64__))
  FXbool ret;
  __asm__ __volatile__("lock\n\t"
                       "cmpxchgq %2,(%1)\n\t"
                       "setz %%al\n\t"                 // AL=1 if the swap succeeded
                       "andq $1, %%rax\n\t" : "=a"(ret) : "r"(ptr),"r"(v),"a"(expect) : "memory","cc");
  return ret;
#else
#warning "atomicBoolCas(volatile FXlong*,FXlong,FXlong): not implemented."
  if(*ptr==expect){ *ptr=v; return true; }
  return false;
#endif
  }

/// Atomically add v to *ptr; return the previous contents of *ptr
static inline FXlong atomicAdd(volatile FXlong* ptr,FXlong v){
#if defined(_WIN32) && (_MSC_VER >= 1800)
  return _InterlockedExchangeAdd64(ptr,v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
  return __atomic_fetch_add(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_fetch_and_add(ptr,v);
#elif (defined(__GNUC__) && defined(__i386__) && (defined(__PIC__) || defined(__PIE__)))
  FXlong ret;
  FXuint inclo=v;
  FXuint inchi=(v>>32);
  __asm __volatile("movl %%ebx, %%esi\n\t"             // Preserve PIC register EBX in ESI
                   "1:\n\t"
                   "movl %2, %%ebx\n\t"
                   "movl %3, %%ecx\n\t"
                   "addl %%eax, %%ebx\n\t"
                   "adcl %%edx, %%ecx\n\t"             // ECX:EBX = old value + v
                   "lock\n\t"
                   "cmpxchg8b (%1)\n\t"
                   "jnz 1b\n\t"
                   "movl %%esi, %%ebx\n\t" : "=A"(ret) : "D"(ptr),"m"(inclo),"m"(inchi),"A"(*ptr) : "ecx","esi","memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__i386__))
  FXlong ret;
  FXuint inclo=v;
  FXuint inchi=(v>>32);
  __asm __volatile("1:\n\t"
                   "movl %2, %%ebx\n\t"
                   "movl %3, %%ecx\n\t"
                   "addl %%eax, %%ebx\n\t"
                   "adcl %%edx, %%ecx\n\t"             // ECX:EBX = old value + v
                   "lock\n\t"
                   "cmpxchg8b (%1)\n\t"
                   "jnz 1b\n\t" : "=A"(ret) : "D"(ptr),"m"(inclo),"m"(inchi),"A"(*ptr) : "ebx","ecx","memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__x86_64__))
  FXlong ret;
  __asm__ __volatile__("lock\n\t"
                       "xaddq %0,(%1)\n\t" : "=r"(ret) : "r"(ptr),"0"(v) : "memory","cc");
  return ret;
#else
#warning "atomicAdd(volatile FXlong*,FXlong): not implemented."
  FXlong ret=*ptr; *ptr+=v;
  return ret;
#endif
  }

/// Atomically bitwise-AND v into *ptr; return the previous contents of *ptr
static inline FXlong atomicAnd(volatile FXlong* ptr,FXlong v){
#if defined(_WIN32) && (_MSC_VER >= 1800)
  return _InterlockedAnd64(ptr,v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
  return __atomic_fetch_and(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_fetch_and_and(ptr,v);
#else
#warning "atomicAnd(volatile FXlong*,FXlong): not implemented."
  FXlong ret=*ptr; *ptr&=v;
  return ret;
#endif
  }

/// Atomically bitwise-OR v into *ptr; return the previous contents of *ptr
static inline FXlong atomicOr(volatile FXlong* ptr,FXlong v){
#if defined(_WIN32) && (_MSC_VER >= 1800)
  return _InterlockedOr64(ptr,v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
  return __atomic_fetch_or(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_fetch_and_or(ptr,v);
#else
#warning "atomicOr(volatile FXlong*,FXlong): not implemented."
  FXlong ret=*ptr; *ptr|=v;
  return ret;
#endif
  }

/// Atomically bitwise-XOR v into *ptr; return the previous contents of *ptr
static inline FXlong atomicXor(volatile FXlong* ptr,FXlong v){
#if defined(_WIN32) && (_MSC_VER >= 1800)
  return _InterlockedXor64(ptr,v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
  return __atomic_fetch_xor(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_fetch_and_xor(ptr,v);
#else
#warning "atomicXor(volatile FXlong*,FXlong): not implemented."
  FXlong ret=*ptr; *ptr^=v;
  return ret;
#endif
  }

/// Atomically update *ptr to the smaller of *ptr and v; return the previous contents of *ptr
static inline FXlong atomicMin(volatile FXlong* ptr,FXlong v){
  FXlong old;
  while(v<(old=*ptr) && !atomicBoolCas(ptr,old,v)){ }
  return old;
  }

/// Atomically update *ptr to the larger of *ptr and v; return the previous contents of *ptr
static inline FXlong atomicMax(volatile FXlong* ptr,FXlong v){
  FXlong old;
  while((old=*ptr)<v && !atomicBoolCas(ptr,old,v)){ }
  return old;
  }
/// Atomically set *ptr to v; return the previous contents of *ptr
static inline FXulong atomicSet(volatile FXulong* ptr,FXulong v){
#if defined(_WIN32) && (_MSC_VER >= 1800)
  return _InterlockedExchange64((volatile FXlong*)ptr,(FXlong)v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
  return __atomic_exchange_n(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_lock_test_and_set(ptr,v);
#elif (defined(__GNUC__) && defined(__i386__) && (defined(__PIC__) || defined(__PIE__)))
  FXulong ret;
  __asm__ __volatile__("xchgl %%esi, %%ebx\n\t"        // Preserve PIC register EBX in ESI
                       "1:\n\t"
                       "lock\n\t"
                       "cmpxchg8b (%1)\n\t"            // Retry until the exchange sticks
                       "jnz 1b\n\t"
                       "xchgl %%esi, %%ebx\n\t" : "=A"(ret) : "D"(ptr),"S"((FXuint)v),"c"((FXuint)(v>>32)),"A"(*ptr) : "memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__i386__))
  FXulong ret;
  __asm__ __volatile__("1:\n\t"
                       "lock\n\t"
                       "cmpxchg8b (%1)\n\t"            // Retry until the exchange sticks
                       "jnz 1b\n\t" : "=A"(ret) : "D"(ptr),"b"((FXuint)v),"c"((FXuint)(v>>32)),"A"(*ptr) : "memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__x86_64__))
  FXulong ret;
  __asm__ __volatile__("xchgq %0,(%1)\n\t" : "=r"(ret) : "r"(ptr),"0"(v) : "memory","cc");
  return ret;
#else
#warning "atomicSet(volatile FXulong*,FXulong): not implemented."
  FXulong ret=*ptr; *ptr=v;
  return ret;
#endif
  }

/// Atomically compare *ptr against expect, swap in v if equal; return the previous contents of *ptr
static inline FXulong atomicCas(volatile FXulong* ptr,FXulong expect,FXulong v){
#if defined(_WIN32) && (_MSC_VER >= 1800)
  return _InterlockedCompareExchange64((volatile FXlong*)ptr,(FXlong)v,(FXlong)expect);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
  FXulong ex=expect;
  __atomic_compare_exchange_n(ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
  return ex;
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_val_compare_and_swap(ptr,expect,v);
#elif (defined(__GNUC__) && defined(__i386__) && (defined(__PIC__) || defined(__PIE__)))
  FXulong ret;
  __asm__ __volatile__("xchgl %%esi, %%ebx\n\t"        // Preserve PIC register EBX in ESI
                       "lock\n\t"
                       "cmpxchg8b (%1)\n\t"
                       "movl %%esi, %%ebx\n\t" : "=A"(ret) : "D"(ptr),"S"((FXuint)v),"c"((FXuint)(v>>32)),"A"(expect) : "memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__i386__))
  FXulong ret;
  __asm__ __volatile__("lock\n\t"
                       "cmpxchg8b (%1)\n\t" : "=A"(ret) : "D"(ptr),"b"((FXuint)v),"c"((FXuint)(v>>32)),"A"(expect) : "memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__x86_64__))
  FXulong ret;
  __asm__ __volatile__("lock\n\t"
                       "cmpxchgq %2,(%1)\n\t" : "=a"(ret) : "r"(ptr),"r"(v),"a"(expect) : "memory","cc");
  return ret;
#else
#warning "atomicCas(volatile FXulong*,FXulong,FXulong): not implemented."
  FXulong ret=*ptr;
  if(*ptr==expect){ *ptr=v; }
  return ret;
#endif
  }

/// Atomically compare *ptr against expect, swap in v if equal; return true on success
static inline FXbool atomicBoolCas(volatile FXulong* ptr,FXulong expect,FXulong v){
#if defined(_WIN32) && (_MSC_VER >= 1800)
  return (_InterlockedCompareExchange64((volatile FXlong*)ptr,(FXlong)v,(FXlong)expect)==(FXlong)expect);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
  FXulong ex=expect;
  return __atomic_compare_exchange_n(ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_bool_compare_and_swap(ptr,expect,v);
#elif (defined(__GNUC__) && defined(__i386__) && (defined(__PIC__) || defined(__PIE__)))
  FXbool ret;
  __asm__ __volatile__("xchgl %%esi, %%ebx\n\t"        // Preserve PIC register EBX in ESI
                       "lock\n\t"
                       "cmpxchg8b (%1)\n\t"
                       "setz %%al\n\t"                 // AL=1 if the swap succeeded
                       "xchgl %%esi, %%ebx\n\t" : "=a"(ret) : "D"(ptr),"S"((FXuint)v),"c"((FXuint)(v>>32)),"A"(expect) : "memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__i386__))
  FXbool ret;
  __asm__ __volatile__("lock\n\t"
                       "cmpxchg8b (%1)\n\t"
                       "setz %%al\n\t"                 // AL=1 if the swap succeeded
                       "andl $1, %%eax\n\t" : "=a"(ret) : "D"(ptr),"b"((FXuint)v),"c"((FXuint)(v>>32)),"A"(expect) : "memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__x86_64__))
  FXbool ret;
  __asm__ __volatile__("lock\n\t"
                       "cmpxchgq %2,(%1)\n\t"
                       "setz %%al\n\t"                 // AL=1 if the swap succeeded
                       "andq $1, %%rax\n\t" : "=a"(ret) : "r"(ptr),"r"(v),"a"(expect) : "memory","cc");
  return ret;
#else
#warning "atomicBoolCas(volatile FXulong*,FXulong,FXulong): not implemented."
  if(*ptr==expect){ *ptr=v; return true; }
  return false;
#endif
  }

/// Atomically add v to *ptr; return the previous contents of *ptr
static inline FXulong atomicAdd(volatile FXulong* ptr,FXulong v){
#if defined(_WIN32) && (_MSC_VER >= 1800)
  return _InterlockedExchangeAdd64((volatile FXlong*)ptr,(FXlong)v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
  return __atomic_fetch_add(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_fetch_and_add(ptr,v);
#elif (defined(__GNUC__) && defined(__i386__) && (defined(__PIC__) || defined(__PIE__)))
  FXulong ret;
  FXuint inclo=v;
  FXuint inchi=(v>>32);
  __asm __volatile("movl %%ebx, %%esi\n\t"             // Preserve PIC register EBX in ESI
                   "1:\n\t"
                   "movl %2, %%ebx\n\t"
                   "movl %3, %%ecx\n\t"
                   "addl %%eax, %%ebx\n\t"
                   "adcl %%edx, %%ecx\n\t"             // ECX:EBX = old value + v
                   "lock\n\t"
                   "cmpxchg8b (%1)\n\t"
                   "jnz 1b\n\t"
                   "movl %%esi, %%ebx\n\t" : "=A"(ret) : "D"(ptr),"m"(inclo),"m"(inchi),"A"(*ptr) : "ecx","esi","memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__i386__))
  FXulong ret;
  FXuint inclo=v;
  FXuint inchi=(v>>32);
  __asm __volatile("1:\n\t"
                   "movl %2, %%ebx\n\t"
                   "movl %3, %%ecx\n\t"
                   "addl %%eax, %%ebx\n\t"
                   "adcl %%edx, %%ecx\n\t"             // ECX:EBX = old value + v
                   "lock\n\t"
                   "cmpxchg8b (%1)\n\t"
                   "jnz 1b\n\t" : "=A"(ret) : "D"(ptr),"m"(inclo),"m"(inchi),"A"(*ptr) : "ebx","ecx","memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__x86_64__))
  FXulong ret;
  __asm__ __volatile__("lock\n\t"
                       "xaddq %0,(%1)\n\t" : "=r"(ret) : "r"(ptr),"0"(v) : "memory","cc");
  return ret;
#else
#warning "atomicAdd(volatile FXulong*,FXulong): not implemented."
  FXulong ret=*ptr; *ptr+=v;
  return ret;
#endif
  }

/// Atomically bitwise-AND v into *ptr; return the previous contents of *ptr
static inline FXulong atomicAnd(volatile FXulong* ptr,FXulong v){
#if defined(_WIN32) && (_MSC_VER >= 1800)
  return _InterlockedAnd64((volatile FXlong*)ptr,(FXlong)v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
  return __atomic_fetch_and(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_fetch_and_and(ptr,v);
#else
#warning "atomicAnd(volatile FXulong*,FXulong): not implemented."
  FXulong ret=*ptr; *ptr&=v;
  return ret;
#endif
  }

/// Atomically bitwise-OR v into *ptr; return the previous contents of *ptr
static inline FXulong atomicOr(volatile FXulong* ptr,FXulong v){
#if defined(_WIN32) && (_MSC_VER >= 1800)
  return _InterlockedOr64((volatile FXlong*)ptr,(FXlong)v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
  return __atomic_fetch_or(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_fetch_and_or(ptr,v);
#else
#warning "atomicOr(volatile FXulong*,FXulong): not implemented."
  FXulong ret=*ptr; *ptr|=v;
  return ret;
#endif
  }

/// Atomically bitwise-XOR v into *ptr; return the previous contents of *ptr
static inline FXulong atomicXor(volatile FXulong* ptr,FXulong v){
#if defined(_WIN32) && (_MSC_VER >= 1800)
  return _InterlockedXor64((volatile FXlong*)ptr,(FXlong)v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_LLONG_LOCK_FREE == 2)
  return __atomic_fetch_xor(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_fetch_and_xor(ptr,v);
#else
#warning "atomicXor(volatile FXulong*,FXulong): not implemented."
  FXulong ret=*ptr; *ptr^=v;
  return ret;
#endif
  }

/// Atomically update *ptr to the smaller of *ptr and v; return the previous contents of *ptr
static inline FXulong atomicMin(volatile FXulong* ptr,FXulong v){
  FXulong old;
  while(v<(old=*ptr) && !atomicBoolCas(ptr,old,v)){ }
  return old;
  }

/// Atomically update *ptr to the larger of *ptr and v; return the previous contents of *ptr
static inline FXulong atomicMax(volatile FXulong* ptr,FXulong v){
  FXulong old;
  while((old=*ptr)<v && !atomicBoolCas(ptr,old,v)){ }
  return old;
  }
/// Atomically set *ptr to v; return the previous contents of *ptr
static inline FXptr atomicSet(volatile FXptr* ptr,FXptr v){
#if defined(_WIN32) && (_MSC_VER >= 1800)
  return (FXptr)_InterlockedExchangePointer(ptr,v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_POINTER_LOCK_FREE == 2)
  return __atomic_exchange_n(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return (FXptr)__sync_lock_test_and_set(ptr,v);
#elif (defined(__GNUC__) && defined(__i386__))
  FXptr ret=v;
  __asm__ __volatile__("xchgl %0,(%1)\n\t" : "=r"(ret) : "r"(ptr),"0"(ret) : "memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__x86_64__))
  FXptr ret=v;
  __asm__ __volatile__("xchgq %0,(%1)\n\t" : "=r"(ret) : "r"(ptr),"0"(ret) : "memory","cc");
  return ret;
#else
#warning "atomicSet(volatile FXptr*,FXptr): not implemented."
  FXptr ret=*ptr; *ptr=v;
  return ret;
#endif
  }

/// Atomically compare *ptr against expect, swap in v if equal; return the previous contents of *ptr
static inline FXptr atomicCas(volatile FXptr* ptr,FXptr expect,FXptr v){
#if defined(_WIN32) && (_MSC_VER >= 1800)
  return _InterlockedCompareExchangePointer((void**)ptr,v,expect);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_POINTER_LOCK_FREE == 2)
  FXptr ex=expect;
  __atomic_compare_exchange_n(ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
  return ex;
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_val_compare_and_swap(ptr,expect,v);
#elif (defined(__GNUC__) && defined(__i386__))
  FXptr ret;
  __asm__ __volatile__("lock\n\t"
                       "cmpxchgl %2,(%1)\n\t" : "=a"(ret) : "r"(ptr),"r"(v),"a"(expect) : "memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__x86_64__))
  FXptr ret;
  __asm__ __volatile__("lock\n\t"
                       "cmpxchgq %2,(%1)\n\t" : "=a"(ret) : "r"(ptr),"r"(v),"a"(expect) : "memory","cc");
  return ret;
#else
#warning "atomicCas(volatile FXptr*,FXptr,FXptr): not implemented."
  FXptr ret=*ptr;
  if(*ptr==expect){ *ptr=v; }
  return ret;
#endif
  }

/// Atomically compare *ptr against expect, swap in v if equal; return true on success
static inline FXbool atomicBoolCas(volatile FXptr* ptr,FXptr expect,FXptr v){
#if defined(_WIN32) && (_MSC_VER >= 1800)
  return (_InterlockedCompareExchangePointer((void**)ptr,v,expect)==expect);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_POINTER_LOCK_FREE == 2)
  FXptr ex=expect;
  return __atomic_compare_exchange_n(ptr,&ex,v,false,__ATOMIC_SEQ_CST,__ATOMIC_RELAXED);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_bool_compare_and_swap(ptr,expect,v);
#elif (defined(__GNUC__) && defined(__i386__))
  FXbool ret;
  __asm__ __volatile__("lock\n\t"
                       "cmpxchgl %2,(%1)\n\t"
                       "setz %%al\n\t"                 // AL=1 if the swap succeeded
                       "andl $1, %%eax\n\t" : "=a"(ret) : "r"(ptr),"r"(v),"a"(expect) : "memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__x86_64__))
  FXbool ret;
  __asm__ __volatile__("lock\n\t"
                       "cmpxchgq %2,(%1)\n\t"
                       "setz %%al\n\t"                 // AL=1 if the swap succeeded
                       "andq $1, %%rax\n\t" : "=a"(ret) : "r"(ptr),"r"(v),"a"(expect) : "memory","cc");
  return ret;
#else
#warning "atomicBoolCas(volatile FXptr*,FXptr,FXptr): not implemented."
  if(*ptr==expect){ *ptr=v; return true; }
  return false;
#endif
  }
/// Atomically compare the pointer pair (ptr[0],ptr[1]) against (cmpa,cmpb); if equal,
/// replace it with (a,b) and return true
static inline FXbool atomicBoolDCas(volatile FXptr* ptr,FXptr cmpa,FXptr cmpb,FXptr a,FXptr b){
#if (defined(_WIN32) && (_MSC_VER >= 1500) && defined(_WIN64))
  FXlong duet[2]={(FXlong)cmpa,(FXlong)cmpb};
  return !!(_InterlockedCompareExchange128((volatile FXlong*)ptr,(FXlong)b,(FXlong)a,duet));   // Compare {cmpa,cmpb}, exchange in {a,b}
#elif (defined(_WIN32) && (_MSC_VER >= 1500))
  __int64 ab=(((__int64)(FXuval)a)|((__int64)(FXuval)b)<<32);
  __int64 compab=(((__int64)(FXuval)cmpa)|((__int64)(FXuval)cmpb)<<32);
  return (InterlockedCompareExchange64((__int64 volatile*)ptr,ab,compab)==compab);
#elif (defined(__GNUC__) && defined(__i386__) && (defined(__PIC__) || defined(__PIE__)))
  FXbool ret;
  __asm__ __volatile__("xchgl %%esi, %%ebx\n\t"        // Preserve PIC register EBX in ESI
                       "lock\n\t"
                       "cmpxchg8b (%1)\n\t"
                       "setz %%al\n\t"                 // AL=1 if both words matched and were swapped
                       "xchgl %%esi, %%ebx\n\t" : "=a"(ret) : "D"(ptr),"a"(cmpa),"d"(cmpb),"S"(a),"c"(b) : "memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__i386__))
  FXbool ret;
  __asm__ __volatile__("lock\n\t"
                       "cmpxchg8b (%1)\n\t"
                       "setz %%al\n\t"                 // AL=1 if both words matched and were swapped
                       "andl $1, %%eax\n\t" : "=a"(ret) : "D"(ptr),"a"(cmpa),"d"(cmpb),"b"(a),"c"(b) : "memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__x86_64__))
  FXbool ret;
  __asm__ __volatile__("lock\n\t"
                       "cmpxchg16b (%1)\n\t"
                       "setz %%al\n\t"                 // AL=1 if both words matched and were swapped
                       "andq $1, %%rax\n\t" : "=a"(ret) : "r"(ptr),"a"(cmpa),"d"(cmpb),"b"(a),"c"(b) : "memory","cc");
  return ret;
#else
#warning "atomicBoolDCas(volatile FXptr*,FXptr,FXptr,FXptr,FXptr): not implemented."
  if(ptr[0]==cmpa && ptr[1]==cmpb){ ptr[0]=a; ptr[1]=b; return true; }
  return false;
#endif
  }
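
// Usage sketch (not part of this header): atomicBoolDCas() compares and replaces two
// adjacent pointer-sized words in one step, which is commonly used to pair a pointer
// with a version tag so that recycled pointers do not cause ABA failures.  TaggedSlot
// and replaceTagged() are hypothetical names introduced here for illustration only.
struct TaggedSlot {
  volatile FXptr pair[2];       // pair[0]=pointer, pair[1]=version tag stored as a pointer-sized value
  };

static inline FXbool replaceTagged(TaggedSlot& slot,FXptr oldp,FXptr newp){
  FXptr oldtag=slot.pair[1];
  FXptr newtag=(FXptr)((FXuval)oldtag+1);               // Bump the tag on every successful swap
  return atomicBoolDCas(slot.pair,oldp,oldtag,newp,newtag);
  }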
/// Atomically add v bytes to the pointer at ptr; return the previous contents of *ptr
static inline FXptr atomicAdd(volatile FXptr* ptr,FXival v){
#if defined(_WIN32) && (_MSC_VER >= 1800) && defined(_WIN64)
  return (FXptr)_InterlockedExchangeAdd64((volatile FXlong*)ptr,(FXlong)v);
#elif defined(_WIN32) && (_MSC_VER >= 1800)
  return (FXptr)InterlockedExchangeAdd((volatile long*)ptr,(long)v);
#elif defined(__GNUC__) && defined(__ATOMIC_SEQ_CST) && (__GCC_ATOMIC_POINTER_LOCK_FREE == 2)
  return __atomic_fetch_add(ptr,v,__ATOMIC_SEQ_CST);
#elif ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)))
  return __sync_fetch_and_add(ptr,v);
#elif (defined(__GNUC__) && defined(__i386__))
  FXptr ret=(FXptr)v;
  __asm__ __volatile__("lock\n\t"
                       "xaddl %0,(%1)\n\t" : "=r"(ret) : "r"(ptr),"0"(ret) : "memory","cc");
  return ret;
#elif (defined(__GNUC__) && defined(__x86_64__))
  FXptr ret=(FXptr)v;
  __asm__ __volatile__("lock\n\t"
                       "xaddq %0,(%1)\n\t" : "=r"(ret) : "r"(ptr),"0"(ret) : "memory","cc");
  return ret;
#else
#warning "atomicAdd(volatile FXptr*,FXival): not implemented."
  FXptr ret=*ptr; *ptr=(FXptr)((FXival)*ptr+v);
  return ret;
#endif
  }
/// Atomically set the pointer variable at ptr to v; return its previous contents
template <typename EType>
static inline EType* atomicSet(EType* volatile* ptr,EType* v){
  return (EType*)atomicSet((volatile FXptr*)ptr,(FXptr)v);
  }

/// Atomically advance the pointer at ptr by v elements (scaled by sizeof(EType)); return its previous contents
template <typename EType>
static inline EType* atomicAdd(EType* volatile* ptr,FXival v){
  return (EType*)atomicAdd((volatile FXptr*)ptr,v*((FXival)sizeof(EType)));
  }

/// Atomically compare the pointer at ptr against expect, swap in v if equal; return its previous contents
template <typename EType>
static inline EType* atomicCas(EType* volatile* ptr,EType* expect,EType* v){
  return (EType*)atomicCas((volatile FXptr*)ptr,(FXptr)expect,(FXptr)v);
  }

/// Atomically compare the pointer at ptr against expect, swap in v if equal; return true on success
template <typename EType>
static inline FXbool atomicBoolCas(EType* volatile* ptr,EType* expect,EType* v){
  return atomicBoolCas((volatile FXptr*)ptr,(FXptr)expect,(FXptr)v);
  }

/// Atomically compare the pair (ptr[0],ptr[1]) against (cmpa,cmpb); if equal, replace it
/// with (a,b) and return true
template <typename EType>
static inline FXbool atomicBoolDCas(volatile EType* ptr,EType cmpa,EType cmpb,EType a,EType b){
  return atomicBoolDCas((volatile FXptr*)ptr,(FXptr)cmpa,(FXptr)cmpb,(FXptr)a,(FXptr)b);
  }
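
// Usage sketch (not part of this header): pushing onto an intrusive lock-free stack with
// the templated atomicBoolCas() above.  Node and pushNode() are hypothetical names
// introduced for illustration only; popping safely needs extra care (ABA, reclamation).
struct Node {
  Node* next;
  };

static inline void pushNode(Node* volatile* head,Node* node){
  Node* top;
  do{
    top=*head;                          // Snapshot the current top of the stack
    node->next=top;                     // Link the new node in front of it
    }
  while(!atomicBoolCas(head,top,node)); // Publish; retry if another thread got there first
  }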