[CRT:INTRIN_X86]

- Use correct check for x64 architecture
- Fix Interlocked*64 return types
- Add _InterlockedXor64
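
GCC predefines __x86_64__ (and MSVC defines _M_AMD64); plain _x86_64 is not a
predefined macro, so the old #ifdef never matched. The return-type fix matters
because the Interlocked*64 intrinsics return the operand's value from *before*
the operation; declaring them as returning long would truncate the upper 32
bits. A minimal sketch of the effect (hypothetical test program, not part of
the commit, assuming a GCC-compatible compiler on x64):

#include <stdio.h>

static volatile long long flags = 0x100000001LL;

int main(void)
{
#ifdef __x86_64__ /* the spelling GCC actually predefines */
    /* __sync_fetch_and_xor (the builtin backing _InterlockedXor64 here)
     * returns the previous value, hence the long long return type;
     * a long return would drop the high 32 bits of 0x100000001. */
    long long previous = __sync_fetch_and_xor(&flags, 1LL);
    printf("previous = 0x%llx, now = 0x%llx\n", previous, (long long)flags);
#endif
    return 0;
}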

svn path=/trunk/; revision=56467
Author: Thomas Faber
Date:   2012-05-01 15:45:49 +00:00
Parent: 05e66ae9a9
Commit: 785d0653ee

@@ -75,7 +75,7 @@ extern "C" {
 /* TODO: __getcallerseflags but how??? */
 
 /* Maybe the same for x86? */
-#ifdef _x86_64
+#ifdef __x86_64__
 #define _alloca(s) __builtin_alloca(s)
 #endif
 
@@ -109,10 +109,10 @@ __INTRIN_INLINE void _mm_sfence(void)
     _WriteBarrier();
 }
 
-#ifdef _x86_64
+#ifdef __x86_64__
 __INTRIN_INLINE void __faststorefence(void)
 {
     long local;
     __asm__ __volatile__("lock; orl $0, %0;" : : "m"(local));
 }
 #endif
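
The lock-prefixed "orl $0" on a dummy stack slot above acts as a full memory
barrier: on x86 a locked read-modify-write orders all prior loads and stores,
and it is often cheaper than mfence. A rough functional equivalent, shown for
illustration only (not what this header emits) and assuming GCC builtins:

/* Illustration only: __sync_synchronize() likewise emits a full
 * barrier (mfence or a lock-prefixed instruction on x86). */
static __inline__ void full_fence_sketch(void)
{
    __sync_synchronize();
}
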
@@ -198,7 +198,7 @@ __INTRIN_INLINE long _InterlockedAnd(volatile long * const value, const long mas
 }
 
 #if defined(_M_AMD64)
-__INTRIN_INLINE long _InterlockedAnd64(volatile long long * const value, const long long mask)
+__INTRIN_INLINE long long _InterlockedAnd64(volatile long long * const value, const long long mask)
 {
     return __sync_fetch_and_and(value, mask);
 }
@@ -220,7 +220,7 @@ __INTRIN_INLINE long _InterlockedOr(volatile long * const value, const long mask
 }
 
 #if defined(_M_AMD64)
-__INTRIN_INLINE long _InterlockedOr64(volatile long long * const value, const long long mask)
+__INTRIN_INLINE long long _InterlockedOr64(volatile long long * const value, const long long mask)
 {
     return __sync_fetch_and_or(value, mask);
 }
@@ -241,6 +241,13 @@ __INTRIN_INLINE long _InterlockedXor(volatile long * const value, const long mas
     return __sync_fetch_and_xor(value, mask);
 }
 
+#if defined(_M_AMD64)
+__INTRIN_INLINE long long _InterlockedXor64(volatile long long * const value, const long long mask)
+{
+    return __sync_fetch_and_xor(value, mask);
+}
+#endif
+
 #else
 __INTRIN_INLINE char _InterlockedCompareExchange8(volatile char * const Destination, const char Exchange, const char Comperand)