[DDK/XDK]

- Fix _XSTATE_SAVE definition for ARM

[NDK]
- #if out the NtCurrentTeb() inline function for ARM; it is implemented in ARMLIB instead

[CRT]
- #if out some intrinsics in msc/intrin.h that don't exist on ARM
- Add _crt_va_start, _crt_va_arg and _crt_va_end for ARM

svn path=/trunk/; revision=56964
This commit is contained in:
Timo Kreuzer 2012-07-24 19:02:29 +00:00
parent ed8d5e3ad7
commit 67bba2223c
5 changed files with 45 additions and 17 deletions

View file

@ -10,8 +10,10 @@ void * _ReturnAddress(void);
#pragma intrinsic(_ReturnAddress)
void * _AddressOfReturnAddress(void);
#pragma intrinsic(_AddressOfReturnAddress)
#if defined(_M_IX86) || defined(_M_AMD64)
unsigned int __getcallerseflags(void);
#pragma intrinsic(__getcallerseflags)
#endif
/*** Memory barriers ***/
void _ReadWriteBarrier(void);
@ -20,12 +22,14 @@ void _ReadBarrier(void);
#pragma intrinsic(_ReadBarrier)
void _WriteBarrier(void);
#pragma intrinsic(_WriteBarrier)
#if defined(_M_IX86) || defined(_M_AMD64)
void _mm_mfence(void);
#pragma intrinsic(_mm_mfence)
void _mm_lfence(void);
#pragma intrinsic(_mm_lfence)
void _mm_sfence(void);
#pragma intrinsic(_mm_sfence)
#endif
#ifdef _M_AMD64
void __faststorefence(void);
#pragma intrinsic(__faststorefence)
@ -97,6 +101,7 @@ unsigned char _interlockedbittestandset64(volatile __int64 * a, __int64 b);
#pragma intrinsic(_interlockedbittestandset64)
#endif
#if defined(_M_IX86) || defined(_M_AMD64)
/*** String operations ***/
void __stosb(unsigned char * Dest, unsigned char Data, size_t Count);
#pragma intrinsic(__stosb)
@ -110,6 +115,7 @@ void __movsw(unsigned short * Destination, unsigned short const * Source, size_t
#pragma intrinsic(__movsw)
void __movsd(unsigned long * Destination, unsigned long const * Source, size_t Count);
#pragma intrinsic(__movsd)
#endif
#ifdef _M_AMD64
void __movsq(unsigned __int64 * Destination, unsigned __int64 const * Source, size_t Count);
#pragma intrinsic(__movsq)
@ -203,33 +209,38 @@ unsigned char _rotr8(unsigned char value, unsigned char shift);
#pragma intrinsic(_rotr8)
unsigned short _rotr16(unsigned short value, unsigned char shift);
#pragma intrinsic(_rotr16)
unsigned __int64 __ll_lshift(unsigned __int64 Mask, int Bit);
#pragma intrinsic(__ll_lshift)
__int64 __ll_rshift(__int64 Mask, int Bit);
#pragma intrinsic(__ll_rshift)
unsigned __int64 __ull_rshift(unsigned __int64 Mask, int Bit);
#pragma intrinsic(__ull_rshift)
unsigned short _byteswap_ushort(unsigned short value);
#pragma intrinsic(_byteswap_ushort)
unsigned long _byteswap_ulong(unsigned long value);
#pragma intrinsic(_byteswap_ulong)
unsigned __int64 _byteswap_uint64(unsigned __int64 value);
#pragma intrinsic(_byteswap_uint64)
#if defined(_M_IX86) || defined(_M_AMD64)
unsigned __int64 __ll_lshift(unsigned __int64 Mask, int Bit);
#pragma intrinsic(__ll_lshift)
__int64 __ll_rshift(__int64 Mask, int Bit);
#pragma intrinsic(__ll_rshift)
unsigned __int64 __ull_rshift(unsigned __int64 Mask, int Bit);
#pragma intrinsic(__ull_rshift)
#endif
#ifdef _M_AMD64
unsigned char _bittest64(__int64 const *a, __int64 b);
#pragma intrinsic(_bittest64)
#endif
#if defined(_M_IX86) || defined(_M_AMD64)
/*** 64-bit math ***/
__int64 __emul(int a, int b);
#pragma intrinsic(__emul)
unsigned __int64 __emulu(unsigned int a, unsigned int b);
#pragma intrinsic(__emulu)
#endif
#ifdef _M_AMD64
unsigned __int64 __umulh(unsigned __int64 a, unsigned __int64 b);
#pragma intrinsic(__umulh)
#endif
#if defined(_M_IX86) || defined(_M_AMD64)
/*** Port I/O ***/
unsigned char __inbyte(unsigned short Port);
#pragma intrinsic(__inbyte)
@ -255,7 +266,9 @@ void __outwordstring(unsigned short Port, unsigned short * Buffer, unsigned long
#pragma intrinsic(__outwordstring)
void __outdwordstring(unsigned short Port, unsigned long * Buffer, unsigned long Count);
#pragma intrinsic(__outdwordstring)
#endif
#if defined(_M_IX86) || defined(_M_AMD64)
/*** System information ***/
void __cpuid(int CPUInfo[], int InfoType);
#pragma intrinsic(__cpuid)
@ -265,27 +278,31 @@ void __writeeflags(uintptr_t Value);
#pragma intrinsic(__writeeflags)
uintptr_t __readeflags(void);
#pragma intrinsic(__readeflags)
#endif
/*** Interrupts ***/
void __debugbreak(void);
#pragma intrinsic(__debugbreak)
void __int2c(void);
#pragma intrinsic(__int2c)
void _disable(void);
#pragma intrinsic(_disable)
void _enable(void);
#pragma intrinsic(_enable)
#if defined(_M_IX86) || defined(_M_AMD64)
void __int2c(void);
#pragma intrinsic(__int2c)
void __halt(void);
#pragma intrinsic(__halt)
#endif
/*** Protected memory management ***/
#if defined(_M_IX86) || defined(_M_AMD64)
void __writecr0(unsigned __int64 Data);
#pragma intrinsic(__writecr0)
void __writecr3(unsigned __int64 Data);
#pragma intrinsic(__writecr3)
void __writecr4(unsigned __int64 Data);
#pragma intrinsic(__writecr4)
#endif
#ifdef _M_AMD64
void __writecr8(unsigned __int64 Data);
#pragma intrinsic(__writecr8)
@ -303,7 +320,7 @@ unsigned __int64 __readdr(unsigned int reg);
#pragma intrinsic(__readdr)
void __writedr(unsigned reg, unsigned __int64 value);
#pragma intrinsic(__writedr)
#else
#else if defined(_M_IX86)
unsigned long __readcr0(void);
unsigned long __readcr2(void);
unsigned long __readcr3(void);
@ -317,9 +334,6 @@ unsigned int __readdr(unsigned int reg);
void __writedr(unsigned reg, unsigned int value);
#endif
void __invlpg(void * Address);
#pragma intrinsic(__invlpg)
#ifdef _M_IX86
// This intrinsic is broken and generates wrong opcodes,
// when optimization is enabled!
@ -337,9 +351,13 @@ void __forceinline __invlpg_fixed(void * Address)
}
#pragma warning(pop)
#define __invlpg __invlpg_fixed
#elif defined(_M_AMD64)
void __invlpg(void * Address);
#pragma intrinsic(__invlpg)
#endif
/*** System operations ***/
#if defined(_M_IX86) || defined(_M_AMD64)
unsigned __int64 __readmsr(int reg);
#pragma intrinsic(__readmsr)
void __writemsr(unsigned long Register, unsigned __int64 Value);
@ -356,6 +374,7 @@ void __sidt(void *Destination);
#pragma intrinsic(__sidt)
void _mm_pause(void);
#pragma intrinsic(_mm_pause)
#endif
#ifdef __cplusplus
}

View file

@ -62,9 +62,7 @@ extern "C" {
#if defined(__ia64__)
#define _VA_ALIGN 8
#define _SLOTSIZEOF(t) ((sizeof(t) + _VA_ALIGN - 1) & ~(_VA_ALIGN - 1))
#define _VA_STRUCT_ALIGN 16
#define _ALIGNOF(ap) ((((ap)+_VA_STRUCT_ALIGN - 1) & ~(_VA_STRUCT_ALIGN -1)) - (ap))
#define _APALIGN(t,ap) (__alignof(t) > 8 ? _ALIGNOF((uintptr_t) ap) : 0)
#else
@ -95,6 +93,15 @@ extern "C" {
(*(t*)(((v) += sizeof(void*)) - sizeof(void*))))
#define _crt_va_end(v) ((void)((v) = (va_list)0))
#define __va_copy(d,s) ((void)((d) = (s)))
#elif defined(_M_ARM)
#ifdef __cplusplus
extern void __cdecl __va_start(va_list*, ...);
#define _crt_va_start(ap,v) __va_start(&ap, _ADDRESSOF(v), _SLOTSIZEOF(v), _ADDRESSOF(v))
#else
#define _crt_va_start(ap,v) (ap = (va_list)_ADDRESSOF(v) + _SLOTSIZEOF(v))
#endif
#define _crt_va_arg(ap,t) (*(t*)((ap += _SLOTSIZEOF(t) + _APALIGN(t,ap)) - _SLOTSIZEOF(t)))
#define _crt_va_end(ap) ( ap = (va_list)0 )
#else //if defined(_M_IA64) || defined(_M_CEE)
#error Please implement me
#endif

View file

@ -1540,7 +1540,7 @@ typedef struct _XSTATE_SAVE {
struct _KTHREAD* Thread;
UCHAR Level;
XSTATE_CONTEXT XStateContext;
#elif defined(_IA64_)
#elif defined(_IA64_) || defined(_ARM_)
ULONG Dummy;
#elif defined(_X86_)
_ANONYMOUS_UNION union {

View file

@ -371,6 +371,7 @@ NtCreateThread(
IN BOOLEAN CreateSuspended
);
#ifndef _M_ARM
#ifndef NTOS_MODE_USER
FORCEINLINE struct _TEB * NtCurrentTeb(VOID)
{
@ -383,6 +384,7 @@ FORCEINLINE struct _TEB * NtCurrentTeb(VOID)
#else
struct _TEB * NtCurrentTeb(void);
#endif
#endif
NTSYSCALLAPI
NTSTATUS

View file

@ -968,7 +968,7 @@ typedef struct _XSTATE_SAVE {
struct _KTHREAD* Thread;
UCHAR Level;
XSTATE_CONTEXT XStateContext;
#elif defined(_IA64_)
#elif defined(_IA64_) || defined(_ARM_)
ULONG Dummy;
#elif defined(_X86_)
_ANONYMOUS_UNION union {