Add internal amd64 headers.

svn path=/trunk/; revision=46620
This commit is contained in:
Timo Kreuzer 2010-03-31 15:11:20 +00:00
parent fbf4909187
commit 99e6ad789f
7 changed files with 672 additions and 0 deletions

View file

@ -0,0 +1,81 @@
/*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS Kernel
* FILE: ntoskrnl/include/amd64/asmmacro.S
* PURPOSE: Macros for x64 assembly
* PROGRAMMERS: Timo Kreuzer (timo.kreuzer@reactos.org)
*/
.intel_syntax noprefix
.code64
/* Macros for x64 stack unwind OPs */
/*
 * .proc name
 * Opens a procedure: declares the decorated global symbol (this build
 * uses a leading-underscore name decoration), starts the DWARF CFI
 * region, and seeds the CFA tracking offset with -8 to account for the
 * return address the caller pushed.
 */
.macro .proc name
.func name
.global _\name
_\name:
.cfi_startproc
.equ cfa_current_offset, -8
.endm
/*
 * .endproc
 * Closes the DWARF CFI region and the .func opened by .proc.
 */
.macro .endproc
.cfi_endproc
.endfunc
.endm
/*
 * .allocstack size
 * Accounts for a 'sub rsp, size' in the prologue: the CFA moves by
 * \size and the running CFA-relative offset is updated to match.
 */
.macro .allocstack size
.cfi_adjust_cfa_offset \size
.set cfa_current_offset, cfa_current_offset - \size
.endm
/*
 * .pushframe code
 * Accounts for the hardware interrupt/exception frame pushed by the CPU.
 * code == 0: plain machine frame (5 quadwords = 0x28 bytes);
 * otherwise an error code is also present (6 quadwords = 0x30 bytes).
 */
.macro .pushframe code
.if (\code == 0)
.cfi_adjust_cfa_offset 0x28
.set cfa_current_offset, cfa_current_offset - 0x28
.else
.cfi_adjust_cfa_offset 0x30
.set cfa_current_offset, cfa_current_offset - 0x30
.endif
.endm
/*
 * .pushreg reg
 * Accounts for a 'push reg' in the prologue: adjusts the CFA by 8 and
 * records where \reg was saved so the unwinder can restore it.
 * (.equ here and .set in .allocstack are equivalent in GAS.)
 */
.macro .pushreg reg
.cfi_adjust_cfa_offset 8
.equ cfa_current_offset, cfa_current_offset - 8
.cfi_offset \reg, cfa_current_offset
.endm
/*
 * .savereg reg, offset
 * Records that \reg was stored at \offset without moving rsp.
 * NOTE(review): MSVC's .savereg offset is relative to the fixed stack
 * allocation, while .cfi_offset expects a CFA-relative offset — confirm
 * the two conventions agree here (hence the original checkme).
 */
.macro .savereg reg, offset
// checkme!!!
.cfi_offset \reg, \offset
.endm
/*
 * .savexmm128 reg, offset
 * Same as .savereg, for a 128-bit XMM register save.
 * NOTE(review): same offset-convention question as .savereg.
 */
.macro .savexmm128 reg, offset
// checkme!!!
.cfi_offset \reg, \offset
.endm
/*
 * .setframe reg, offset
 * Declares \reg as the frame register: the CFA is henceforth computed
 * as \reg + \offset, and the tracked offset is reset to \offset.
 * Fix: the original passed the literal token 'reg' to .cfi_def_cfa —
 * GAS only substitutes macro parameters written as \reg, so the
 * directive saw an undefined symbol instead of the frame register.
 */
.macro .setframe reg, offset
.cfi_def_cfa \reg, \offset
.equ cfa_current_offset, \offset
.endm
/*
 * .endprolog
 * Marks the end of the prologue; no directive needed for DWARF CFI,
 * kept as a placeholder for MASM-style source compatibility.
 */
.macro .endprolog
.endm
/*
 * UNIMPLEMENTED2 line, func
 * Emits an inline "unimplemented" debug print: skips over two local
 * strings holding the function and file names, then calls DbgPrint
 * with (MsgUnimplemented, func, file, line) per the MS x64 convention
 * (rcx, rdx, r8, r9), reserving the required 0x20-byte home space.
 */
.macro UNIMPLEMENTED2 line, func
jmp 3f
/* NOTE(review): 'expr' is never referenced below — leftover? */
.equ expr, 12
1: .asciz "\func"
2: .asciz __FILE__
3:
sub rsp, 0x20
lea rcx, _MsgUnimplemented[rip]
lea rdx, 1b[rip]
lea r8, 2b[rip]
mov r9, \line
call _DbgPrint
add rsp, 0x20
.endm
/* Convenience wrapper; the C preprocessor pass supplies __LINE__ */
#define UNIMPLEMENTED UNIMPLEMENTED2 __LINE__,

View file

@ -0,0 +1,128 @@
#ifndef _INTRIN_INTERNAL_
#define _INTRIN_INTERNAL_
/*
 * Sets the current IRQL.
 * On amd64 the IRQL lives in CR8 (the task priority register).
 */
VOID
FORCEINLINE
KeSetCurrentIrql(KIRQL Irql)
{
    __writecr8(Irql);
}
/*
 * Returns a pointer to the GDT entry selected by Selector.
 * The low two selector bits (the RPL field) are not part of the
 * descriptor table offset and are masked away first.
 */
PKGDTENTRY64
FORCEINLINE
KiGetGdtEntry(PVOID pGdt, USHORT Selector)
{
    ULONG64 Offset;

    /* Strip the requested privilege level to get the table offset */
    Offset = Selector & ~RPL_MASK;

    return (PKGDTENTRY64)((ULONG64)pGdt + Offset);
}
/*
 * Reassembles the 64-bit base address scattered across a GDT entry
 * (low 16 bits, middle 8, high 8, upper 32).
 * NOTE(review): this reads the middle/high bytes through Entry->Bytes
 * while KiSetGdtDescriptorBase writes them through Entry->Bits —
 * presumably both unions alias the same storage; confirm against the
 * KGDTENTRY layout.
 */
PVOID
FORCEINLINE
KiGetGdtDescriptorBase(PKGDTENTRY Entry)
{
    return (PVOID)((ULONG64)Entry->BaseLow |
                   (ULONG64)Entry->Bytes.BaseMiddle << 16 |
                   (ULONG64)Entry->Bytes.BaseHigh << 24 |
                   (ULONG64)Entry->BaseUpper << 32);
}
/*
 * Splits a 64-bit base address into the four fields of a GDT entry
 * (inverse of KiGetGdtDescriptorBase).
 */
VOID
FORCEINLINE
KiSetGdtDescriptorBase(PKGDTENTRY Entry, ULONG64 Base)
{
    Entry->BaseLow = Base & 0xffff;            /* bits 0-15 */
    Entry->Bits.BaseMiddle = (Base >> 16) & 0xff; /* bits 16-23 */
    Entry->Bits.BaseHigh = (Base >> 24) & 0xff;   /* bits 24-31 */
    Entry->BaseUpper = Base >> 32;             /* bits 32-63 */
}
/*
 * Stores a 20-bit segment limit into the split limit fields of a GDT
 * entry (low 16 bits plus the 4-bit LimitHigh nibble).
 */
VOID
FORCEINLINE
KiSetGdtDescriptorLimit(PKGDTENTRY Entry, ULONG Limit)
{
    Entry->LimitLow = Limit & 0xffff;
    Entry->Bits.LimitHigh = Limit >> 16;
}
/*
 * Initializes one GDT entry with the given base, size, type and DPL.
 * The entry is marked present; granularity is byte (Granularity = 0),
 * so Size is a byte count and the stored limit is Size - 1.
 * System/LongMode/DefaultBig are cleared — callers set them afterwards
 * where required.
 */
VOID
FORCEINLINE
KiInitGdtEntry(PKGDTENTRY64 Entry, ULONG64 Base, ULONG Size, UCHAR Type, UCHAR Dpl)
{
    KiSetGdtDescriptorBase(Entry, Base);
    KiSetGdtDescriptorLimit(Entry, Size - 1);
    Entry->Bits.Type = Type;
    Entry->Bits.Dpl = Dpl;
    Entry->Bits.Present = 1;
    Entry->Bits.System = 0;
    Entry->Bits.LongMode = 0;
    Entry->Bits.DefaultBig = 0;
    Entry->Bits.Granularity = 0;
    Entry->MustBeZero = 0;
}
#if defined(__GNUC__)

/* Loads the GDTR from the 10-byte pseudo-descriptor at Source */
static __inline__ __attribute__((always_inline)) void __lgdt(void *Source)
{
    __asm__ __volatile__("lgdt %0" : : "m"(*(short*)Source));
}

/* Stores the GDTR into the buffer at Destination.
   Fix: the buffer is written by the instruction, so it must be an
   output ("=m") operand, not an input. The "memory" clobber stays
   because the constraint only names the first two of the ten bytes
   actually stored. */
static __inline__ __attribute__((always_inline)) void __sgdt(void *Destination)
{
    __asm__ __volatile__("sgdt %0" : "=m"(*(short*)Destination) : : "memory");
}

/* Loads the LDT selector */
static __inline__ __attribute__((always_inline)) void __lldt(unsigned short Value)
{
    __asm__ __volatile__("lldt %0" : : "rm"(Value));
}

/* Stores the LDT selector into *Destination (output operand, see __sgdt) */
static __inline__ __attribute__((always_inline)) void __sldt(void *Destination)
{
    __asm__ __volatile__("sldt %0" : "=m"(*(short*)Destination) : : "memory");
}

/* Loads the MXCSR register from *Source */
static __inline__ __attribute__((always_inline)) void __ldmxcsr(unsigned long *Source)
{
    __asm__ __volatile__("ldmxcsr %0" : : "m"(*Source));
}

/* Stores the MXCSR register into *Destination (output operand) */
static __inline__ __attribute__((always_inline)) void __stmxcsr(unsigned long *Destination)
{
    __asm__ __volatile__("stmxcsr %0" : "=m"(*Destination) : : "memory");
}

/* Loads the task register from the given TSS selector */
static __inline__ __attribute__((always_inline)) void __ltr(unsigned short Source)
{
    __asm__ __volatile__("ltr %0" : : "rm"(Source));
}

/* Stores the task register selector into *Destination (output operand) */
static __inline__ __attribute__((always_inline)) void __str(unsigned short *Destination)
{
    __asm__ __volatile__("str %0" : "=m"(*Destination) : : "memory");
}

#elif defined(_MSC_VER)

#define UNIMPLEMENTED DbgPrint("%s is unimplemented!!!\n", __FUNCTION__);

/* MSVC has no amd64 inline assembly; these are provided out-of-line */
void __lgdt(void *Source);
void __sgdt(void *Destination);
void __lldt(unsigned short Value);
void __sldt(void *Destination);
void __ltr(unsigned short Source);
void __str(unsigned short *Destination);

#else
#error Unknown compiler for inline assembler
#endif
#endif
/* EOF */

View file

@ -0,0 +1,268 @@
#ifndef __NTOSKRNL_INCLUDE_INTERNAL_AMD64_KE_H
#define __NTOSKRNL_INCLUDE_INTERNAL_AMD64_KE_H
/* EFLAGS bits */
#define X86_EFLAGS_TF 0x00000100 /* Trap flag */
#define X86_EFLAGS_IF 0x00000200 /* Interrupt Enable flag */
#define X86_EFLAGS_IOPL 0x00003000 /* I/O Privilege Level bits */
#define X86_EFLAGS_NT 0x00004000 /* Nested Task flag */
#define X86_EFLAGS_RF 0x00010000 /* Resume flag */
#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
/* CR0 bits */
#define X86_CR0_PE 0x00000001 /* enable Protected Mode */
#define X86_CR0_NE 0x00000020 /* enable native FPU error reporting */
#define X86_CR0_TS 0x00000008 /* enable exception on FPU instruction for task switch */
#define X86_CR0_EM 0x00000004 /* enable FPU emulation (disable FPU) */
#define X86_CR0_MP 0x00000002 /* enable FPU monitoring */
#define X86_CR0_WP 0x00010000 /* enable Write Protect (copy on write) */
#define X86_CR0_PG 0x80000000 /* enable Paging */
/* CR4 bits */
#define X86_CR4_PAE 0x00000020 /* enable physical address extensions */
#define X86_CR4_PGE 0x00000080 /* enable global pages */
#define X86_CR4_OSFXSR 0x00000200 /* enable FXSAVE/FXRSTOR instructions */
#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable #XF exception */
/* CPUID feature bits */
#define X86_FEATURE_VME 0x00000002 /* Virtual 8086 Extensions are present */
#define X86_FEATURE_TSC 0x00000010 /* time stamp counters are present */
#define X86_FEATURE_PAE 0x00000040 /* physical address extension is present */
#define X86_FEATURE_CX8 0x00000100 /* CMPXCHG8B instruction present */
#define X86_FEATURE_SYSCALL 0x00000800 /* SYSCALL/SYSRET support present */
#define X86_FEATURE_PGE 0x00002000 /* Page Global Enable */
#define X86_FEATURE_MMX 0x00800000 /* MMX extension present */
#define X86_FEATURE_FXSR 0x01000000 /* FXSAVE/FXRSTOR instructions present */
#define X86_FEATURE_SSE 0x02000000 /* SSE extension present */
#define X86_FEATURE_SSE2 0x04000000 /* SSE2 extension present */
#define X86_FEATURE_HT 0x10000000 /* Hyper-Threading present */
#define X86_EXT_FEATURE_SSE3 0x00000001 /* SSE3 extension present */
#define X86_EXT_FEATURE_3DNOW 0x40000000 /* 3DNOW! extension present */
#define FRAME_EDITED 0xFFF8
/* Model-specific registers */
#define X86_MSR_GSBASE 0xC0000101
#define X86_MSR_KERNEL_GSBASE 0xC0000102
#define X86_MSR_EFER 0xC0000080
#define X86_MSR_STAR 0xC0000081
#define X86_MSR_LSTAR 0xC0000082
#define X86_MSR_CSTAR 0xC0000083
#define X86_MSR_SFMASK 0xC0000084
/* EFER bits */
#define EFER_SCE 0x01
#define EFER_LME 0x10
#define EFER_LMA 0x40
#define EFER_NXE 0x80
#define EFER_SVME 0x100
#define EFER_FFXSR 0x400
/* GDT descriptor type for the 64-bit TSS */
#define AMD64_TSS 9
#ifndef __ASM__
#include "intrin_i.h"
/* Static initialization record for one IDT vector */
typedef struct _KIDT_INIT
{
    UCHAR InterruptId;   /* vector number */
    UCHAR Dpl;           /* descriptor privilege level */
    UCHAR IstIndex;      /* interrupt stack table slot (0 = none) */
    PVOID ServiceRoutine; /* handler entry point */
} KIDT_INIT, *PKIDT_INIT;
/* CPU capability globals (names keep the I386 prefix from the x86 port) */
extern ULONG Ke386CacheAlignment;
extern ULONG KeI386NpxPresent;
extern ULONG KeI386XMMIPresent;
extern ULONG KeI386FxsrPresent;
extern ULONG KeI386CpuType;
extern ULONG KeI386CpuStep;
#define IMAGE_FILE_MACHINE_ARCHITECTURE IMAGE_FILE_MACHINE_AMD64
//
// INT3 is 1 byte long
//
#define KD_BREAKPOINT_TYPE UCHAR
#define KD_BREAKPOINT_SIZE sizeof(UCHAR)
#define KD_BREAKPOINT_VALUE 0xCC
//
// Macros for getting and setting special purpose registers in portable code
//
#define KeGetContextPc(Context) \
    ((Context)->Rip)
#define KeSetContextPc(Context, ProgramCounter) \
    ((Context)->Rip = (ProgramCounter))
#define KeGetTrapFramePc(TrapFrame) \
    ((TrapFrame)->Rip)
#define KeGetContextReturnRegister(Context) \
    ((Context)->Rax)
#define KeSetContextReturnRegister(Context, ReturnValue) \
    ((Context)->Rax = (ReturnValue))
//
// Macro to get trap and exception frame from a thread stack
//
#define KeGetTrapFrame(Thread) \
    (PKTRAP_FRAME)((ULONG_PTR)((Thread)->InitialStack) - \
    sizeof(KTRAP_FRAME))
//
// Macro to get context switches from the PRCB
// All architectures but x86 have it in the PRCB's KeContextSwitches
//
#define KeGetContextSwitches(Prcb) \
    (Prcb->KeContextSwitches)
/* The exception frame sits directly below the trap frame on the stack */
#define KeGetExceptionFrame(Thread) \
    (PKEXCEPTION_FRAME)((ULONG_PTR)KeGetTrapFrame(Thread) - \
    sizeof(KEXCEPTION_FRAME))
//
// Returns the Interrupt State from a Trap Frame.
// ON = TRUE, OFF = FALSE
//
#define KeGetTrapFrameInterruptState(TrapFrame) \
    BooleanFlagOn((TrapFrame)->EFlags, EFLAGS_INTERRUPT_MASK)
//
// Invalidates the TLB entry for a specified address
//
FORCEINLINE
VOID
KeInvalidateTlbEntry(IN PVOID Address)
{
    /* Invalidate the TLB entry for this address */
    __invlpg(Address);
}
/*
 * Flushes the entire (non-global) TLB for the current process by
 * reloading CR3 with its own value.
 */
FORCEINLINE
VOID
KeFlushProcessTb(VOID)
{
    /* Flush the TLB by resetting CR3 */
    __writecr3(__readcr3());
}
/*
 * Per-processor cleanup for a terminating thread.
 * NOTE(review): this mirrors the x86 version's structure, where the UP
 * build must flush the thread's lazily-saved FPU/NPX state here while
 * the SMP build has nothing to do — the UP part is not implemented yet
 * (hence the debug print); confirm against the x86 ke.h counterpart.
 */
FORCEINLINE
VOID
KiRundownThread(IN PKTHREAD Thread)
{
#ifndef CONFIG_SMP
    DbgPrint("KiRundownThread is unimplemented\n");
#else
    /* Nothing to do */
#endif
}
/*
 * Completes interrupt handling: presumably should lower the IRQL and
 * restore state from the trap frame — not implemented yet, only logs.
 */
VOID
FORCEINLINE
KiEndInterrupt(IN KIRQL Irql,
               IN PKTRAP_FRAME TrapFrame)
{
    DbgPrint("KiEndInterrupt is unimplemented\n");
}
/* Performance-logging hook: no-op on this architecture */
#define Ki386PerfEnd(x)

struct _KPCR;

VOID
FASTCALL
KiInitializeTss(IN PKTSS Tss, IN UINT64 Stack);

/* Trap/interrupt entry points (implemented in assembly) */
VOID KiDivideErrorFault();
VOID KiDebugTrapOrFault();
VOID KiNmiInterrupt();
VOID KiBreakpointTrap();
VOID KiOverflowTrap();
VOID KiBoundFault();
VOID KiInvalidOpcodeFault();
VOID KiNpxNotAvailableFault();
VOID KiDoubleFaultAbort();
VOID KiNpxSegmentOverrunAbort();
VOID KiInvalidTssFault();
VOID KiSegmentNotPresentFault();
VOID KiStackFault();
VOID KiGeneralProtectionFault();
VOID KiPageFault();
VOID KiFloatingErrorFault();
VOID KiAlignmentFault();
VOID KiMcheckAbort();
VOID KiXmmException();
VOID KiApcInterrupt();
VOID KiRaiseAssertion();
VOID KiDebugServiceTrap();
VOID KiDpcInterrupt();
VOID KiIpiInterrupt();

/* Processor setup / feature detection */
VOID
KiGdtPrepareForApplicationProcessorInit(ULONG Id);
VOID
Ki386InitializeLdt(VOID);
VOID
Ki386SetProcessorFeatures(VOID);
VOID
NTAPI
KiGetCacheInformation(VOID);
BOOLEAN
NTAPI
KiIsNpxPresent(
    VOID
);
BOOLEAN
NTAPI
KiIsNpxErrataPresent(
    VOID
);
VOID
NTAPI
KiSetProcessorType(VOID);
ULONG
NTAPI
KiGetFeatureBits(VOID);
VOID
NTAPI
KiInitializeCpuFeatures();

/* GDT selector management */
ULONG KeAllocateGdtSelector(ULONG Desc[2]);
VOID KeFreeGdtSelector(ULONG Entry);
VOID
NtEarlyInitVdm(VOID);

/* Application-processor (SMP) bring-up */
VOID
KeApplicationProcessorInitDispatcher(VOID);
VOID
KeCreateApplicationProcessorIdleThread(ULONG Id);

/* Sets up the initial context of a new thread (name kept from x86) */
VOID
NTAPI
Ke386InitThreadWithContext(PKTHREAD Thread,
                           PKSYSTEM_ROUTINE SystemRoutine,
                           PKSTART_ROUTINE StartRoutine,
                           PVOID StartContext,
                           PCONTEXT Context);
#define KeArchInitThreadWithContext(Thread,SystemRoutine,StartRoutine,StartContext,Context) \
    Ke386InitThreadWithContext(Thread,SystemRoutine,StartRoutine,StartContext,Context)
#ifdef _NTOSKRNL_ /* FIXME: Move flags above to NDK instead of here */
/*
 * First routine executed by every new thread.
 * NOTE(review): TrapFrame is declared by value, not by pointer —
 * presumably because it is materialized on the new thread's stack by
 * the context setup code; confirm this is intentional.
 */
VOID
NTAPI
KiThreadStartup(PKSYSTEM_ROUTINE SystemRoutine,
                PKSTART_ROUTINE StartRoutine,
                PVOID StartContext,
                BOOLEAN UserThread,
                KTRAP_FRAME TrapFrame);
#endif
#endif
#endif /* __NTOSKRNL_INCLUDE_INTERNAL_AMD64_KE_H */
/* EOF */

View file

@ -0,0 +1,189 @@
/*
 * Low-level memory management definitions
 */
#pragma once

/* Helper macros */
#define PAGE_MASK(x) ((x)&(~0xfff))
#define PAE_PAGE_MASK(x) ((x)&(~0xfffLL))

/* Memory layout base addresses */
#define HYPER_SPACE 0xFFFFF70000000000ULL
#define HYPER_SPACE_END 0xFFFFF77FFFFFFFFFULL
#define MI_SESSION_SPACE_MINIMUM (PVOID)0xFFFFF90000000000ULL
#define MI_SESSION_VIEW_END (PVOID)0xFFFFF97FFF000000ULL
#define MI_SESSION_SPACE_END (PVOID)0xFFFFF98000000000ULL
#define MI_SYSTEM_PTE_START (PVOID)0xFFFFFAA000000000ULL
#define MI_PAGED_POOL_START (PVOID)0xFFFFFA8000000000ULL
#define MI_NON_PAGED_SYSTEM_START_MIN 0xFFFFFAA000000000ULL
#define MI_PFN_DATABASE (PVOID)0xFFFFFAC000000000ULL
#define MI_NONPAGED_POOL_END (PVOID)0xFFFFFAE000000000ULL
#define MI_DEBUG_MAPPING (PVOID)0xFFFFFFFF80000000ULL // FIXME
#define MI_HIGHEST_SYSTEM_ADDRESS (PVOID)0xFFFFFFFFFFFFFFFFULL
#define MI_SYSTEM_CACHE_WS_START (PVOID)0xFFFFF78000001000ULL // CHECKME

/* Memory sizes (page counts are derived via PAGE_SHIFT) */
#define MI_MIN_PAGES_FOR_NONPAGED_POOL_TUNING ((255*1024*1024) >> PAGE_SHIFT)
#define MI_MIN_PAGES_FOR_SYSPTE_TUNING ((19*1024*1024) >> PAGE_SHIFT)
#define MI_MIN_PAGES_FOR_SYSPTE_BOOST ((32*1024*1024) >> PAGE_SHIFT)
#define MI_MAX_INIT_NONPAGED_POOL_SIZE (128ULL * 1024 * 1024 * 1024)
#define MI_MAX_NONPAGED_POOL_SIZE (128ULL * 1024 * 1024 * 1024)
#define MI_MAX_FREE_PAGE_LISTS 4
#define MI_MIN_INIT_PAGED_POOLSIZE (32 * 1024 * 1024)
/* Session space is the sum of its four sub-regions below */
#define MI_SESSION_VIEW_SIZE (20 * 1024 * 1024)
#define MI_SESSION_POOL_SIZE (16 * 1024 * 1024)
#define MI_SESSION_IMAGE_SIZE (8 * 1024 * 1024)
#define MI_SESSION_WORKING_SET_SIZE (4 * 1024 * 1024)
#define MI_SESSION_SIZE (MI_SESSION_VIEW_SIZE + \
                         MI_SESSION_POOL_SIZE + \
                         MI_SESSION_IMAGE_SIZE + \
                         MI_SESSION_WORKING_SET_SIZE)
#define MI_SYSTEM_VIEW_SIZE (16 * 1024 * 1024)
#define MI_NUMBER_SYSTEM_PTES 22000
/*
 * Returns the current top-level page table (PML4) address from CR3.
 * NOTE(review): CR3's low bits carry control flags, not address bits —
 * presumably callers mask or ignore them; confirm.
 */
PULONG64
FORCEINLINE
MmGetPageDirectory(VOID)
{
    return (PULONG64)__readcr3();
}
/*
 * Returns the PXE (PML4 entry) mapping Address, via the recursive
 * page-table mapping. Shifting by (PXI_SHIFT - 3) yields the PXE index
 * pre-multiplied by sizeof(MMPTE) == 8; the mask keeps one 9-bit index.
 */
PMMPTE
FORCEINLINE
MiAddressToPxe(PVOID Address)
{
    ULONG64 Offset = (ULONG64)Address >> (PXI_SHIFT - 3);
    Offset &= PXI_MASK << 3;
    return (PMMPTE)(PXE_BASE + Offset);
}
/*
 * Returns the PPE (PDPT entry) mapping Address. Same scheme as
 * MiAddressToPxe, but two index levels (18 bits, mask 0x3FFFF) remain
 * significant at the PPE level.
 */
PMMPTE
FORCEINLINE
MiAddressToPpe(PVOID Address)
{
    ULONG64 Offset = (ULONG64)Address >> (PPI_SHIFT - 3);
    Offset &= 0x3FFFF << 3;
    return (PMMPTE)(PPE_BASE + Offset);
}
/*
 * Returns the PDE mapping Address; three index levels (27 bits,
 * mask 0x7FFFFFF) remain significant at the PDE level.
 */
PMMPTE
FORCEINLINE
_MiAddressToPde(PVOID Address)
{
    ULONG64 Offset = (ULONG64)Address >> (PDI_SHIFT - 3);
    Offset &= 0x7FFFFFF << 3;
    return (PMMPTE)(PDE_BASE + Offset);
}
/* Cast-friendly wrapper so integral addresses can be passed too */
#define MiAddressToPde(x) _MiAddressToPde((PVOID)(x))
/*
 * Returns the PTE mapping Address; all four index levels (36 bits,
 * mask 0xFFFFFFFFF) remain significant at the PTE level.
 */
PMMPTE
FORCEINLINE
_MiAddressToPte(PVOID Address)
{
    ULONG64 Offset = (ULONG64)Address >> (PTI_SHIFT - 3);
    Offset &= 0xFFFFFFFFFULL << 3;
    return (PMMPTE)(PTE_BASE + Offset);
}
/* Cast-friendly wrapper so integral addresses can be passed too */
#define MiAddressToPte(x) _MiAddressToPte((PVOID)(x))
/*
 * Converts a PTE address inside the recursive mapping back to the
 * virtual address that PTE maps. The left shift by 25 discards the
 * PTE_BASE prefix bits; the arithmetic right shift by 16 rescales the
 * PTE index to a page address and sign-extends the result so kernel
 * addresses come back canonical.
 */
PVOID
FORCEINLINE
MiPteToAddress(PMMPTE Pte)
{
    /* Signed type on purpose: the right shift must sign-extend */
    return (PVOID)(((LONG64)Pte << 25) >> 16);
}
/*
 * TRUE if the whole paging hierarchy down to the PDE is present for
 * Address, i.e. the PXE, PPE and PDE all have their Valid bit set.
 * Checked top-down so lower levels are only touched when mapped.
 */
BOOLEAN
FORCEINLINE
MiIsPdeForAddressValid(PVOID Address)
{
    return ((MiAddressToPxe(Address)->u.Hard.Valid) &&
            (MiAddressToPpe(Address)->u.Hard.Valid) &&
            (MiAddressToPde(Address)->u.Hard.Valid));
}
/* Page-table/page-directory index helpers (512 entries x 4K pages) */
#define ADDR_TO_PAGE_TABLE(v) (((ULONG_PTR)(v)) / (512 * PAGE_SIZE))
#define ADDR_TO_PDE_OFFSET(v) ((((ULONG_PTR)(v)) / (512 * PAGE_SIZE)))
#define ADDR_TO_PTE_OFFSET(v) ((((ULONG_PTR)(v)) % (512 * PAGE_SIZE)) / PAGE_SIZE)
/* Extracts the 9-bit index for each paging level from a VA */
#define VAtoPXI(va) ((((ULONG64)va) >> PXI_SHIFT) & 0x1FF)
#define VAtoPPI(va) ((((ULONG64)va) >> PPI_SHIFT) & 0x1FF)
#define VAtoPDI(va) ((((ULONG64)va) >> PDI_SHIFT) & 0x1FF)
#define VAtoPTI(va) ((((ULONG64)va) >> PTI_SHIFT) & 0x1FF)
/* We don't use these hacks */

/* Stub kept for interface compatibility with other architectures */
VOID
FORCEINLINE
MmUpdatePageDir(PEPROCESS Process, PVOID Address, ULONG Size)
{
    /* Nothing to do */
}

/* Stub kept for interface compatibility with other architectures */
VOID
FORCEINLINE
MmInitGlobalKernelPageDirectory(VOID)
{
    /* Nothing to do */
}
/* Alignment helpers (align must be a power of two) */
#define IS_ALIGNED(addr, align) (((ULONG64)(addr) & (align - 1)) == 0)
#define IS_PAGE_ALIGNED(addr) IS_ALIGNED(addr, PAGE_SIZE)
/* Easy accessing PFN in PTE */
#define PFN_FROM_PTE(v) ((v)->u.Hard.PageFrameNumber)
// FIXME, only copied from x86
#define MI_MAKE_LOCAL_PAGE(x) ((x)->u.Hard.Global = 0)
#define MI_MAKE_DIRTY_PAGE(x) ((x)->u.Hard.Dirty = 1)
#define MI_PAGE_DISABLE_CACHE(x) ((x)->u.Hard.CacheDisable = 1)
#define MI_PAGE_WRITE_THROUGH(x) ((x)->u.Hard.WriteThrough = 1)
/* NOTE(review): only clears WriteThrough — verify whether write-combined
   also needs CacheDisable/PAT bits on this architecture */
#define MI_PAGE_WRITE_COMBINED(x) ((x)->u.Hard.WriteThrough = 0)
/* UP uses the hardware Write bit directly; SMP tracks writability in the
   software Writable bit (copied from the x86 scheme) */
#if !defined(CONFIG_SMP)
#define MI_IS_PAGE_WRITEABLE(x) ((x)->u.Hard.Write == 1)
#else
#define MI_IS_PAGE_WRITEABLE(x) ((x)->u.Hard.Writable == 1)
#endif
#define MI_IS_PAGE_COPY_ON_WRITE(x)((x)->u.Hard.CopyOnWrite == 1)
#define MI_IS_PAGE_DIRTY(x) ((x)->u.Hard.Dirty == 1)
#define MI_MAKE_OWNER_PAGE(x) ((x)->u.Hard.Owner = 1)
#if !defined(CONFIG_SMP)
#define MI_MAKE_WRITE_PAGE(x) ((x)->u.Hard.Write = 1)
#else
#define MI_MAKE_WRITE_PAGE(x) ((x)->u.Hard.Writable = 1)
#endif
// FIXME!!!
#define PAGE_TO_SECTION_PAGE_DIRECTORY_OFFSET(x) \
    ((x) / (4*1024*1024))
#define PAGE_TO_SECTION_PAGE_TABLE_OFFSET(x) \
    ((((x)) % (4*1024*1024)) / (4*1024))
#define NR_SECTION_PAGE_TABLES 1024
#define NR_SECTION_PAGE_ENTRIES 1024
//#define TEB_BASE 0x7FFDE000
/* Hyperspace mapping window: PTE counts for the temporary-mapping and
   zeroing regions inside HYPER_SPACE */
#define MI_HYPERSPACE_PTES (256 - 1)
#define MI_ZERO_PTES (32)
/* Fix: the original cast was (ULONG), which truncates the 64-bit
   HYPER_SPACE address (0xFFFFF70000000000) to 0 on amd64 — ULONG is
   32 bits here. Use the pointer-sized ULONG_PTR instead. */
#define MI_MAPPING_RANGE_START ((ULONG_PTR)HYPER_SPACE)
#define MI_MAPPING_RANGE_END (MI_MAPPING_RANGE_START + \
                              MI_HYPERSPACE_PTES * PAGE_SIZE)
#define MI_ZERO_PTE (PMMPTE)(MI_MAPPING_RANGE_END + \
                             PAGE_SIZE)
/* On amd64, PDEs and PTEs share the same layout */
#define MMPDE MMPTE
#define PMMPDE PMMPTE
/*
 * FIXME - different architectures have different cache line sizes...
 */
#define MM_CACHE_LINE_SIZE 32

View file

@ -27,6 +27,8 @@
#include "../mips/intrin_i.h"
#elif defined(_M_ARM)
#include "../arm/intrin_i.h"
#elif defined(_M_AMD64)
#include "../amd64/intrin_i.h"
#else
#error "Unknown processor"
#endif

View file

@ -27,6 +27,8 @@
#include "../mips/ke.h"
#elif defined(_M_ARM)
#include "../arm/ke.h"
#elif defined(_M_AMD64)
#include "../amd64/ke.h"
#else
#error "Unknown processor"
#endif

View file

@ -27,6 +27,8 @@
#include <internal/mips/mm.h>
#elif defined(_M_ARM)
#include <internal/arm/mm.h>
#elif defined(_M_AMD64)
#include <internal/amd64/mm.h>
#else
#error "Unknown processor"
#endif