Mirror of https://github.com/reactos/reactos.git, synced 2025-08-03 22:46:17 +00:00
[NTOS]: Rework the trap macros again. This time some added complexity was needed because of the requirement for runtime patching at boot. We get away with it fairly cleanly by implementing a simple "if" in ASM, which gives us a deterministic set of instructions (instead of whatever the compiler would emit). We then patch it if needed (see the next bullet).
[NTOS]: Support systems without SYSENTER. Nothing was actually disabling the recently enabled SYSEXIT mechanism. Now IRET is always used unless a SYSENTER-capable machine is detected, in which case the jump to the IRET code is patched into a jump to the SYSEXIT code.
[PERF]: Set the WP bit in CR0 later, allowing the existing runtime patches (and this new one) to work without requiring MmSetPageProtect. This saves TLB flushes and page-table walking/mapping on boot.
Left in some debug prints to see what buildbot reports; they will be removed if the change survives.
svn path=/trunk/; revision=45280
This commit is contained in:
parent e6111d4302
commit 90e0457d60
4 changed files with 146 additions and 48 deletions
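
The "if in ASM" from the first bullet can be demonstrated outside the kernel. The sketch below is not ReactOS code -- the function and label names are invented, and it assumes GCC on x86/x86-64 with ELF symbol naming -- but it shows the point: forcing the branch through the inline assembler yields a fixed 2-byte jnz rel8 at a known label, whose opcode and displacement byte can later be inspected (or patched).

/* sketch.c -- illustrative user-space example only */
#include <stdio.h>

extern char branch_site[];   /* label placed on the jnz below */
extern char taken_path[];    /* label at the branch target */

__attribute__((noinline))
static int selector(unsigned int flag)
{
    int result;
    __asm__ __volatile__
    (
        "testl $1, %1\n"                          /* like 'test $1, SegCs' */
        ".globl branch_site\nbranch_site:\n"
        "jnz taken_path\n"                        /* deterministic 2-byte jnz rel8 */
        "movl $1, %0\n"                           /* fall-through path */
        "jmp 1f\n"
        ".globl taken_path\ntaken_path:\n"
        "movl $2, %0\n"                           /* taken path */
        "1:\n"
        : "=&r"(result)
        : "r"(flag)
        : "cc"
    );
    return result;
}

int main(void)
{
    /* branch_site[0] is the jnz opcode (0x75), branch_site[1] the rel8
     * displacement -- exactly the two bytes a runtime patch would rewrite. */
    printf("opcode 0x%02x, rel8 %d\n",
           (unsigned char)branch_site[0], (signed char)branch_site[1]);
    printf("selector(0)=%d selector(1)=%d\n", selector(0), selector(1));
    return 0;
}
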
@@ -456,6 +456,9 @@ extern VOID NTAPI ExpInterlockedPopEntrySListFault(VOID);
extern VOID __cdecl CopyParams(VOID);
extern VOID __cdecl ReadBatch(VOID);
extern VOID __cdecl FrRestore(VOID);
extern CHAR KiSystemCallExitBranch[];
extern CHAR KiSystemCallExit[];
extern CHAR KiSystemCallExit2[];

//
// Trap Macros

@@ -190,7 +190,7 @@ KiUserTrap(IN PKTRAP_FRAME TrapFrame)
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
/* Do not mark this as DECLSPEC_NORETURN because possibly executing code follows it! */
KiSystemCallReturn(IN PKTRAP_FRAME TrapFrame)
{
    /* Restore nonvolatiles, EAX, and do a "jump" back to the kernel caller */

@@ -205,6 +205,7 @@ KiSystemCallReturn(IN PKTRAP_FRAME TrapFrame)
        "movl %c[e](%%esp), %%edx\n"
        "addl $%c[v],%%esp\n" /* A WHOLE *KERNEL* frame since we're not IRET'ing */
        "jmp *%%edx\n"
        ".globl _KiSystemCallExit2\n_KiSystemCallExit2:\n"
        :
        : "r"(TrapFrame),
          [b] "i"(KTRAP_FRAME_EBX),

@@ -216,7 +217,6 @@ KiSystemCallReturn(IN PKTRAP_FRAME TrapFrame)
          [v] "i"(KTRAP_FRAME_ESP)
        : "%esp"
    );
    UNREACHABLE;
}

VOID

@@ -227,6 +227,7 @@ KiSystemCallTrapReturn(IN PKTRAP_FRAME TrapFrame)
    /* Regular interrupt exit, but we only restore EAX as a volatile */
    __asm__ __volatile__
    (
        ".globl _KiSystemCallExit\n_KiSystemCallExit:\n"
        "movl %0, %%esp\n"
        "movl %c[b](%%esp), %%ebx\n"
        "movl %c[s](%%esp), %%esi\n"

@@ -405,6 +406,29 @@ KiIssueBop(VOID)
    asm volatile(".byte 0xC4\n.byte 0xC4\n");
}

VOID
FORCEINLINE
KiUserSystemCall(IN PKTRAP_FRAME TrapFrame)
{
    /*
     * Kernel call or user call?
     *
     * This decision is made in inlined assembly because we need to patch
     * the relative offset of the user-mode jump to point to the SYSEXIT
     * routine if the CPU supports it. The only way to guarantee that a
     * relative jnz/jz instruction is generated is to force it with the
     * inline assembler.
     */
    asm volatile
    (
        "test $1, %0\n" /* MODE_MASK */
        ".globl _KiSystemCallExitBranch\n_KiSystemCallExitBranch:\n"
        "jnz _KiSystemCallExit\n"
        :
        : "r"(TrapFrame->SegCs)
    );
}

//
// Generic Exit Routine
//

@@ -502,43 +526,35 @@ KiExitTrap(IN PKTRAP_FRAME TrapFrame,
    /* Check for system call -- a system call skips volatiles! */
    if (__builtin_expect(SkipBits.SkipVolatiles, 0)) /* More INTs than SYSCALLs */
    {
        /* Kernel call or user call? */
        if (__builtin_expect(KiUserTrap(TrapFrame), 1)) /* More Ring 3 than 0 */
        {
            /* Is SYSENTER supported and/or enabled, or are we stepping code? */
            if (__builtin_expect((KiFastSystemCallDisable) ||
                                 (TrapFrame->EFlags & EFLAGS_TF), 0))
            {
                /* Exit normally */
                KiSystemCallTrapReturn(TrapFrame);
            }
            else
            {
                /* Restore user FS */
                Ke386SetFs(KGDT_R3_TEB | RPL_MASK);

                /* Remove interrupt flag */
                TrapFrame->EFlags &= ~EFLAGS_INTERRUPT_MASK;
                __writeeflags(TrapFrame->EFlags);

                /* Exit through SYSEXIT */
                KiSystemCallSysExitReturn(TrapFrame);
            }
        }
        else
        {
            /* Restore EFLags */
            __writeeflags(TrapFrame->EFlags);
        /* User or kernel call? */
        KiUserSystemCall(TrapFrame);

        /* Restore EFLags */
        __writeeflags(TrapFrame->EFlags);

        /* Call is kernel, so do a jump back since this wasn't a real INT */
        KiSystemCallReturn(TrapFrame);
        }
    }
    else
    {
        /* Return from interrupt */
        KiTrapReturn(TrapFrame);
        /* Call is kernel, so do a jump back since this wasn't a real INT */
        KiSystemCallReturn(TrapFrame);

        /* If we got here, this is SYSEXIT: are we stepping code? */
        if (!(TrapFrame->EFlags & EFLAGS_TF))
        {
            /* Restore user FS */
            Ke386SetFs(KGDT_R3_TEB | RPL_MASK);

            /* Remove interrupt flag */
            TrapFrame->EFlags &= ~EFLAGS_INTERRUPT_MASK;
            __writeeflags(TrapFrame->EFlags);

            /* Exit through SYSEXIT */
            KiSystemCallSysExitReturn(TrapFrame);
        }

        /* Exit through IRETD, either due to debugging or due to lack of SYSEXIT */
        KiSystemCallTrapReturn(TrapFrame);
    }

    /* Return from interrupt */
    KiTrapReturn(TrapFrame);
}

//

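Putting the pieces together (KiUserSystemCall, the _KiSystemCallExit/_KiSystemCallExit2 labels, and the patch applied by KiEnableFastSyscallReturn below), the exit decision can be modelled as follows. This is an interpretation of the hunk above, not code from the commit; the enum and function are made up for illustration.

/* Runnable model of the exit decision; illustrative only. */
#include <stdio.h>

typedef enum { EXIT_JUMP_BACK_TO_KERNEL, EXIT_IRETD, EXIT_SYSEXIT } EXIT_PATH;

/* branch_patched models whether KiSystemCallExitBranch was retargeted from
 * _KiSystemCallExit (the IRETD exit) to _KiSystemCallExit2 (the SYSEXIT
 * preparation that follows KiSystemCallReturn). */
static EXIT_PATH ModelSystemCallExit(int user_mode_caller, int branch_patched, int eflags_tf)
{
    if (!user_mode_caller)
        return EXIT_JUMP_BACK_TO_KERNEL;   /* jnz not taken: KiSystemCallReturn path  */
    if (branch_patched && !eflags_tf)
        return EXIT_SYSEXIT;               /* patched jnz lands on _KiSystemCallExit2 */
    return EXIT_IRETD;                     /* _KiSystemCallExit, also the TF fallback */
}

int main(void)
{
    printf("kernel caller            -> %d\n", ModelSystemCallExit(0, 0, 0));
    printf("user caller, no SYSENTER -> %d\n", ModelSystemCallExit(1, 0, 0));
    printf("user caller, SYSENTER    -> %d\n", ModelSystemCallExit(1, 1, 0));
    printf("user caller, tracing     -> %d\n", ModelSystemCallExit(1, 1, 1));
    return 0;
}
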
@@ -45,6 +45,15 @@ KAFFINITY KeActiveProcessors = 1;
BOOLEAN KiI386PentiumLockErrataPresent;
BOOLEAN KiSMTProcessorsPresent;

/* The distance between SYSEXIT and IRETD return modes */
UCHAR KiSystemCallExitAdjust;

/* The offset that was applied -- either 0 or the value above */
UCHAR KiSystemCallExitAdjusted;

/* Whether the adjustment was already done once */
BOOLEAN KiFastCallCopyDoneOnce;

/* Flush data */
volatile LONG KiTbFlushTimeStamp;

@@ -799,18 +808,94 @@ KiLoadFastSyscallMachineSpecificRegisters(IN ULONG_PTR Context)
    return 0;
}

VOID
NTAPI
KiDisableFastSyscallReturn(VOID)
{
    /* Was it applied? */
    if (KiSystemCallExitAdjusted)
    {
        /* Restore the original value */
        KiSystemCallExitBranch[1] = KiSystemCallExitBranch[1] - KiSystemCallExitAdjusted;

        /* It's not adjusted anymore */
        KiSystemCallExitAdjusted = FALSE;
    }
}

VOID
NTAPI
KiEnableFastSyscallReturn(VOID)
{
    /* Check if the patch has already been done */
    if ((KiSystemCallExitAdjusted == KiSystemCallExitAdjust) &&
        (KiFastCallCopyDoneOnce))
    {
        DPRINT1("SYSEXIT Code Patch was already done!\n");
        return;
    }

    /* Make sure the offset is within the distance of a Jxx SHORT */
    if ((KiSystemCallExitBranch[1] - KiSystemCallExitAdjust) < 0x80)
    {
        /* Remove any existing code patch */
        DPRINT1("Correct SHORT size found\n");
        KiDisableFastSyscallReturn();

        /* We should have a JNZ there */
        ASSERT(KiSystemCallExitBranch[0] == 0x75);

        /* Do the patch */
        DPRINT1("Current jump offset: %lx\n", KiSystemCallExitBranch[1]);
        KiSystemCallExitAdjusted = KiSystemCallExitAdjust;
        KiSystemCallExitBranch[1] -= KiSystemCallExitAdjusted;
        DPRINT1("New jump offset: %lx\n", KiSystemCallExitBranch[1]);

        /* Remember that we've done it */
        KiFastCallCopyDoneOnce = TRUE;
    }
    else
    {
        /* This shouldn't happen unless we've messed the macros up */
        DPRINT1("Your compiled kernel is broken!\n");
        DbgBreakPoint();
    }
}

VOID
NTAPI
KiRestoreFastSyscallReturnState(VOID)
{
    /* FIXME: NT has support for SYSCALL, IA64-SYSENTER, etc. */

    /* Check if the CPU Supports fast system call */
    if (KeFeatureBits & KF_FAST_SYSCALL)
    {
        /* Check if it has been disabled */
        if (!KiFastSystemCallDisable)
        {
            /* KiSystemCallExit2 should come BEFORE KiSystemCallExit */
            DPRINT1("Exit2: %p Exit1: %p\n", KiSystemCallExit2, KiSystemCallExit);
            ASSERT(KiSystemCallExit2 < KiSystemCallExit);

            /* It's enabled, so we'll have to do a code patch */
            KiSystemCallExitAdjust = KiSystemCallExit - KiSystemCallExit2;
            DPRINT1("SYSENTER Capable Machine. Jump Offset Delta: %lx\n", KiSystemCallExitAdjust);
        }
        else
        {
            /* Disable fast system call */
            KeFeatureBits &= ~KF_FAST_SYSCALL;
        }
    }

    /* Now check if all CPUs support fast system call, and the registry allows it */
    if (KeFeatureBits & KF_FAST_SYSCALL)
    {
        /* Do an IPI to enable it */
        KeIpiGenericCall(KiLoadFastSyscallMachineSpecificRegisters, 0);
    }

    /* Perform the code patch that is required */
    KiEnableFastSyscallReturn();
}

ULONG_PTR

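The patch in KiEnableFastSyscallReturn is plain rel8 arithmetic: a 2-byte jnz encodes its target as (address after the jnz) + disp8, so retargeting the branch from _KiSystemCallExit to _KiSystemCallExit2 means subtracting (KiSystemCallExit - KiSystemCallExit2) from the displacement byte, which is exactly what KiDisableFastSyscallReturn later adds back. A stand-alone sketch of that arithmetic, with made-up addresses rather than values from any real build:

/* Stand-alone illustration of the displacement math; the numbers are invented. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
    /* Pretend layout: the jnz sits at 0x1000, its possible targets follow it. */
    unsigned long branch_site  = 0x1000;   /* _KiSystemCallExitBranch      */
    unsigned long exit_iretd   = 0x1040;   /* _KiSystemCallExit  (IRETD)   */
    unsigned long exit_sysexit = 0x1020;   /* _KiSystemCallExit2 (SYSEXIT) */

    /* rel8 of a 2-byte jnz: target = (site + 2) + disp8 */
    signed char disp   = (signed char)(exit_iretd - (branch_site + 2));  /* 0x3E */
    signed char adjust = (signed char)(exit_iretd - exit_sysexit);       /* 0x20 */

    /* The runtime patch: subtract the adjustment from the displacement byte.
     * Both values stay below 0x80, matching the Jxx SHORT check above. */
    signed char patched = disp - adjust;                                 /* 0x1E */
    assert(branch_site + 2 + patched == exit_sysexit);

    printf("disp 0x%02x -> patched 0x%02x\n",
           (unsigned char)disp, (unsigned char)patched);
    return 0;
}
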
@@ -31,7 +31,6 @@ VOID
NTAPI
KiInitMachineDependent(VOID)
{
    ULONG Protect;
    ULONG CpuCount;
    BOOLEAN FbCaching = FALSE;
    NTSTATUS Status;

@@ -153,12 +152,7 @@ KiInitMachineDependent(VOID)
            /* FIXME: Implement and enable XMM Page Zeroing for Mm */

            /* Patch the RtlPrefetchMemoryNonTemporal routine to enable it */
            Protect = MmGetPageProtect(NULL, RtlPrefetchMemoryNonTemporal);
            MmSetPageProtect(NULL,
                             RtlPrefetchMemoryNonTemporal,
                             Protect | PAGE_IS_WRITABLE);
            *(PCHAR)RtlPrefetchMemoryNonTemporal = 0x90;
            MmSetPageProtect(NULL, RtlPrefetchMemoryNonTemporal, Protect);
        }
    }

@@ -320,6 +314,9 @@
        /* FIXME: TODO */
        DPRINT1("ISR Time Limit not yet supported\n");
    }

    /* Set CR0 features based on detected CPU */
    KiSetCR0Bits();
}

VOID

@@ -401,9 +398,6 @@ KiInitializeKernel(IN PKPROCESS InitProcess,
    /* Detect and set the CPU Type */
    KiSetProcessorType();

    /* Set CR0 features based on detected CPU */
    KiSetCR0Bits();

    /* Check if an FPU is present */
    NpxPresent = KiIsNpxPresent();

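On the [PERF] bullet: CR0.WP (bit 16) is what makes ring-0 writes honor the read-only bit in the page tables, so deferring it during boot keeps code patches like the ones above writable without MmSetPageProtect round-trips. A tiny hedged illustration of the bit itself, operating on a plain value rather than the real control register:

/* CR0.WP is bit 16; while it is clear, supervisor-mode writes ignore
 * page-level write protection. Illustration only; the CR0 value is made up. */
#include <stdio.h>

#define CR0_WP (1UL << 16)

int main(void)
{
    unsigned long cr0 = 0x80000011UL;  /* made-up CR0: PG | ET | PE, WP clear */
    printf("WP %s\n", (cr0 & CR0_WP) ? "on" : "off");
    cr0 |= CR0_WP;                     /* the bit whose setup this commit defers */
    printf("after setting: WP %s\n", (cr0 & CR0_WP) ? "on" : "off");
    return 0;
}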