ReactOS SMP Bringup Lite:

- Add a simple and hacky spinlock implementation to the HAL and kernel. It treats queued spinlocks as normal spinlocks and does not support debug timeout, but works well enough to expose bugs in spinlock usage.
- Implement KiProcessDeferredReadyList, a simple wrapper around KiDeferredReadyThread.
- Add missing PRCB locking in KiDeferredReadyThread.
- Implement SMP handling in KiRetireDpcList.
- Acquire the spinlock in KeSynchronizeExecution for SMP.
- Stub and export Kii386SpinOnSpinLock -- not used yet.
- Fix locking in KiAcquire/ReleaseDispatcherLockAtDpcLevel -- KeAcquireQueuedSpinLockAtDpcLevel takes the lock queue pointer, not the index.
- "Implement" KiRundownThread for SMP.
- Fix some sanity checks in the SMP context switch code and implement a stubbed SMP case.
- Define SYNCH_LEVEL in asm.h and use it in HAL's KeRaiseIrqlToSynchLevel.

To try the MP kernel on UP (it does not work on multiple processors yet), compile ntkrnlmp, rename it to ntoskrnl and add it to your installation. This requires a HAL with CONFIG_SMP enabled in HAL's spinlock.c and irq.S. Booting with the MP kernel does not work because of a weird regression introduced sometime in the last few months -- it appears MmProbeAndLockPages fails for a user-mode stack pointer (no, I have no idea why).

svn path=/trunk/; revision=43244
This commit is contained in:
Stefan Ginsberg 2009-09-30 20:30:57 +00:00
parent 56bc75a184
commit 9fdf04b17c
11 changed files with 491 additions and 83 deletions

View file

@ -7,6 +7,9 @@
/* INCLUDES ******************************************************************/
/* Enable this (and the define in spinlock.c) to make UP HAL work for MP Kernel */
/* #define CONFIG_SMP */
#include <asm.h>
#include <internal/i386/asmmacro.S>
.intel_syntax noprefix
@ -600,20 +603,20 @@ _KeRaiseIrqlToSynchLevel@0:
cli
/* Mask out interrupts */
mov eax, KiI8259MaskTable[DISPATCH_LEVEL*4]
mov eax, KiI8259MaskTable[SYNCH_LEVEL*4]
or eax, PCR[KPCR_IDR]
out 0x21, al
shr eax, 8
out 0xA1, al
/* Return the old IRQL, enable interrupts and set to DISPATCH */
/* Return the old IRQL, enable interrupts and set to SYNCH */
mov eax, PCR[KPCR_IRQL]
mov dword ptr PCR[KPCR_IRQL], DISPATCH_LEVEL
mov dword ptr PCR[KPCR_IRQL], SYNCH_LEVEL
popf
#if DBG
/* Validate raise */
cmp eax, DISPATCH_LEVEL
cmp eax, SYNCH_LEVEL
ja InvalidSyRaise
#endif
@ -625,7 +628,7 @@ InvalidSyRaise:
/* Bugcheck the system */
push 2
push 0
push DISPATCH_LEVEL
push SYNCH_LEVEL
push eax
push IRQL_NOT_GREATER_OR_EQUAL
call _KeBugCheckEx@20

View file

@ -8,40 +8,113 @@
/* INCLUDES ******************************************************************/
/* Enable this (and the define in irq.S) to make UP HAL work for MP Kernel */
/* #define CONFIG_SMP */
#include <hal.h>
#define NDEBUG
#include <debug.h>
#undef KeAcquireSpinLock
#undef KeReleaseSpinLock
#undef KeLowerIrql
#undef KeRaiseIrql
//
// This is duplicated from ke_x.h
//
#ifdef CONFIG_SMP
//
// Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
//
// NOTE: This is duplicated from the kernel's ke_x.h; keep the two in sync.
//
FORCEINLINE
VOID
KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
{
    /* Make sure that we don't own the lock already (on checked builds the
       owner stores its KTHREAD pointer OR'ed with 1 into the lock) */
    if (((KSPIN_LOCK)KeGetCurrentThread() | 1) == *SpinLock)
    {
        /* We do, bugcheck! */
        KeBugCheckEx(SPIN_LOCK_ALREADY_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
    }

    /* Start acquire loop */
    for (;;)
    {
        /* Try to acquire it (bit 0 set means "held") */
        if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
        {
            /* Value changed... wait until it's unlocked.
               NOTE(review): on checked builds the held value is KTHREAD|1,
               not 1, so this inner wait exits immediately and we fall back
               to hammering the interlocked operation -- confirm intended. */
            while (*(volatile KSPIN_LOCK *)SpinLock == 1)
            {
#if DBG
                /* On debug builds, we use a much slower but useful routine */
                //Kii386SpinOnSpinLock(SpinLock, 5);

                /* FIXME: Do normal yield for now */
                YieldProcessor();
#else
                /* Otherwise, just yield and keep looping */
                YieldProcessor();
#endif
            }
        }
        else
        {
#if DBG
            /* On debug builds, we OR in the KTHREAD so ownership checks and
               the release-time sanity check can identify the owner */
            *SpinLock = (KSPIN_LOCK)KeGetCurrentThread() | 1;
#endif
            /* All is well, break out */
            break;
        }
    }
}
//
// Spinlock Release at IRQL >= DISPATCH_LEVEL
//
// NOTE: This is duplicated from the kernel's ke_x.h; keep the two in sync.
//
FORCEINLINE
VOID
KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
{
#if DBG
    /* Make sure that the threads match: the acquirer stored its KTHREAD
       pointer OR'ed with 1 into the lock on checked builds */
    if (((KSPIN_LOCK)KeGetCurrentThread() | 1) != *SpinLock)
    {
        /* They don't, bugcheck */
        KeBugCheckEx(SPIN_LOCK_NOT_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
    }
#endif
    /* Clear the lock (interlocked to publish the release to other CPUs) */
    InterlockedAnd((PLONG)SpinLock, 0);
}
#else
//
// Spinlock Acquire at IRQL >= DISPATCH_LEVEL
//
FORCEINLINE
VOID
KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
{
    /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH -- being at
       or above DISPATCH_LEVEL already provides mutual exclusion on a single
       processor, so acquiring is a no-op. The macro only references the
       parameter to silence unused-parameter warnings. */
    UNREFERENCED_PARAMETER(SpinLock);
}
//
// Spinlock Release at IRQL >= DISPATCH_LEVEL
//
FORCEINLINE
VOID
KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
{
    /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH -- nothing
       was acquired, so there is nothing to release. The macro only
       references the parameter to silence unused-parameter warnings. */
    UNREFERENCED_PARAMETER(SpinLock);
}
#endif
/* FUNCTIONS *****************************************************************/
/*
* @implemented
*/
VOID
NTAPI
KeLowerIrql(KIRQL NewIrql)
{
/* Call the fastcall function */
KfLowerIrql(NewIrql);
}
/*
* @implemented
*/
VOID
NTAPI
KeRaiseIrql(KIRQL NewIrql,
PKIRQL OldIrql)
{
/* Call the fastcall function */
*OldIrql = KfRaiseIrql(NewIrql);
}
/*
* @implemented
*/
@ -61,8 +134,14 @@ KIRQL
FASTCALL
KeAcquireSpinLockRaiseToSynch(PKSPIN_LOCK SpinLock)
{
/* Simply raise to dispatch */
return KfRaiseIrql(DISPATCH_LEVEL);
KIRQL OldIrql;
/* Raise to sync */
KeRaiseIrql(SYNCH_LEVEL, &OldIrql);
/* Acquire the lock and return */
KxAcquireSpinLock(SpinLock);
return OldIrql;
}
/*
@ -84,8 +163,12 @@ KIRQL
FASTCALL
KfAcquireSpinLock(PKSPIN_LOCK SpinLock)
{
/* Simply raise to dispatch */
return KfRaiseIrql(DISPATCH_LEVEL);
KIRQL OldIrql;
/* Raise to dispatch and acquire the lock */
KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
KxAcquireSpinLock(SpinLock);
return OldIrql;
}
/*
@ -96,8 +179,9 @@ FASTCALL
KfReleaseSpinLock(PKSPIN_LOCK SpinLock,
KIRQL OldIrql)
{
/* Simply lower IRQL back */
KfLowerIrql(OldIrql);
/* Release the lock and lower IRQL back */
KxReleaseSpinLock(SpinLock);
KeLowerIrql(OldIrql);
}
/*
@ -107,8 +191,14 @@ KIRQL
FASTCALL
KeAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
{
/* Simply raise to dispatch */
return KfRaiseIrql(DISPATCH_LEVEL);
KIRQL OldIrql;
/* Raise to dispatch */
KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
/* Acquire the lock */
KxAcquireSpinLock(KeGetCurrentPrcb()->LockQueue[LockNumber].Lock); // HACK
return OldIrql;
}
/*
@ -118,8 +208,14 @@ KIRQL
FASTCALL
KeAcquireQueuedSpinLockRaiseToSynch(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
{
/* Simply raise to dispatch */
return KfRaiseIrql(DISPATCH_LEVEL);
KIRQL OldIrql;
/* Raise to synch */
KeRaiseIrql(SYNCH_LEVEL, &OldIrql);
/* Acquire the lock */
KxAcquireSpinLock(KeGetCurrentPrcb()->LockQueue[LockNumber].Lock); // HACK
return OldIrql;
}
/*
@ -130,8 +226,15 @@ FASTCALL
KeAcquireInStackQueuedSpinLock(IN PKSPIN_LOCK SpinLock,
IN PKLOCK_QUEUE_HANDLE LockHandle)
{
/* Simply raise to dispatch */
LockHandle->OldIrql = KfRaiseIrql(DISPATCH_LEVEL);
/* Set up the lock */
LockHandle->LockQueue.Next = NULL;
LockHandle->LockQueue.Lock = SpinLock;
/* Raise to dispatch */
KeRaiseIrql(DISPATCH_LEVEL, &LockHandle->OldIrql);
/* Acquire the lock */
KxAcquireSpinLock(LockHandle->LockQueue.Lock); // HACK
}
/*
@ -142,8 +245,15 @@ FASTCALL
KeAcquireInStackQueuedSpinLockRaiseToSynch(IN PKSPIN_LOCK SpinLock,
IN PKLOCK_QUEUE_HANDLE LockHandle)
{
/* Simply raise to synch */
LockHandle->OldIrql = KfRaiseIrql(SYNCH_LEVEL);
/* Set up the lock */
LockHandle->LockQueue.Next = NULL;
LockHandle->LockQueue.Lock = SpinLock;
/* Raise to synch */
KeRaiseIrql(SYNCH_LEVEL, &LockHandle->OldIrql);
/* Acquire the lock */
KxAcquireSpinLock(LockHandle->LockQueue.Lock); // HACK
}
/*
@ -154,8 +264,11 @@ FASTCALL
KeReleaseQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
IN KIRQL OldIrql)
{
/* Simply lower IRQL back */
KfLowerIrql(OldIrql);
/* Release the lock */
KxReleaseSpinLock(KeGetCurrentPrcb()->LockQueue[LockNumber].Lock); // HACK
/* Lower IRQL back */
KeLowerIrql(OldIrql);
}
/*
@ -166,7 +279,8 @@ FASTCALL
KeReleaseInStackQueuedSpinLock(IN PKLOCK_QUEUE_HANDLE LockHandle)
{
/* Simply lower IRQL back */
KfLowerIrql(LockHandle->OldIrql);
KxReleaseSpinLock(LockHandle->LockQueue.Lock); // HACK
KeLowerIrql(LockHandle->OldIrql);
}
/*
@ -177,8 +291,13 @@ FASTCALL
KeTryToAcquireQueuedSpinLockRaiseToSynch(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
IN PKIRQL OldIrql)
{
/* Simply raise to dispatch */
*OldIrql = KfRaiseIrql(DISPATCH_LEVEL);
#ifdef CONFIG_SMP
ASSERT(FALSE); // FIXME: Unused
while (TRUE);
#endif
/* Simply raise to synch */
KeRaiseIrql(SYNCH_LEVEL, OldIrql);
/* Always return true on UP Machines */
return TRUE;
@ -192,11 +311,39 @@ FASTCALL
KeTryToAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
OUT PKIRQL OldIrql)
{
#ifdef CONFIG_SMP
ASSERT(FALSE); // FIXME: Unused
while (TRUE);
#endif
/* Simply raise to dispatch */
*OldIrql = KfRaiseIrql(DISPATCH_LEVEL);
KeRaiseIrql(DISPATCH_LEVEL, OldIrql);
/* Always return true on UP Machines */
return TRUE;
}
/* EOF */
#undef KeRaiseIrql
/*
 * @implemented
 *
 * KeRaiseIrql
 *     Exported stdcall wrapper that raises the current IRQL to NewIrql and
 *     returns the previous IRQL through OldIrql by delegating to the
 *     fastcall KfRaiseIrql. (The KeRaiseIrql macro is #undef'd just above
 *     so this definition does not expand into itself.)
 */
VOID
NTAPI
KeRaiseIrql(KIRQL NewIrql,
            PKIRQL OldIrql)
{
    /* Call the fastcall function */
    *OldIrql = KfRaiseIrql(NewIrql);
}
#undef KeLowerIrql
/*
 * @implemented
 *
 * KeLowerIrql
 *     Exported stdcall wrapper that lowers the current IRQL to NewIrql by
 *     delegating to the fastcall KfLowerIrql. (The KeLowerIrql macro is
 *     #undef'd just above so this definition does not expand into itself.)
 */
VOID
NTAPI
KeLowerIrql(KIRQL NewIrql)
{
    /* Call the fastcall function */
    KfLowerIrql(NewIrql);
}

View file

@ -568,8 +568,22 @@ Author:
#define APC_LEVEL 0x1
#define DISPATCH_LEVEL 0x2
#define CLOCK2_LEVEL 0x1C
#define IPI_LEVEL 0x1D
#define HIGH_LEVEL 0x1F
//
// Synchronization-level IRQL
//
#ifndef CONFIG_SMP
#define SYNCH_LEVEL DISPATCH_LEVEL
#else
#if (NTDDI_VERSION < NTDDI_WS03)
#define SYNCH_LEVEL (IPI_LEVEL - 0x1)
#else
#define SYNCH_LEVEL (IPI_LEVEL - 0x2)
#endif
#endif
//
// Quantum Decrements
//

View file

@ -245,7 +245,7 @@ FASTCALL
KiExitDispatcher(KIRQL OldIrql);
VOID
NTAPI
FASTCALL
KiDeferredReadyThread(IN PKTHREAD Thread);
PKTHREAD

View file

@ -437,12 +437,20 @@ FORCEINLINE
VOID
KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
{
/* Make sure that we don't own the lock already */
if (((KSPIN_LOCK)KeGetCurrentThread() | 1) == *SpinLock)
{
/* We do, bugcheck! */
KeBugCheckEx(SPIN_LOCK_ALREADY_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
}
/* Start acquire loop */
for (;;)
{
/* Try to acquire it */
if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
{
/* Value changed... wait until it's locked */
/* Value changed... wait until it's unlocked */
while (*(volatile KSPIN_LOCK *)SpinLock == 1)
{
#if DBG
@ -553,7 +561,8 @@ VOID
KiAcquireDispatcherLockAtDpcLevel(VOID)
{
/* Acquire the dispatcher lock */
KeAcquireQueuedSpinLockAtDpcLevel(LockQueueDispatcherLock);
KeAcquireQueuedSpinLockAtDpcLevel(&KeGetCurrentPrcb()->
LockQueue[LockQueueDispatcherLock]);
}
FORCEINLINE
@ -561,11 +570,12 @@ VOID
KiReleaseDispatcherLockFromDpcLevel(VOID)
{
/* Release the dispatcher lock */
KeReleaseQueuedSpinLockFromDpcLevel(LockQueueDispatcherLock);
KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
LockQueue[LockQueueDispatcherLock]);
}
//
// This routine inserts a thread into the deferred ready list of the given CPU
// This routine inserts a thread into the deferred ready list of the current CPU
//
FORCEINLINE
VOID
@ -613,7 +623,7 @@ KiSetThreadSwapBusy(IN PKTHREAD Thread)
// This routine acquires the PRCB lock so that only one caller can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must be be only acquired
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
@ -649,7 +659,8 @@ FORCEINLINE
VOID
KiReleasePrcbLock(IN PKPRCB Prcb)
{
/* Make sure it's acquired! */
/* Make sure we are above dispatch and the lock is acquired! */
ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
ASSERT(Prcb->PrcbLock != 0);
/* Release it */
@ -696,6 +707,9 @@ FORCEINLINE
VOID
KiReleaseThreadLock(IN PKTHREAD Thread)
{
/* Make sure we are still above dispatch */
ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
/* Release it */
InterlockedAnd((PLONG)&Thread->ThreadLock, 0);
}
@ -729,10 +743,8 @@ FORCEINLINE
VOID
KiRundownThread(IN PKTHREAD Thread)
{
#if defined(_M_IX86) || defined(_M_AMD64)
/* FIXME: TODO */
ASSERTMSG("Not yet implemented\n", FALSE);
#endif
/* Nothing to do */
return;
}
FORCEINLINE

View file

@ -550,6 +550,9 @@ KiRetireDpcList(IN PKPRCB Prcb)
PKDEFERRED_ROUTINE DeferredRoutine;
PVOID DeferredContext, SystemArgument1, SystemArgument2;
ULONG_PTR TimerHand;
#ifdef CONFIG_SMP
KIRQL OldIrql;
#endif
/* Get data and list variables before starting anything else */
DpcData = &Prcb->DpcData[DPC_NORMAL];
@ -631,12 +634,23 @@ KiRetireDpcList(IN PKPRCB Prcb)
Prcb->DpcRoutineActive = FALSE;
Prcb->DpcInterruptRequested = FALSE;
#ifdef CONFIG_SMP
/* Check if we have deferred threads */
if (Prcb->DeferredReadyListHead.Next)
{
/* FIXME: 2K3-style scheduling not implemeted */
ASSERT(FALSE);
/* Re-enable interrupts and raise to synch */
_enable();
OldIrql = KeRaiseIrqlToSynchLevel();
/* Process deferred threads */
KiProcessDeferredReadyList(Prcb);
/* Lower IRQL back and disable interrupts */
KeLowerIrql(OldIrql);
_disable();
}
#endif
} while (DpcData->DpcQueueDepth != 0);
}

View file

@ -351,8 +351,8 @@ GetSwapLock:
jz NotBusy
pause
jmp GetSwapLock
#endif
NotBusy:
#endif
/* Increase context switches (use ES for lazy load) */
inc dword ptr es:[ebx+KPCR_CONTEXT_SWITCHES]
@ -381,9 +381,9 @@ AfterTrace:
/* Check NPX State */
cmp byte ptr [edi+KTHREAD_NPX_STATE], NPX_STATE_LOADED
jz NpxLoaded
SetStack:
#endif
SetStack:
/* Set new stack */
mov [edi+KTHREAD_KERNEL_STACK], esp
@ -423,7 +423,7 @@ StackOk:
test [ebp+KPROCESS_ACTIVE_PROCESSORS], ecx
jz WrongActiveCpu
test [eax+KPROCESS_ACTIVE_PROCESSORS], ecx
jz WrongActiveCpu
jnz WrongActiveCpu
#endif
#endif
@ -563,8 +563,29 @@ NewCr0:
#ifdef CONFIG_SMP
NpxLoaded:
/* FIXME: TODO */
int 3
/* Mask out FPU flags */
and edx, ~(CR0_MP + CR0_EM + CR0_TS)
/* Get the NPX Frame */
mov ecx, [edi+KTHREAD_INITIAL_STACK]
sub ecx, NPX_FRAME_LENGTH
/* Check if we have a new CR0 */
cmp ebp, edx
jz Cr0Equal
/* We do, update it */
mov cr0, edx
mov ebp, edx
Cr0Equal:
/* Save the NPX State */
fxsave [ecx]
mov byte ptr [edi+KTHREAD_NPX_STATE], NPX_STATE_NOT_LOADED
/* Clear the NPX Thread */
mov dword ptr [ebx+KPCR_NPX_THREAD], 0
/* Jump back */
jmp SetStack
@ -762,7 +783,7 @@ SwapContext:
#ifdef CONFIG_SMP
SameThread:
/* Clear the next thread, and put the thready as ready after lock release */
/* Clear the next thread, and put the thread as ready after lock release */
and dword ptr [ebx+KPCR_PRCB_NEXT_THREAD], 0
and dword ptr [ebx+KPCR_PRCB_PRCB_LOCK], 0
and byte ptr [edi+KTHREAD_STATE_], Ready
@ -837,9 +858,9 @@ _KiSwapProcess@8:
/* Sanity check */
#if DBG
test dword ptr [edx+KPROCESS_ACTIVE_PROCESSORS], 0
test [edx+KPROCESS_ACTIVE_PROCESSORS], ecx
jz WrongCpu1
test dword ptr [eax+KPROCESS_ACTIVE_PROCESSORS], 0
test [eax+KPROCESS_ACTIVE_PROCESSORS], ecx
jnz WrongCpu2
#endif
#endif

View file

@ -2736,12 +2736,26 @@ _KeSynchronizeExecution@12:
/* Go to DIRQL */
mov cl, [ebx+KINTERRUPT_SYNCHRONIZE_IRQL]
call @KfRaiseIrql@4
push eax
#ifdef CONFIG_SMP
/* Acquire the interrupt spinlock FIXME: Write this in assembly */
mov ecx, [ebx+KINTERRUPT_ACTUAL_LOCK]
call @KefAcquireSpinLockAtDpcLevel@4
#endif
/* Call the routine */
push eax
push [esp+20]
call [esp+20]
#ifdef CONFIG_SMP
/* Release the interrupt spinlock FIXME: Write this in assembly */
push eax
mov ecx, [ebx+KINTERRUPT_ACTUAL_LOCK]
call @KefReleaseSpinLockFromDpcLevel@4
pop eax
#endif
/* Lower IRQL */
mov ebx, eax
pop ecx
@ -2752,3 +2766,32 @@ _KeSynchronizeExecution@12:
pop ebx
ret 12
.endfunc
/*++
 * Kii386SpinOnSpinLock
 *
 * Debug spin-wait routine for kernel spinlocks. Currently a stub: it is
 * exported (stdcall, 8 bytes of arguments) but not yet used -- the only
 * call site (in the spinlock acquire path) is commented out.
 *
 * Params:
 *     SpinLock - Pointer to the KSPIN_LOCK to spin on.
 *
 *     Flags - Spin control flags. NOTE(review): presumably selects debug
 *             timeout/deadlock-detection behavior (the commented-out caller
 *             passes 5) -- confirm when the routine is implemented.
 *
 * Returns:
 *     None.
 *
 * Remarks:
 *     On SMP builds the stub traps into the debugger (int 3); on UP builds
 *     it simply returns without touching the lock.
 *
 *--*/
.globl _Kii386SpinOnSpinLock@8
.func Kii386SpinOnSpinLock@8
_Kii386SpinOnSpinLock@8:
#ifdef CONFIG_SMP
    /* FIXME: TODO -- not implemented yet, trap so misuse is caught */
    int 3
#endif
    ret 8
.endfunc

View file

@ -17,6 +17,11 @@
/* PRIVATE FUNCTIONS *********************************************************/
#if 0
//
// FIXME: The queued spinlock routines are broken.
//
VOID
FASTCALL
KeAcquireQueuedSpinLockAtDpcLevel(IN PKSPIN_LOCK_QUEUE LockHandle)
@ -84,6 +89,55 @@ KeReleaseQueuedSpinLockFromDpcLevel(IN PKSPIN_LOCK_QUEUE LockHandle)
#endif
}
#else
//
// HACK: Hacked to work like normal spinlocks
//
VOID
FASTCALL
KeAcquireQueuedSpinLockAtDpcLevel(IN PKSPIN_LOCK_QUEUE LockHandle)
{
#ifdef CONFIG_SMP
    /* Queued locks are hacked to behave like plain spinlocks: only the Lock
       pointer inside the queue entry is used. The caller must already be at
       DISPATCH_LEVEL or above -- bugcheck otherwise. */
    if (!(KeGetCurrentIrql() >= DISPATCH_LEVEL))
    {
        /* Wrong IRQL -- fatal */
        KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
                     (ULONG_PTR)LockHandle->Lock,
                     KeGetCurrentIrql(),
                     0,
                     0);
    }

    /* Take the underlying spinlock via the inlined helper */
    KxAcquireSpinLock(LockHandle->Lock);
#endif
}
VOID
FASTCALL
KeReleaseQueuedSpinLockFromDpcLevel(IN PKSPIN_LOCK_QUEUE LockHandle)
{
#ifdef CONFIG_SMP
    /* Queued locks are hacked to behave like plain spinlocks: only the Lock
       pointer inside the queue entry is used. The caller must still be at
       DISPATCH_LEVEL or above -- bugcheck otherwise. */
    if (!(KeGetCurrentIrql() >= DISPATCH_LEVEL))
    {
        /* Wrong IRQL -- fatal */
        KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
                     (ULONG_PTR)LockHandle->Lock,
                     KeGetCurrentIrql(),
                     0,
                     0);
    }

    /* Drop the underlying spinlock via the inlined helper */
    KxReleaseSpinLock(LockHandle->Lock);
#endif
}
#endif
/* PUBLIC FUNCTIONS **********************************************************/
/*
@ -137,6 +191,17 @@ VOID
NTAPI
KeAcquireSpinLockAtDpcLevel(IN PKSPIN_LOCK SpinLock)
{
/* Make sure we are at DPC or above! */
if (KeGetCurrentIrql() < DISPATCH_LEVEL)
{
/* We aren't -- bugcheck */
KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
(ULONG_PTR)SpinLock,
KeGetCurrentIrql(),
0,
0);
}
/* Do the inlined function */
KxAcquireSpinLock(SpinLock);
}
@ -149,7 +214,18 @@ VOID
NTAPI
KeReleaseSpinLockFromDpcLevel(IN PKSPIN_LOCK SpinLock)
{
/* Do the lined function */
/* Make sure we are at DPC or above! */
if (KeGetCurrentIrql() < DISPATCH_LEVEL)
{
/* We aren't -- bugcheck */
KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
(ULONG_PTR)SpinLock,
KeGetCurrentIrql(),
0,
0);
}
/* Do the inlined function */
KxReleaseSpinLock(SpinLock);
}
@ -160,6 +236,17 @@ VOID
FASTCALL
KefAcquireSpinLockAtDpcLevel(IN PKSPIN_LOCK SpinLock)
{
/* Make sure we are at DPC or above! */
if (KeGetCurrentIrql() < DISPATCH_LEVEL)
{
/* We aren't -- bugcheck */
KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
(ULONG_PTR)SpinLock,
KeGetCurrentIrql(),
0,
0);
}
/* Do the inlined function */
KxAcquireSpinLock(SpinLock);
}
@ -171,7 +258,18 @@ VOID
FASTCALL
KefReleaseSpinLockFromDpcLevel(IN PKSPIN_LOCK SpinLock)
{
/* Do the lined function */
/* Make sure we are at DPC or above! */
if (KeGetCurrentIrql() < DISPATCH_LEVEL)
{
/* We aren't -- bugcheck */
KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
(ULONG_PTR)SpinLock,
KeGetCurrentIrql(),
0,
0);
}
/* Do the inlined function */
KxReleaseSpinLock(SpinLock);
}
@ -243,7 +341,23 @@ KeAcquireInStackQueuedSpinLockAtDpcLevel(IN PKSPIN_LOCK SpinLock,
/* Set it up properly */
LockHandle->LockQueue.Next = NULL;
LockHandle->LockQueue.Lock = SpinLock;
#if 0
KeAcquireQueuedSpinLockAtDpcLevel(LockHandle->LockQueue.Next);
#else
/* Make sure we are at DPC or above! */
if (KeGetCurrentIrql() < DISPATCH_LEVEL)
{
/* We aren't -- bugcheck */
KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
(ULONG_PTR)LockHandle->LockQueue.Lock,
KeGetCurrentIrql(),
0,
0);
}
/* Acquire the lock */
KxAcquireSpinLock(LockHandle->LockQueue.Lock); // HACK
#endif
#endif
}
@ -255,8 +369,24 @@ FASTCALL
KeReleaseInStackQueuedSpinLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE LockHandle)
{
#ifdef CONFIG_SMP
#if 0
/* Call the internal function */
KeReleaseQueuedSpinLockFromDpcLevel(LockHandle->LockQueue.Next);
#else
/* Make sure we are at DPC or above! */
if (KeGetCurrentIrql() < DISPATCH_LEVEL)
{
/* We aren't -- bugcheck */
KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
(ULONG_PTR)LockHandle->LockQueue.Lock,
KeGetCurrentIrql(),
0,
0);
}
/* Release the lock */
KxReleaseSpinLock(LockHandle->LockQueue.Lock); // HACK
#endif
#endif
}
@ -324,5 +454,3 @@ KeTestSpinLock(IN PKSPIN_LOCK SpinLock)
/* Spinlock appears to be free */
return TRUE;
}
/* EOF */

View file

@ -24,7 +24,7 @@ FASTCALL
KiIdleSchedule(IN PKPRCB Prcb)
{
/* FIXME: TODO */
ASSERTMSG("Not yet implemented\n", FALSE);
ASSERTMSG("SMP: Not yet implemented\n", FALSE);
return NULL;
}
@ -32,8 +32,29 @@ VOID
FASTCALL
KiProcessDeferredReadyList(IN PKPRCB Prcb)
{
/* FIXME: TODO */
ASSERTMSG("Not yet implemented\n", FALSE);
PSINGLE_LIST_ENTRY ListEntry;
PKTHREAD Thread;
/* Make sure there is something on the ready list */
ASSERT(Prcb->DeferredReadyListHead.Next != NULL);
/* Get the first entry and clear the list */
ListEntry = Prcb->DeferredReadyListHead.Next;
Prcb->DeferredReadyListHead.Next = NULL;
/* Start processing loop */
do
{
/* Get the thread and advance to the next entry */
Thread = CONTAINING_RECORD(ListEntry, KTHREAD, SwapListEntry);
ListEntry = ListEntry->Next;
/* Make the thread ready */
KiDeferredReadyThread(Thread);
} while (ListEntry != NULL);
/* Make sure the ready list is still empty */
ASSERT(Prcb->DeferredReadyListHead.Next == NULL);
}
VOID
@ -46,7 +67,7 @@ KiQueueReadyThread(IN PKTHREAD Thread,
}
VOID
NTAPI
FASTCALL
KiDeferredReadyThread(IN PKTHREAD Thread)
{
PKPRCB Prcb;
@ -191,9 +212,10 @@ KiDeferredReadyThread(IN PKTHREAD Thread)
OldPriority = Thread->Priority;
Thread->Preempted = FALSE;
/* Queue the thread on CPU 0 and get the PRCB */
/* Queue the thread on CPU 0 and get the PRCB and lock it */
Thread->NextProcessor = 0;
Prcb = KiProcessorBlock[0];
KiAcquirePrcbLock(Prcb);
/* Check if we have an idle summary */
if (KiIdleSummary)
@ -202,6 +224,9 @@ KiDeferredReadyThread(IN PKTHREAD Thread)
KiIdleSummary = 0;
Thread->State = Standby;
Prcb->NextThread = Thread;
/* Unlock the PRCB and return */
KiReleasePrcbLock(Prcb);
return;
}
@ -300,6 +325,7 @@ KiSelectNextThread(IN PKPRCB Prcb)
Prcb->IdleSchedule = TRUE;
/* FIXME: SMT support */
ASSERTMSG("SMP: Not yet implemented\n", FALSE);
}
/* Sanity checks and return the thread */

View file

@ -718,7 +718,7 @@
@ fastcall KiReleaseSpinLock(ptr)
@ cdecl KiUnexpectedInterrupt()
#ifdef _M_IX86
;Kii386SpinOnSpinLock
@ stdcall Kii386SpinOnSpinLock(ptr long)
#endif
@ stdcall LdrAccessResource(ptr ptr ptr ptr)
@ stdcall LdrEnumResources(ptr ptr long ptr ptr)