Mirror of https://github.com/reactos/reactos.git
ReactOS SMP Bringup Lite:
- Add simple and hacky spinlock implementation to HAL and kernel. It treats queued spinlocks as normal spinlocks and does not support debug timeout, but works well enough to expose bugs in spinlock usage.
- Implement KiProcessDeferredReadyList, a simple wrapper around KiDeferredReadyThread.
- Add missing PRCB locking in KiDeferredReadyThread.
- Implement SMP handling in KiRetireDpcList.
- Acquire the spinlock in KeSynchronizeExecution for SMP.
- Stub and export Kii386SpinOnSpinLock -- not used yet.
- Fix locking in KiAcquire/ReleaseDispatcherLockAtDpcLevel -- KeAcquireQueuedSpinLockAtDpcLevel takes the lock queue pointer, not the index.
- "Implement" KiRundownThread for SMP.
- Fix some sanity checks in the SMP context switch code and implement a stubbed SMP case.
- Define SYNCH_LEVEL in asm.h and use it in HAL's KeRaiseIrqlToSynchLevel.

To try the MP kernel on UP (it does not work on multiple processors yet), compile ntkrnlmp, rename it to ntoskrnl and add it to your installation. This requires a HAL with CONFIG_SMP enabled in HAL's spinlock.c and irq.S.

Booting with the MP kernel does not work because of a weird regression introduced sometime in the last few months -- it appears MmProbeAndLockPages fails for a user mode stack pointer (no, I have no idea why).

svn path=/trunk/; revision=43244
This commit is contained in:
parent 56bc75a184
commit 9fdf04b17c

11 changed files with 491 additions and 83 deletions
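As a quick orientation before the diff: the "queued spinlocks as normal spinlocks" hack amounts to spinning on bit 0 of the lock word on acquire and clearing the whole word on release; the queued entry points simply forward to that logic through LockQueue[LockNumber].Lock (marked // HACK below). The following stand-alone, user-mode C sketch mirrors the KxAcquireSpinLock/KxReleaseSpinLock logic added in this commit -- it is not ReactOS code, and the names and the GCC __atomic builtins are illustration-only assumptions:

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t SPIN_LOCK_SKETCH;

/* Roughly what the SMP KxAcquireSpinLock below does: set bit 0, or spin until it clears */
static void AcquireSpinLockSketch(SPIN_LOCK_SKETCH *Lock)
{
    for (;;)
    {
        /* Try to take the lock; a clear bit 0 in the old value means it was free */
        if ((__atomic_fetch_or(Lock, 1, __ATOMIC_ACQUIRE) & 1) == 0)
            break;

        /* Someone else holds it -- busy-wait until the bit clears, then retry */
        while (__atomic_load_n(Lock, __ATOMIC_RELAXED) & 1)
            ; /* the kernel code yields the processor here */
    }
}

/* Roughly what KxReleaseSpinLock below does: clear the whole lock word */
static void ReleaseSpinLockSketch(SPIN_LOCK_SKETCH *Lock)
{
    __atomic_store_n(Lock, 0, __ATOMIC_RELEASE);
}

int main(void)
{
    SPIN_LOCK_SKETCH Lock = 0;

    AcquireSpinLockSketch(&Lock);
    puts("lock acquired");
    ReleaseSpinLockSketch(&Lock);
    puts("lock released");
    return 0;
}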
@@ -7,6 +7,9 @@

 /* INCLUDES ******************************************************************/

+/* Enable this (and the define in spinlock.c) to make UP HAL work for MP Kernel */
+/* #define CONFIG_SMP */
+
 #include <asm.h>
 #include <internal/i386/asmmacro.S>
 .intel_syntax noprefix
@@ -600,20 +603,20 @@ _KeRaiseIrqlToSynchLevel@0:
     cli

     /* Mask out interrupts */
-    mov eax, KiI8259MaskTable[DISPATCH_LEVEL*4]
+    mov eax, KiI8259MaskTable[SYNCH_LEVEL*4]
     or eax, PCR[KPCR_IDR]
     out 0x21, al
     shr eax, 8
     out 0xA1, al

-    /* Return the old IRQL, enable interrupts and set to DISPATCH */
+    /* Return the old IRQL, enable interrupts and set to SYNCH */
     mov eax, PCR[KPCR_IRQL]
-    mov dword ptr PCR[KPCR_IRQL], DISPATCH_LEVEL
+    mov dword ptr PCR[KPCR_IRQL], SYNCH_LEVEL
     popf

 #if DBG
     /* Validate raise */
-    cmp eax, DISPATCH_LEVEL
+    cmp eax, SYNCH_LEVEL
     ja InvalidSyRaise
 #endif

@@ -625,7 +628,7 @@ InvalidSyRaise:
     /* Bugcheck the system */
     push 2
     push 0
-    push DISPATCH_LEVEL
+    push SYNCH_LEVEL
     push eax
     push IRQL_NOT_GREATER_OR_EQUAL
     call _KeBugCheckEx@20
@@ -8,40 +8,113 @@

 /* INCLUDES ******************************************************************/

+/* Enable this (and the define in irq.S) to make UP HAL work for MP Kernel */
+/* #define CONFIG_SMP */
+
 #include <hal.h>
 #define NDEBUG
 #include <debug.h>

 #undef KeAcquireSpinLock
 #undef KeReleaseSpinLock
-#undef KeLowerIrql
-#undef KeRaiseIrql
+
+//
+// This is duplicated from ke_x.h
+//
+#ifdef CONFIG_SMP
+//
+// Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
+//
+FORCEINLINE
+VOID
+KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
+{
+    /* Make sure that we don't own the lock already */
+    if (((KSPIN_LOCK)KeGetCurrentThread() | 1) == *SpinLock)
+    {
+        /* We do, bugcheck! */
+        KeBugCheckEx(SPIN_LOCK_ALREADY_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
+    }
+
+    for (;;)
+    {
+        /* Try to acquire it */
+        if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
+        {
+            /* Value changed... wait until it's locked */
+            while (*(volatile KSPIN_LOCK *)SpinLock == 1)
+            {
+#ifdef DBG
+                /* On debug builds, we use a much slower but useful routine */
+                //Kii386SpinOnSpinLock(SpinLock, 5);
+
+                /* FIXME: Do normal yield for now */
+                YieldProcessor();
+#else
+                /* Otherwise, just yield and keep looping */
+                YieldProcessor();
+#endif
+            }
+        }
+        else
+        {
+#ifdef DBG
+            /* On debug builds, we OR in the KTHREAD */
+            *SpinLock = (KSPIN_LOCK)KeGetCurrentThread() | 1;
+#endif
+            /* All is well, break out */
+            break;
+        }
+    }
+}
+
+//
+// Spinlock Release at IRQL >= DISPATCH_LEVEL
+//
+FORCEINLINE
+VOID
+KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
+{
+#ifdef DBG
+    /* Make sure that the threads match */
+    if (((KSPIN_LOCK)KeGetCurrentThread() | 1) != *SpinLock)
+    {
+        /* They don't, bugcheck */
+        KeBugCheckEx(SPIN_LOCK_NOT_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
+    }
+#endif
+    /* Clear the lock */
+    InterlockedAnd((PLONG)SpinLock, 0);
+}
+
+#else
+
+//
+// Spinlock Acquire at IRQL >= DISPATCH_LEVEL
+//
+FORCEINLINE
+VOID
+KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
+{
+    /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
+    UNREFERENCED_PARAMETER(SpinLock);
+}
+
+//
+// Spinlock Release at IRQL >= DISPATCH_LEVEL
+//
+FORCEINLINE
+VOID
+KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
+{
+    /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
+    UNREFERENCED_PARAMETER(SpinLock);
+}
+
+#endif

 /* FUNCTIONS *****************************************************************/

-/*
- * @implemented
- */
-VOID
-NTAPI
-KeLowerIrql(KIRQL NewIrql)
-{
-    /* Call the fastcall function */
-    KfLowerIrql(NewIrql);
-}
-
-/*
- * @implemented
- */
-VOID
-NTAPI
-KeRaiseIrql(KIRQL NewIrql,
-            PKIRQL OldIrql)
-{
-    /* Call the fastcall function */
-    *OldIrql = KfRaiseIrql(NewIrql);
-}
-
 /*
  * @implemented
  */
@@ -61,8 +134,14 @@ KIRQL
 FASTCALL
 KeAcquireSpinLockRaiseToSynch(PKSPIN_LOCK SpinLock)
 {
-    /* Simply raise to dispatch */
-    return KfRaiseIrql(DISPATCH_LEVEL);
+    KIRQL OldIrql;
+
+    /* Raise to sync */
+    KeRaiseIrql(SYNCH_LEVEL, &OldIrql);
+
+    /* Acquire the lock and return */
+    KxAcquireSpinLock(SpinLock);
+    return OldIrql;
 }

 /*
@@ -84,8 +163,12 @@ KIRQL
 FASTCALL
 KfAcquireSpinLock(PKSPIN_LOCK SpinLock)
 {
-    /* Simply raise to dispatch */
-    return KfRaiseIrql(DISPATCH_LEVEL);
+    KIRQL OldIrql;
+
+    /* Raise to dispatch and acquire the lock */
+    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
+    KxAcquireSpinLock(SpinLock);
+    return OldIrql;
 }

 /*
@@ -96,8 +179,9 @@ FASTCALL
 KfReleaseSpinLock(PKSPIN_LOCK SpinLock,
                   KIRQL OldIrql)
 {
-    /* Simply lower IRQL back */
-    KfLowerIrql(OldIrql);
+    /* Release the lock and lower IRQL back */
+    KxReleaseSpinLock(SpinLock);
+    KeLowerIrql(OldIrql);
 }

 /*
@@ -107,8 +191,14 @@ KIRQL
 FASTCALL
 KeAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
 {
-    /* Simply raise to dispatch */
-    return KfRaiseIrql(DISPATCH_LEVEL);
+    KIRQL OldIrql;
+
+    /* Raise to dispatch */
+    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
+
+    /* Acquire the lock */
+    KxAcquireSpinLock(KeGetCurrentPrcb()->LockQueue[LockNumber].Lock); // HACK
+    return OldIrql;
 }

 /*
@@ -118,8 +208,14 @@ KIRQL
 FASTCALL
 KeAcquireQueuedSpinLockRaiseToSynch(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
 {
-    /* Simply raise to dispatch */
-    return KfRaiseIrql(DISPATCH_LEVEL);
+    KIRQL OldIrql;
+
+    /* Raise to synch */
+    KeRaiseIrql(SYNCH_LEVEL, &OldIrql);
+
+    /* Acquire the lock */
+    KxAcquireSpinLock(KeGetCurrentPrcb()->LockQueue[LockNumber].Lock); // HACK
+    return OldIrql;
 }

 /*
@@ -130,8 +226,15 @@ FASTCALL
 KeAcquireInStackQueuedSpinLock(IN PKSPIN_LOCK SpinLock,
                                IN PKLOCK_QUEUE_HANDLE LockHandle)
 {
-    /* Simply raise to dispatch */
-    LockHandle->OldIrql = KfRaiseIrql(DISPATCH_LEVEL);
+    /* Set up the lock */
+    LockHandle->LockQueue.Next = NULL;
+    LockHandle->LockQueue.Lock = SpinLock;
+
+    /* Raise to dispatch */
+    KeRaiseIrql(DISPATCH_LEVEL, &LockHandle->OldIrql);
+
+    /* Acquire the lock */
+    KxAcquireSpinLock(LockHandle->LockQueue.Lock); // HACK
 }

 /*
@@ -142,8 +245,15 @@ FASTCALL
 KeAcquireInStackQueuedSpinLockRaiseToSynch(IN PKSPIN_LOCK SpinLock,
                                            IN PKLOCK_QUEUE_HANDLE LockHandle)
 {
-    /* Simply raise to synch */
-    LockHandle->OldIrql = KfRaiseIrql(SYNCH_LEVEL);
+    /* Set up the lock */
+    LockHandle->LockQueue.Next = NULL;
+    LockHandle->LockQueue.Lock = SpinLock;
+
+    /* Raise to synch */
+    KeRaiseIrql(SYNCH_LEVEL, &LockHandle->OldIrql);
+
+    /* Acquire the lock */
+    KxAcquireSpinLock(LockHandle->LockQueue.Lock); // HACK
 }

 /*
@@ -154,8 +264,11 @@ FASTCALL
 KeReleaseQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
                         IN KIRQL OldIrql)
 {
-    /* Simply lower IRQL back */
-    KfLowerIrql(OldIrql);
+    /* Release the lock */
+    KxReleaseSpinLock(KeGetCurrentPrcb()->LockQueue[LockNumber].Lock); // HACK
+
+    /* Lower IRQL back */
+    KeLowerIrql(OldIrql);
 }

 /*
@@ -166,7 +279,8 @@ FASTCALL
 KeReleaseInStackQueuedSpinLock(IN PKLOCK_QUEUE_HANDLE LockHandle)
 {
     /* Simply lower IRQL back */
-    KfLowerIrql(LockHandle->OldIrql);
+    KxReleaseSpinLock(LockHandle->LockQueue.Lock); // HACK
+    KeLowerIrql(LockHandle->OldIrql);
 }

 /*
@@ -177,8 +291,13 @@ FASTCALL
 KeTryToAcquireQueuedSpinLockRaiseToSynch(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
                                          IN PKIRQL OldIrql)
 {
-    /* Simply raise to dispatch */
-    *OldIrql = KfRaiseIrql(DISPATCH_LEVEL);
+#ifdef CONFIG_SMP
+    ASSERT(FALSE); // FIXME: Unused
+    while (TRUE);
+#endif
+
+    /* Simply raise to synch */
+    KeRaiseIrql(SYNCH_LEVEL, OldIrql);

     /* Always return true on UP Machines */
     return TRUE;
@@ -192,11 +311,39 @@ FASTCALL
 KeTryToAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
                              OUT PKIRQL OldIrql)
 {
+#ifdef CONFIG_SMP
+    ASSERT(FALSE); // FIXME: Unused
+    while (TRUE);
+#endif
+
     /* Simply raise to dispatch */
-    *OldIrql = KfRaiseIrql(DISPATCH_LEVEL);
+    KeRaiseIrql(DISPATCH_LEVEL, OldIrql);

     /* Always return true on UP Machines */
     return TRUE;
 }

-/* EOF */
+#undef KeRaiseIrql
+/*
+ * @implemented
+ */
+VOID
+NTAPI
+KeRaiseIrql(KIRQL NewIrql,
+            PKIRQL OldIrql)
+{
+    /* Call the fastcall function */
+    *OldIrql = KfRaiseIrql(NewIrql);
+}
+
+#undef KeLowerIrql
+/*
+ * @implemented
+ */
+VOID
+NTAPI
+KeLowerIrql(KIRQL NewIrql)
+{
+    /* Call the fastcall function */
+    KfLowerIrql(NewIrql);
+}
@@ -568,8 +568,22 @@ Author:
 #define APC_LEVEL 0x1
 #define DISPATCH_LEVEL 0x2
 #define CLOCK2_LEVEL 0x1C
+#define IPI_LEVEL 0x1D
 #define HIGH_LEVEL 0x1F

+//
+// Synchronization-level IRQL
+//
+#ifndef CONFIG_SMP
+#define SYNCH_LEVEL DISPATCH_LEVEL
+#else
+#if (NTDDI_VERSION < NTDDI_WS03)
+#define SYNCH_LEVEL (IPI_LEVEL - 0x1)
+#else
+#define SYNCH_LEVEL (IPI_LEVEL - 0x2)
+#endif
+#endif
+
 //
 // Quantum Decrements
 //
@@ -245,7 +245,7 @@ FASTCALL
 KiExitDispatcher(KIRQL OldIrql);

 VOID
-NTAPI
+FASTCALL
 KiDeferredReadyThread(IN PKTHREAD Thread);

 PKTHREAD
@@ -437,12 +437,20 @@ FORCEINLINE
 VOID
 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
 {
+    /* Make sure that we don't own the lock already */
+    if (((KSPIN_LOCK)KeGetCurrentThread() | 1) == *SpinLock)
+    {
+        /* We do, bugcheck! */
+        KeBugCheckEx(SPIN_LOCK_ALREADY_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
+    }
+
+    /* Start acquire loop */
     for (;;)
     {
         /* Try to acquire it */
         if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
         {
-            /* Value changed... wait until it's locked */
+            /* Value changed... wait until it's unlocked */
             while (*(volatile KSPIN_LOCK *)SpinLock == 1)
             {
 #if DBG
@@ -553,7 +561,8 @@ VOID
 KiAcquireDispatcherLockAtDpcLevel(VOID)
 {
     /* Acquire the dispatcher lock */
-    KeAcquireQueuedSpinLockAtDpcLevel(LockQueueDispatcherLock);
+    KeAcquireQueuedSpinLockAtDpcLevel(&KeGetCurrentPrcb()->
+                                      LockQueue[LockQueueDispatcherLock]);
 }

 FORCEINLINE
@@ -561,11 +570,12 @@ VOID
 KiReleaseDispatcherLockFromDpcLevel(VOID)
 {
     /* Release the dispatcher lock */
-    KeReleaseQueuedSpinLockFromDpcLevel(LockQueueDispatcherLock);
+    KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
+                                        LockQueue[LockQueueDispatcherLock]);
 }

 //
-// This routine inserts a thread into the deferred ready list of the given CPU
+// This routine inserts a thread into the deferred ready list of the current CPU
 //
 FORCEINLINE
 VOID
@@ -613,7 +623,7 @@ KiSetThreadSwapBusy(IN PKTHREAD Thread)
 // This routine acquires the PRCB lock so that only one caller can touch
 // volatile PRCB data.
 //
-// Since this is a simple optimized spin-lock, it must be be only acquired
+// Since this is a simple optimized spin-lock, it must only be acquired
 // at dispatcher level or higher!
 //
 FORCEINLINE
@@ -649,7 +659,8 @@ FORCEINLINE
 VOID
 KiReleasePrcbLock(IN PKPRCB Prcb)
 {
-    /* Make sure it's acquired! */
+    /* Make sure we are above dispatch and the lock is acquired! */
+    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
     ASSERT(Prcb->PrcbLock != 0);

     /* Release it */
@@ -696,6 +707,9 @@ FORCEINLINE
 VOID
 KiReleaseThreadLock(IN PKTHREAD Thread)
 {
+    /* Make sure we are still above dispatch */
+    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
+
     /* Release it */
     InterlockedAnd((PLONG)&Thread->ThreadLock, 0);
 }
@@ -729,10 +743,8 @@ FORCEINLINE
 VOID
 KiRundownThread(IN PKTHREAD Thread)
 {
-#if defined(_M_IX86) || defined(_M_AMD64)
-    /* FIXME: TODO */
-    ASSERTMSG("Not yet implemented\n", FALSE);
-#endif
+    /* Nothing to do */
+    return;
 }

 FORCEINLINE
@@ -550,6 +550,9 @@ KiRetireDpcList(IN PKPRCB Prcb)
     PKDEFERRED_ROUTINE DeferredRoutine;
     PVOID DeferredContext, SystemArgument1, SystemArgument2;
     ULONG_PTR TimerHand;
+#ifdef CONFIG_SMP
+    KIRQL OldIrql;
+#endif

     /* Get data and list variables before starting anything else */
     DpcData = &Prcb->DpcData[DPC_NORMAL];
@@ -631,12 +634,23 @@ KiRetireDpcList(IN PKPRCB Prcb)
         Prcb->DpcRoutineActive = FALSE;
         Prcb->DpcInterruptRequested = FALSE;

+#ifdef CONFIG_SMP
         /* Check if we have deferred threads */
         if (Prcb->DeferredReadyListHead.Next)
         {
-            /* FIXME: 2K3-style scheduling not implemeted */
-            ASSERT(FALSE);
+            /* Re-enable interrupts and raise to synch */
+            _enable();
+            OldIrql = KeRaiseIrqlToSynchLevel();
+
+            /* Process deferred threads */
+            KiProcessDeferredReadyList(Prcb);
+
+            /* Lower IRQL back and disable interrupts */
+            KeLowerIrql(OldIrql);
+            _disable();
         }
+#endif
     } while (DpcData->DpcQueueDepth != 0);
 }
@@ -351,8 +351,8 @@ GetSwapLock:
     jz NotBusy
     pause
    jmp GetSwapLock
-#endif
 NotBusy:
+#endif
     /* Increase context switches (use ES for lazy load) */
     inc dword ptr es:[ebx+KPCR_CONTEXT_SWITCHES]

@@ -381,9 +381,9 @@ AfterTrace:
     /* Check NPX State */
     cmp byte ptr [edi+KTHREAD_NPX_STATE], NPX_STATE_LOADED
     jz NpxLoaded
+SetStack:
 #endif

-SetStack:
     /* Set new stack */
     mov [edi+KTHREAD_KERNEL_STACK], esp

@@ -423,7 +423,7 @@ StackOk:
     test [ebp+KPROCESS_ACTIVE_PROCESSORS], ecx
     jz WrongActiveCpu
     test [eax+KPROCESS_ACTIVE_PROCESSORS], ecx
-    jz WrongActiveCpu
+    jnz WrongActiveCpu
 #endif
 #endif

@@ -563,8 +563,29 @@ NewCr0:
 #ifdef CONFIG_SMP
 NpxLoaded:

-    /* FIXME: TODO */
-    int 3
+    /* Mask out FPU flags */
+    and edx, ~(CR0_MP + CR0_EM + CR0_TS)
+
+    /* Get the NPX Frame */
+    mov ecx, [edi+KTHREAD_INITIAL_STACK]
+    sub ecx, NPX_FRAME_LENGTH
+
+    /* Check if we have a new CR0 */
+    cmp ebp, edx
+    jz Cr0Equal
+
+    /* We do, update it */
+    mov cr0, edx
+    mov ebp, edx
+
+Cr0Equal:
+
+    /* Save the NPX State */
+    fxsave [ecx]
+    mov byte ptr [edi+KTHREAD_NPX_STATE], NPX_STATE_NOT_LOADED
+
+    /* Clear the NPX Thread */
+    mov dword ptr [ebx+KPCR_NPX_THREAD], 0

     /* Jump back */
     jmp SetStack
@@ -762,7 +783,7 @@ SwapContext:

 #ifdef CONFIG_SMP
 SameThread:
-    /* Clear the next thread, and put the thready as ready after lock release */
+    /* Clear the next thread, and put the thread as ready after lock release */
     and dword ptr [ebx+KPCR_PRCB_NEXT_THREAD], 0
     and dword ptr [ebx+KPCR_PRCB_PRCB_LOCK], 0
     and byte ptr [edi+KTHREAD_STATE_], Ready
@@ -837,9 +858,9 @@ _KiSwapProcess@8:

     /* Sanity check */
 #if DBG
-    test dword ptr [edx+KPROCESS_ACTIVE_PROCESSORS], 0
+    test [edx+KPROCESS_ACTIVE_PROCESSORS], ecx
     jz WrongCpu1
-    test dword ptr [eax+KPROCESS_ACTIVE_PROCESSORS], 0
+    test [eax+KPROCESS_ACTIVE_PROCESSORS], ecx
     jnz WrongCpu2
 #endif
 #endif
@@ -2736,12 +2736,26 @@ _KeSynchronizeExecution@12:
     /* Go to DIRQL */
     mov cl, [ebx+KINTERRUPT_SYNCHRONIZE_IRQL]
     call @KfRaiseIrql@4
+    push eax
+
+#ifdef CONFIG_SMP
+    /* Acquire the interrupt spinlock FIXME: Write this in assembly */
+    mov ecx, [ebx+KINTERRUPT_ACTUAL_LOCK]
+    call @KefAcquireSpinLockAtDpcLevel@4
+#endif

     /* Call the routine */
-    push eax
     push [esp+20]
     call [esp+20]

+#ifdef CONFIG_SMP
+    /* Release the interrupt spinlock FIXME: Write this in assembly */
+    push eax
+    mov ecx, [ebx+KINTERRUPT_ACTUAL_LOCK]
+    call @KefReleaseSpinLockFromDpcLevel@4
+    pop eax
+#endif
+
     /* Lower IRQL */
     mov ebx, eax
     pop ecx
@@ -2752,3 +2766,32 @@ _KeSynchronizeExecution@12:
     pop ebx
     ret 12
 .endfunc
+
+/*++
+ * Kii386SpinOnSpinLock
+ *
+ * FILLMEIN
+ *
+ * Params:
+ *     SpinLock - FILLMEIN
+ *
+ *     Flags - FILLMEIN
+ *
+ * Returns:
+ *     None.
+ *
+ * Remarks:
+ *     FILLMEIN
+ *
+ *--*/
+.globl _Kii386SpinOnSpinLock@8
+.func Kii386SpinOnSpinLock@8
+_Kii386SpinOnSpinLock@8:
+
+#ifdef CONFIG_SMP
+    /* FIXME: TODO */
+    int 3
+#endif
+
+    ret 8
+.endfunc
@@ -17,6 +17,11 @@

 /* PRIVATE FUNCTIONS *********************************************************/

+#if 0
+//
+// FIXME: The queued spinlock routines are broken.
+//
+
 VOID
 FASTCALL
 KeAcquireQueuedSpinLockAtDpcLevel(IN PKSPIN_LOCK_QUEUE LockHandle)
@@ -84,6 +89,55 @@ KeReleaseQueuedSpinLockFromDpcLevel(IN PKSPIN_LOCK_QUEUE LockHandle)
 #endif
 }

+#else
+//
+// HACK: Hacked to work like normal spinlocks
+//
+
+VOID
+FASTCALL
+KeAcquireQueuedSpinLockAtDpcLevel(IN PKSPIN_LOCK_QUEUE LockHandle)
+{
+#ifdef CONFIG_SMP
+    /* Make sure we are at DPC or above! */
+    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+    {
+        /* We aren't -- bugcheck */
+        KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+                     (ULONG_PTR)LockHandle->Lock,
+                     KeGetCurrentIrql(),
+                     0,
+                     0);
+    }
+
+    /* Do the inlined function */
+    KxAcquireSpinLock(LockHandle->Lock);
+#endif
+}
+
+VOID
+FASTCALL
+KeReleaseQueuedSpinLockFromDpcLevel(IN PKSPIN_LOCK_QUEUE LockHandle)
+{
+#ifdef CONFIG_SMP
+    /* Make sure we are at DPC or above! */
+    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+    {
+        /* We aren't -- bugcheck */
+        KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+                     (ULONG_PTR)LockHandle->Lock,
+                     KeGetCurrentIrql(),
+                     0,
+                     0);
+    }
+
+    /* Do the inlined function */
+    KxReleaseSpinLock(LockHandle->Lock);
+#endif
+}
+
+#endif
+
 /* PUBLIC FUNCTIONS **********************************************************/

 /*
@@ -137,6 +191,17 @@ VOID
 NTAPI
 KeAcquireSpinLockAtDpcLevel(IN PKSPIN_LOCK SpinLock)
 {
+    /* Make sure we are at DPC or above! */
+    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+    {
+        /* We aren't -- bugcheck */
+        KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+                     (ULONG_PTR)SpinLock,
+                     KeGetCurrentIrql(),
+                     0,
+                     0);
+    }
+
     /* Do the inlined function */
     KxAcquireSpinLock(SpinLock);
 }
@@ -149,7 +214,18 @@ VOID
 NTAPI
 KeReleaseSpinLockFromDpcLevel(IN PKSPIN_LOCK SpinLock)
 {
-    /* Do the lined function */
+    /* Make sure we are at DPC or above! */
+    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+    {
+        /* We aren't -- bugcheck */
+        KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+                     (ULONG_PTR)SpinLock,
+                     KeGetCurrentIrql(),
+                     0,
+                     0);
+    }
+
+    /* Do the inlined function */
     KxReleaseSpinLock(SpinLock);
 }
@@ -160,6 +236,17 @@ VOID
 FASTCALL
 KefAcquireSpinLockAtDpcLevel(IN PKSPIN_LOCK SpinLock)
 {
+    /* Make sure we are at DPC or above! */
+    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+    {
+        /* We aren't -- bugcheck */
+        KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+                     (ULONG_PTR)SpinLock,
+                     KeGetCurrentIrql(),
+                     0,
+                     0);
+    }
+
     /* Do the inlined function */
     KxAcquireSpinLock(SpinLock);
 }
@@ -171,7 +258,18 @@ VOID
 FASTCALL
 KefReleaseSpinLockFromDpcLevel(IN PKSPIN_LOCK SpinLock)
 {
-    /* Do the lined function */
+    /* Make sure we are at DPC or above! */
+    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+    {
+        /* We aren't -- bugcheck */
+        KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+                     (ULONG_PTR)SpinLock,
+                     KeGetCurrentIrql(),
+                     0,
+                     0);
+    }
+
+    /* Do the inlined function */
     KxReleaseSpinLock(SpinLock);
 }
@@ -243,7 +341,23 @@ KeAcquireInStackQueuedSpinLockAtDpcLevel(IN PKSPIN_LOCK SpinLock,
     /* Set it up properly */
     LockHandle->LockQueue.Next = NULL;
     LockHandle->LockQueue.Lock = SpinLock;
+#if 0
     KeAcquireQueuedSpinLockAtDpcLevel(LockHandle->LockQueue.Next);
+#else
+    /* Make sure we are at DPC or above! */
+    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+    {
+        /* We aren't -- bugcheck */
+        KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+                     (ULONG_PTR)LockHandle->LockQueue.Lock,
+                     KeGetCurrentIrql(),
+                     0,
+                     0);
+    }
+
+    /* Acquire the lock */
+    KxAcquireSpinLock(LockHandle->LockQueue.Lock); // HACK
+#endif
 #endif
 }
@@ -255,8 +369,24 @@ FASTCALL
 KeReleaseInStackQueuedSpinLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE LockHandle)
 {
 #ifdef CONFIG_SMP
+#if 0
     /* Call the internal function */
     KeReleaseQueuedSpinLockFromDpcLevel(LockHandle->LockQueue.Next);
+#else
+    /* Make sure we are at DPC or above! */
+    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+    {
+        /* We aren't -- bugcheck */
+        KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+                     (ULONG_PTR)LockHandle->LockQueue.Lock,
+                     KeGetCurrentIrql(),
+                     0,
+                     0);
+    }
+
+    /* Release the lock */
+    KxReleaseSpinLock(LockHandle->LockQueue.Lock); // HACK
+#endif
 #endif
 }
@@ -324,5 +454,3 @@ KeTestSpinLock(IN PKSPIN_LOCK SpinLock)
     /* Spinlock appears to be free */
     return TRUE;
 }
-
-/* EOF */
@@ -24,7 +24,7 @@ FASTCALL
 KiIdleSchedule(IN PKPRCB Prcb)
 {
     /* FIXME: TODO */
-    ASSERTMSG("Not yet implemented\n", FALSE);
+    ASSERTMSG("SMP: Not yet implemented\n", FALSE);
     return NULL;
 }

@@ -32,8 +32,29 @@ VOID
 FASTCALL
 KiProcessDeferredReadyList(IN PKPRCB Prcb)
 {
-    /* FIXME: TODO */
-    ASSERTMSG("Not yet implemented\n", FALSE);
+    PSINGLE_LIST_ENTRY ListEntry;
+    PKTHREAD Thread;
+
+    /* Make sure there is something on the ready list */
+    ASSERT(Prcb->DeferredReadyListHead.Next != NULL);
+
+    /* Get the first entry and clear the list */
+    ListEntry = Prcb->DeferredReadyListHead.Next;
+    Prcb->DeferredReadyListHead.Next = NULL;
+
+    /* Start processing loop */
+    do
+    {
+        /* Get the thread and advance to the next entry */
+        Thread = CONTAINING_RECORD(ListEntry, KTHREAD, SwapListEntry);
+        ListEntry = ListEntry->Next;
+
+        /* Make the thread ready */
+        KiDeferredReadyThread(Thread);
+    } while (ListEntry != NULL);
+
+    /* Make sure the ready list is still empty */
+    ASSERT(Prcb->DeferredReadyListHead.Next == NULL);
 }

 VOID
@@ -46,7 +67,7 @@ KiQueueReadyThread(IN PKTHREAD Thread,
 }

 VOID
-NTAPI
+FASTCALL
 KiDeferredReadyThread(IN PKTHREAD Thread)
 {
     PKPRCB Prcb;
@@ -191,9 +212,10 @@ KiDeferredReadyThread(IN PKTHREAD Thread)
     OldPriority = Thread->Priority;
     Thread->Preempted = FALSE;

-    /* Queue the thread on CPU 0 and get the PRCB */
+    /* Queue the thread on CPU 0 and get the PRCB and lock it */
     Thread->NextProcessor = 0;
     Prcb = KiProcessorBlock[0];
+    KiAcquirePrcbLock(Prcb);

     /* Check if we have an idle summary */
     if (KiIdleSummary)
@@ -202,6 +224,9 @@ KiDeferredReadyThread(IN PKTHREAD Thread)
         KiIdleSummary = 0;
         Thread->State = Standby;
         Prcb->NextThread = Thread;
+
+        /* Unlock the PRCB and return */
+        KiReleasePrcbLock(Prcb);
         return;
     }

@@ -300,6 +325,7 @@ KiSelectNextThread(IN PKPRCB Prcb)
         Prcb->IdleSchedule = TRUE;

         /* FIXME: SMT support */
+        ASSERTMSG("SMP: Not yet implemented\n", FALSE);
     }

     /* Sanity checks and return the thread */
@@ -718,7 +718,7 @@
 @ fastcall KiReleaseSpinLock(ptr)
 @ cdecl KiUnexpectedInterrupt()
 #ifdef _M_IX86
-;Kii386SpinOnSpinLock
+@ stdcall Kii386SpinOnSpinLock(ptr long)
 #endif
 @ stdcall LdrAccessResource(ptr ptr ptr ptr)
 @ stdcall LdrEnumResources(ptr ptr long ptr ptr)