Implemented Guarded Mutex, a drop-in replacement for Fast Mutex (the correct kind, not the one ROS currently implements incorrectly) on NT 5.2. Not fully tested yet, so nothing has been switched over to it and it is probably not usable yet. Also made KeGetCurrentThread/KeGetCurrentIrql inline, since this should give quite a speed boost. Made KeLeaveCriticalRegion deliver APCs when possible, and made the Critical Region macros usable from outside ntoskrnl thanks to a new NT 5.2 API (KiKernelApcDeliveryCheck). The Guarded Mutex code is based on work by Filip Navara.
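
For illustration, a driver that currently serializes a list with a Fast Mutex could switch over roughly like this (hypothetical MY_DEVICE_EXTENSION names; an untested sketch, not required usage):

    typedef struct _MY_DEVICE_EXTENSION
    {
        KGUARDED_MUTEX Lock;            /* was: FAST_MUTEX Lock; */
        LIST_ENTRY PendingList;
    } MY_DEVICE_EXTENSION, *PMY_DEVICE_EXTENSION;

    VOID
    MyInitLock(PMY_DEVICE_EXTENSION DeviceExtension)
    {
        /* was: ExInitializeFastMutex(&DeviceExtension->Lock); */
        KeInitializeGuardedMutex(&DeviceExtension->Lock);
        InitializeListHead(&DeviceExtension->PendingList);
    }

    VOID
    MyQueueEntry(PMY_DEVICE_EXTENSION DeviceExtension, PLIST_ENTRY Entry)
    {
        /* was: ExAcquireFastMutex(&DeviceExtension->Lock); */
        KeAcquireGuardedMutex(&DeviceExtension->Lock);
        InsertTailList(&DeviceExtension->PendingList, Entry);
        /* was: ExReleaseFastMutex(&DeviceExtension->Lock); */
        KeReleaseGuardedMutex(&DeviceExtension->Lock);
    }

Unlike the Fast Mutex path, acquiring the Guarded Mutex keeps IRQL at PASSIVE_LEVEL and blocks APC delivery through a guarded region instead of raising to APC_LEVEL.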

svn path=/trunk/; revision=14625
This commit is contained in:
Alex Ionescu 2005-04-15 06:24:35 +00:00
parent 3e3c9e9775
commit 956f376f4f
18 changed files with 473 additions and 59 deletions

View file

@ -41,6 +41,7 @@
#undef VERSION
#define VERSION "0.0.1"
#undef KeGetCurrentIrql
/* PROTOTYPES ***************************************************************/
NTSTATUS STDCALL

View file

@ -748,7 +748,7 @@ KeFlushWriteBuffer(VOID)
UNIMPLEMENTED;
}
#undef KeGetCurrentIrql
KIRQL
STDCALL
KeGetCurrentIrql(VOID)

View file

@ -18,6 +18,8 @@
/* FUNCTIONS *****************************************************************/
#undef KeEnterCriticalRegion
#undef KeLeaveCriticalRegion
VOID FASTCALL
ExAcquireFastMutex (PFAST_MUTEX FastMutex)
{

View file

@ -65,6 +65,7 @@ KiInterruptDispatch2 (ULONG Irq, KIRQL old_level);
/* FUNCTIONS ****************************************************************/
#undef KeGetCurrentIrql
KIRQL STDCALL KeGetCurrentIrql (VOID)
/*
* PURPOSE: Returns the current irq level

View file

@ -56,7 +56,7 @@ typedef enum _KWAIT_REASON
WrPageOut,
WrRendezvous,
Spare2,
Spare3,
WrGuardedMutex,
Spare4,
Spare5,
Spare6,

View file

@ -809,4 +809,48 @@ KeRaiseUserException(
IN NTSTATUS ExceptionCode
);
VOID
FASTCALL
KeAcquireGuardedMutex(
PKGUARDED_MUTEX GuardedMutex
);
VOID
FASTCALL
KeAcquireGuardedMutexUnsafe(
PKGUARDED_MUTEX GuardedMutex
);
VOID
STDCALL
KeEnterGuardedRegion(VOID);
VOID
STDCALL
KeLeaveGuardedRegion(VOID);
VOID
FASTCALL
KeInitializeGuardedMutex(
PKGUARDED_MUTEX GuardedMutex
);
VOID
FASTCALL
KeReleaseGuardedMutexUnsafe(
PKGUARDED_MUTEX GuardedMutex
);
VOID
FASTCALL
KeReleaseGuardedMutex(
PKGUARDED_MUTEX GuardedMutex
);
BOOLEAN
FASTCALL
KeTryToAcquireGuardedMutex(
PKGUARDED_MUTEX GuardedMutex
);
#endif /* __INCLUDE_DDK_KEFUNCS_H */
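
A sketch of the try-acquire pattern these declarations allow (hypothetical caller, not part of the patch):

    BOOLEAN
    MyTryBumpCounter(PKGUARDED_MUTEX Lock, PULONG Counter)
    {
        /* Fails immediately instead of blocking if the mutex is owned */
        if (!KeTryToAcquireGuardedMutex(Lock)) return FALSE;

        (*Counter)++;
        KeReleaseGuardedMutex(Lock);
        return TRUE;
    }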

View file

@ -180,6 +180,21 @@ typedef struct _KMUTEX
UCHAR ApcDisable;
} KMUTEX, *PKMUTEX, KMUTANT, *PKMUTANT;
typedef struct _KGUARDED_MUTEX
{
LONG Count;
struct _KTHREAD* Owner;
ULONG Contention;
KGATE Gate;
union {
struct {
SHORT KernelApcDisable;
SHORT SpecialApcDisable;
};
ULONG CombinedApcDisable;
};
} KGUARDED_MUTEX, *PKGUARDED_MUTEX;
#include <pshpack1.h>
typedef struct _KSEMAPHORE

View file

@ -100,6 +100,8 @@ OBJECTS_KE = \
ke/dpc.o \
ke/device.o \
ke/event.o \
ke/gate.o \
ke/gmutex.o \
ke/kqueue.o \
ke/kthread.o \
ke/ipi.o \

View file

@ -396,6 +396,44 @@ InterlockedExchangeAdd(PLONG Addend,
#error Unknown compiler for inline assembler
#endif
/**********************************************************************
* FASTCALL: @InterlockedClearBit@8
* STDCALL: _InterlockedClearBit@8
*/
#if defined(__GNUC__)
/*
* @implemented
*/
UCHAR
FASTCALL
InterlockedClearBit(PLONG Destination,
LONG Bit);
__asm__("\n\t.global @InterlockedClearBit@8\n\t"
"@InterlockedClearBit@8:\n\t"
LOCK
"btr %edx,(%ecx)\n\t"
"setc %al\n\t"
"ret $8\n\t");
#elif defined(_MSC_VER)
/*
* @implemented
*/
__declspec(naked)
UCHAR
FASTCALL
InterlockedClearBit(PLONG Destination,
LONG Bit)
{
__asm LOCK btr [ecx], edx
__asm setc al
__asm ret
}
#else
#error Unknown compiler for inline assembler
#endif
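
In portable terms, InterlockedClearBit atomically clears the given bit of *Destination and returns the bit's previous value; a plain C equivalent of what the assembler above does would look roughly like this (illustration only):

    UCHAR
    FASTCALL
    InterlockedClearBit_C(PLONG Destination, LONG Bit)
    {
        LONG OldValue, NewValue;

        do
        {
            /* Snapshot the current value and compute it with the bit cleared */
            OldValue = *(volatile LONG *)Destination;
            NewValue = OldValue & ~(1 << Bit);
        } while (InterlockedCompareExchange(Destination, NewValue, OldValue) != OldValue);

        /* Previous state of the bit, like SETC after BTR */
        return (UCHAR)((OldValue >> Bit) & 1);
    }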
/**********************************************************************
* FASTCALL: @InterlockedCompareExchange@12

View file

@ -205,6 +205,7 @@ NtEarlyInitVdm(VOID);
#define LOCK ""
#endif
#define KeGetCurrentIrql(X) (((PKPCR)KPCR_BASE)->Irql)
#if defined(__GNUC__)
#define Ke386DisableInterrupts() __asm__("cli\n\t");

View file

@ -292,6 +292,7 @@ static inline PKPRCB KeGetCurrentPrcb(VOID)
#define KeGetCurrentKPCR(X) ((PKPCR)KPCR_BASE)
#define KeGetCurrentPrcb() (((PKPCR)KPCR_BASE)->Prcb)
#define KeGetCurrentThread(X) (((PKPCR)KPCR_BASE)->PrcbData.CurrentThread)
#endif

View file

@ -49,6 +49,26 @@ struct _KEXCEPTION_FRAME;
#define IPI_REQUEST_DPC 2
#define IPI_REQUEST_FREEZE 3
/* MACROS *************************************************************************/
#define KeEnterCriticalRegion(X) \
{ \
PKTHREAD _Thread = KeGetCurrentThread(); \
if (_Thread) _Thread->KernelApcDisable--; \
}
#define KeLeaveCriticalRegion(X) \
{ \
PKTHREAD _Thread = KeGetCurrentThread(); \
if((_Thread) && (++_Thread->KernelApcDisable == 0)) \
{ \
if (!IsListEmpty(&_Thread->ApcState.ApcListHead[KernelMode])) \
{ \
KiKernelApcDeliveryCheck(); \
} \
} \
}
/* threadsch.c ********************************************************************/
/* Thread Scheduler Functions */
@ -77,6 +97,28 @@ STDCALL
KiUnblockThread(PKTHREAD Thread,
PNTSTATUS WaitStatus,
KPRIORITY Increment);
/* gmutex.c ********************************************************************/
VOID
FASTCALL
KiAcquireGuardedMutexContented(PKGUARDED_MUTEX GuardedMutex);
/* gate.c **********************************************************************/
VOID
FASTCALL
KeInitializeGate(PKGATE Gate);
VOID
FASTCALL
KeSignalGateBoostPriority(PKGATE Gate);
VOID
FASTCALL
KeWaitForGate(PKGATE Gate,
KWAIT_REASON WaitReason,
KPROCESSOR_MODE WaitMode);
/* ipi.c ********************************************************************/
@ -237,7 +279,9 @@ VOID KeContextToTrapFrame(PCONTEXT Context, PKTRAP_FRAME TrapFrame);
VOID STDCALL KiDeliverApc(KPROCESSOR_MODE PreviousMode,
PVOID Reserved,
PKTRAP_FRAME TrapFrame);
VOID
STDCALL
KiKernelApcDeliveryCheck(VOID);
LONG
STDCALL
KiInsertQueue(IN PKQUEUE Queue,

View file

@ -97,7 +97,13 @@ typedef struct _KTHREAD
CHAR Quantum; /* 6B */
KWAIT_BLOCK WaitBlock[4]; /* 6C */
PVOID LegoData; /* CC */
ULONG KernelApcDisable; /* D0 */
union {
struct {
SHORT KernelApcDisable;
SHORT SpecialApcDisable;
};
ULONG CombinedApcDisable; /* D0 */
};
KAFFINITY UserAffinity; /* D4 */
UCHAR SystemAffinityActive;/* D8 */
UCHAR PowerState; /* D9 */
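
With the union added above, both disable counts can be checked with a single 32-bit read; both counts are biased so that zero means enabled, e.g. (illustrative helper, not part of the patch):

    BOOLEAN
    ExampleAreAllApcsEnabled(IN PKTHREAD Thread)
    {
        /* Zero only if neither normal kernel APCs nor special APCs are disabled */
        return (BOOLEAN)(Thread->CombinedApcDisable == 0);
    }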

View file

@ -16,6 +16,51 @@
/* FUNCTIONS *****************************************************************/
/*++
* KiKernelApcDeliveryCheck
* @implemented NT 5.2
*
* The KiKernelApcDeliveryCheck routine is called whenever APCs have just
* been re-enabled in Kernel Mode, such as after leaving a Critical or
* Guarded Region. It delivers APCs if the environment is right.
*
* Params:
* None.
*
* Returns:
* None.
*
* Remarks:
* This routine allows KeEnter/LeaveCriticalRegion and KeEnter/LeaveGuardedRegion
* to be used as macros from inside WIN32K or other drivers, which then only
* need an imported API call on the path where APCs actually have to be
* delivered again.
*
*--*/
VOID
STDCALL
KiKernelApcDeliveryCheck(VOID)
{
/* We should only deliver at passive */
if (KeGetCurrentIrql() == PASSIVE_LEVEL)
{
/* Raise to APC and Deliver APCs, then lower back to Passive */
KfRaiseIrql(APC_LEVEL);
KiDeliverApc(KernelMode, 0, 0);
KfLowerIrql(PASSIVE_LEVEL);
}
else
{
/*
* If we're not at passive level, someone raised the IRQL to APC
* level before the critical or guarded region was entered (e.g. a
* fast mutex did). The APCs shouldn't be delivered now, but only
* once the IRQL drops back to passive level.
*/
HalRequestSoftwareInterrupt(APC_LEVEL);
}
}
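
A sketch of the deferred case described in the remarks, with a hypothetical caller that already holds a fast mutex (illustration only):

    VOID
    ExampleLeaveUnderFastMutex(PFAST_MUTEX FastMutex)
    {
        ExAcquireFastMutex(FastMutex);      /* raises IRQL to APC_LEVEL */
        KeEnterCriticalRegion();

        /* ... */

        KeLeaveCriticalRegion();            /* pending APCs are only requested here */
        ExReleaseFastMutex(FastMutex);      /* IRQL drops, the requested APC fires now */
    }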
/*++
* KeEnterCriticalRegion
* @implemented NT4
@ -37,6 +82,7 @@
* Callers of KeEnterCriticalRegion must be running at IRQL <= APC_LEVEL.
*
*--*/
#undef KeEnterCriticalRegion
VOID
STDCALL
KeEnterCriticalRegion(VOID)
@ -46,6 +92,45 @@ KeEnterCriticalRegion(VOID)
if (Thread) Thread->KernelApcDisable--;
}
/*++
* KeLeaveCriticalRegion
* @implemented NT4
*
* The KeLeaveCriticalRegion routine reenables the delivery of normal
* kernel-mode APCs that were disabled by a call to KeEnterCriticalRegion.
*
* Params:
* None.
*
* Returns:
* None.
*
* Remarks:
* Highest-level drivers can call this routine while running in the context
* of the thread that requested the current I/O operation.
*
* Callers of KeLeaveCriticalRegion must be running at IRQL <= DISPATCH_LEVEL.
*
*--*/
#undef KeLeaveCriticalRegion
VOID
STDCALL
KeLeaveCriticalRegion (VOID)
{
PKTHREAD Thread = KeGetCurrentThread();
/* Check if Kernel APCs are now enabled */
if((Thread) && (++Thread->KernelApcDisable == 0))
{
/* Check if we need to request an APC Delivery */
if (!IsListEmpty(&Thread->ApcState.ApcListHead[KernelMode]))
{
/* Check for the right environment */
KiKernelApcDeliveryCheck();
}
}
}
/*++
* KeInitializeApc
* @implemented NT4
@ -325,45 +410,6 @@ KeInsertQueueApc(PKAPC Apc,
return Inserted;
}
/*++
* KeLeaveCriticalRegion
* @implemented NT4
*
* The KeLeaveCriticalRegion routine reenables the delivery of normal
* kernel-mode APCs that were disabled by a call to KeEnterCriticalRegion.
*
* Params:
* None.
*
* Returns:
* None.
*
* Remarks:
* Highest-level drivers can call this routine while running in the context
* of the thread that requested the current I/O operation.
*
* Callers of KeLeaveCriticalRegion must be running at IRQL <= DISPATCH_LEVEL.
*
*--*/
VOID
STDCALL
KeLeaveCriticalRegion (VOID)
{
PKTHREAD Thread = KeGetCurrentThread();
/* Check if Kernel APCs are now enabled */
if((Thread) && (++Thread->KernelApcDisable == 0)) {
/* Check if we need to request an APC Delivery */
if (!IsListEmpty(&Thread->ApcState.ApcListHead[KernelMode])) {
/* Set APC Pending */
Thread->ApcState.KernelApcPending = TRUE;
HalRequestSoftwareInterrupt(APC_LEVEL);
}
}
}
/*++
* KeRemoveQueueApc
*

View file

@ -1,11 +1,11 @@
/* $Id$
*
/*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
* PROJECT: ReactOS Kernel
* FILE: ntoskrnl/ke/gmutex.c
* PURPOSE: Implements guarded mutex (w2k3+/64)
* PURPOSE: Implements Guarded Mutex
*
* PROGRAMMERS: No programmer listed.
* PROGRAMMERS: Alex Ionescu (alex@relsoft.net) and
* Filip Navara (xnavara@volny.cz)
*/
/* INCLUDES *****************************************************************/
@ -13,17 +13,209 @@
#include <ntoskrnl.h>
#include <internal/debug.h>
UCHAR
FASTCALL
InterlockedClearBit(PLONG Destination,
LONG Bit);
typedef enum _KGUARDED_MUTEX_BITS
{
GM_LOCK_BIT = 1,
GM_LOCK_WAITER_WOKEN = 2,
GM_LOCK_WAITER_INC = 4
} KGUARDED_MUTEX_BITS;
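
The Count field thus packs three things: bit 0 is set while the mutex is available, bit 1 marks a woken waiter, and the remaining bits count waiters in units of GM_LOCK_WAITER_INC. A debug-style decoding sketch (not part of the patch):

    VOID
    ExampleDumpGuardedMutexCount(LONG Count)
    {
        DPRINT1("Available: %d, WaiterWoken: %d, Waiters: %ld\n",
                (Count & GM_LOCK_BIT) != 0,
                (Count & GM_LOCK_WAITER_WOKEN) != 0,
                Count / GM_LOCK_WAITER_INC);
    }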
/* FUNCTIONS *****************************************************************/
/*
KeAcquireGuardedMutex
KeAcquireGuardedMutexUnsafe
KeEnterGuardedRegion
KeInitializeGuardedMutex
KeReleaseGuardedMutexUnsafe
KeTryToAcquireGuardedMutex
KeReleaseGuardedMutex
KeLeaveGuardedRegion
*/
/**
* @name KeEnterGuardedRegion
*
* Enters a guarded region. This disables the delivery of all APCs,
* including special kernel APCs.
*/
VOID
STDCALL
KeEnterGuardedRegion(VOID)
{
/* Disable Special APCs */
KeGetCurrentThread()->SpecialApcDisable--;
}
/**
* @name KeLeaveGuardedRegion
*
* Leaves a guarded region and delivers pending APCs if possible.
*/
VOID
STDCALL
KeLeaveGuardedRegion(VOID)
{
PKTHREAD Thread = KeGetCurrentThread();
/* Bring the disable count back up and check if Special APCs are enabled again */
if (++Thread->SpecialApcDisable == 0)
{
/* Check if there are Kernel APCs on the list */
if (!IsListEmpty(&Thread->ApcState.ApcListHead[KernelMode]))
{
/* Check for APC Delivery */
KiKernelApcDeliveryCheck();
}
}
}
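
Unlike a critical region, a guarded region also holds off special kernel APCs; a hypothetical per-thread update could rely on that as follows (illustrative sketch only):

    typedef struct _MY_THREAD_STATE
    {
        ULONG Cookie;
        ULONGLONG LastUpdate;
    } MY_THREAD_STATE, *PMY_THREAD_STATE;

    VOID
    ExampleUpdateThreadState(PMY_THREAD_STATE State)
    {
        KeEnterGuardedRegion();

        /* No kernel APC (normal or special) can run on this thread in here,
           so an APC never observes the two fields half-updated */
        State->Cookie++;
        State->LastUpdate = KeQueryInterruptTime();

        KeLeaveGuardedRegion();
    }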
VOID
FASTCALL
KeInitializeGuardedMutex(PKGUARDED_MUTEX GuardedMutex)
{
/* Setup the Initial Data */
GuardedMutex->Count = GM_LOCK_BIT;
GuardedMutex->Owner = NULL;
GuardedMutex->Contention = 0;
/* Initialize the Wait Gate */
KeInitializeGate(&GuardedMutex->Gate);
}
VOID
FASTCALL
KiAcquireGuardedMutexContented(PKGUARDED_MUTEX GuardedMutex)
{
ULONG BitsToRemove;
ULONG BitsToAdd;
LONG OldValue;
/* Increase the contention count */
InterlockedIncrement(&GuardedMutex->Contention);
/* On the first pass, acquiring only means removing the Lock Bit */
BitsToRemove = GM_LOCK_BIT;
BitsToAdd = GM_LOCK_WAITER_INC;
while (1)
{
/* Read the current Count bits with a volatile access */
OldValue = *(volatile LONG *)&GuardedMutex->Count;
/* Check if the Guarded Mutex is free (the Lock Bit is set while it is unowned) */
if (OldValue & GM_LOCK_BIT)
{
/* Try to acquire it by removing the Lock Bit (and the Waiter Woken bit after a wake) */
if (InterlockedCompareExchange(&GuardedMutex->Count,
OldValue &~ BitsToRemove,
OldValue) == OldValue)
{
/* We own the Guarded Mutex now */
break;
}
}
else
{
/* The Guarded Mutex is owned, so register as a waiter. Note the addition:
if the Waiter Woken bit was already set, the add carries it back into
the waiter count. */
if (InterlockedCompareExchange(&GuardedMutex->Count,
OldValue + BitsToAdd,
OldValue) != OldValue)
{
/* The Guarded Mutex value changed behind our back, start over */
continue;
}
/* Now we have to wait for it */
KeWaitForGate(&GuardedMutex->Gate, WrGuardedMutex, KernelMode);
/* Ok, the wait is done, so set the new bits */
BitsToRemove = GM_LOCK_BIT | GM_LOCK_WAITER_WOKEN;
BitsToAdd = GM_LOCK_WAITER_WOKEN;
}
}
}
VOID
FASTCALL
KeAcquireGuardedMutex(PKGUARDED_MUTEX GuardedMutex)
{
/* Disable Special APCs */
KeEnterGuardedRegion();
/* Do the Unsafe Acquire */
KeAcquireGuardedMutexUnsafe(GuardedMutex);
}
VOID
FASTCALL
KeAcquireGuardedMutexUnsafe(PKGUARDED_MUTEX GuardedMutex)
{
/* Remove the lock */
if (!InterlockedClearBit(&GuardedMutex->Count, 0))
{
/* The Guarded Mutex was already owned, take the contended path */
KiAcquireGuardedMutexContented(GuardedMutex);
}
/* Set the Owner */
GuardedMutex->Owner = KeGetCurrentThread();
}
VOID
FASTCALL
KeReleaseGuardedMutexUnsafe(PKGUARDED_MUTEX GuardedMutex)
{
LONG OldValue;
/* Destroy the Owner */
GuardedMutex->Owner = NULL;
/* Add the Lock Bit back; the old value tells us about waiters */
OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
/* Check if there are waiters and none of them has been woken yet */
if (OldValue && !(OldValue & GM_LOCK_WAITER_WOKEN))
{
/* Account for the Lock Bit we just added */
OldValue |= GM_LOCK_BIT;
/* Mark one waiter as woken and take it out of the waiter count */
if (InterlockedCompareExchange(&GuardedMutex->Count,
OldValue + GM_LOCK_WAITER_WOKEN - GM_LOCK_WAITER_INC,
OldValue) == OldValue)
{
/* Signal the Gate */
KeSignalGateBoostPriority(&GuardedMutex->Gate);
}
}
}
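
The Unsafe variants are meant for callers that are already inside a guarded region, for example when taking two guarded mutexes under a single region (hypothetical sketch):

    VOID
    ExampleAcquireBoth(PKGUARDED_MUTEX First, PKGUARDED_MUTEX Second)
    {
        /* One guarded region covers both acquires */
        KeEnterGuardedRegion();
        KeAcquireGuardedMutexUnsafe(First);
        KeAcquireGuardedMutexUnsafe(Second);

        /* ... touch data protected by both ... */

        KeReleaseGuardedMutexUnsafe(Second);
        KeReleaseGuardedMutexUnsafe(First);
        KeLeaveGuardedRegion();
    }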
VOID
FASTCALL
KeReleaseGuardedMutex(PKGUARDED_MUTEX GuardedMutex)
{
/* Do the actual release */
KeReleaseGuardedMutexUnsafe(GuardedMutex);
/* Re-enable APCs */
KeLeaveGuardedRegion();
}
BOOLEAN
FASTCALL
KeTryToAcquireGuardedMutex(PKGUARDED_MUTEX GuardedMutex)
{
/* Block APCs */
KeEnterGuardedRegion();
/* Try to take ownership by removing the lock bit */
if (!InterlockedClearBit(&GuardedMutex->Count, 0))
{
/* The Guarded Mutex was already owned; re-enable APCs */
KeLeaveGuardedRegion();
/* Return failure */
return FALSE;
}
/* Set the Owner */
GuardedMutex->Owner = KeGetCurrentThread();
return TRUE;
}
/* EOF */

View file

@ -649,6 +649,7 @@ KeWaitForSingleObject@20
;KiBugCheckData DATA
KiCoprocessorError@0
KiDeliverApc@12
KiKernelApcDeliveryCheck@0
KiDispatchInterrupt@0
KiEnableTimerWatchdog
KiInterruptDispatch2@8

View file

@ -44,10 +44,15 @@ static GENERIC_MAPPING PiThreadMapping = {STANDARD_RIGHTS_READ | THREAD_GET_CONT
/* FUNCTIONS ***************************************************************/
#ifdef KeGetCurrentThread
#undef KeGetCurrentThread
#endif
/*
* @implemented
*/
PKTHREAD STDCALL KeGetCurrentThread(VOID)
PKTHREAD
STDCALL
KeGetCurrentThread(VOID)
{
#ifdef CONFIG_SMP
ULONG Flags;

View file

@ -929,6 +929,21 @@ typedef struct _FAST_MUTEX {
ULONG OldIrql;
} FAST_MUTEX, *PFAST_MUTEX;
typedef struct _KGUARDED_MUTEX
{
LONG Count;
struct _KTHREAD* Owner;
ULONG Contention;
struct _KGATE* Gate;
union {
struct {
SHORT KernelApcDisable;
SHORT SpecialApcDisable;
};
ULONG CombinedApcDisable;
};
} KGUARDED_MUTEX, *PKGUARDED_MUTEX, *RESTRICTED_POINTER PRKGUARDED_MUTEX;
typedef struct _KTIMER {
DISPATCHER_HEADER Header;
ULARGE_INTEGER DueTime;