- Fix KiAcquireGuardedMutexContented; it was broken in contended cases.

- Inline entering/leaving guarded regions and the guarded mutex code, instead of going through 3-4 indirect calls.
- Add a mountain of ASSERTs to detect incorrect usage/state.
- Set ->SpecialApcDisable in the Guarded Mutex.
- Fix broken KTHREAD definition: SpecialApcDisable and KernelApcDisable were incorrectly declared as USHORT instead of SHORT, which could cause severe trouble in optimized builds (at least under MSVC, since MSVC won't allow a negative number in an unsigned short). See the first sketch after this list.
- Use GM_LOCK_BIT_V, the bit index matching the GM_LOCK_BIT mask. See the second sketch after this list.
- Fix broken KeTryToAcquireGuardedMutex prototype.
- Fix broken KGUARDED_MUTEX typedef and add bit values.
- Fix broken Interlocked* prototypes with respect to volatile qualification.
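
The two sketches below are editorial illustrations, not part of the commit. The first is a minimal user-mode sketch of the SHORT vs. USHORT point above: guarded regions are tracked by driving SpecialApcDisable below zero, so the sign-sensitive checks only work on a signed field (FAKE_KTHREAD is a made-up stand-in for KTHREAD).

/* Editorial sketch, not ReactOS code: FAKE_KTHREAD is a made-up stand-in. */
#include <assert.h>

typedef struct _FAKE_KTHREAD
{
    short SpecialApcDisable;   /* fixed type: signed, legitimately goes negative */
} FAKE_KTHREAD;

int main(void)
{
    FAKE_KTHREAD Thread = { 0 };

    /* Entering a guarded region decrements the field below zero */
    Thread.SpecialApcDisable--;
    assert(Thread.SpecialApcDisable < 0);   /* only true for a signed field */

    /* With the old "unsigned short" the decrement wraps to 0xFFFF, the
       "< 0" test is always false, and MSVC complains about storing a
       negative number in the field, as the commit message notes. */
    return 0;
}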

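The second sketch illustrates the GM_LOCK_BIT_V point: GM_LOCK_BIT (0x1) is the mask used in arithmetic and logical tests on Count, while GM_LOCK_BIT_V (0) is the matching bit index expected by InterlockedBitTestAndReset, as used on the fast acquire path in the diff below. This is a user-mode approximation for illustration only.

/* Editorial sketch, user-mode approximation; mirrors the fast path shown
   in the KiAcquireGuardedMutex hunk below. */
#include <windows.h>
#include <stdio.h>

#define GM_LOCK_BIT   0x1   /* mask:  tested with (Count & GM_LOCK_BIT)      */
#define GM_LOCK_BIT_V 0x0   /* index: passed to InterlockedBitTestAndReset   */

int main(void)
{
    LONG volatile Count = GM_LOCK_BIT;   /* a free mutex has the lock bit set */

    /* Atomically clear bit 0; the return value is the bit's previous state.
       If it was already clear, the mutex was owned and the contended path
       (KiAcquireGuardedMutexContented) would have to run. */
    BOOLEAN OldBit = InterlockedBitTestAndReset(&Count, GM_LOCK_BIT_V);
    printf("acquired: %d, Count is now %ld\n", OldBit, (long)Count);
    return 0;
}
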
svn path=/trunk/; revision=23037
Alex Ionescu 2006-07-13 04:20:18 +00:00
parent b71f24ddc1
commit 5ecb728b0e
7 changed files with 325 additions and 177 deletions

View file

@@ -1167,20 +1167,27 @@ typedef struct _KGATE
     DISPATCHER_HEADER Header;
 } KGATE, *PKGATE, *RESTRICTED_POINTER PRKGATE;
 
+#define GM_LOCK_BIT          0x1
+#define GM_LOCK_BIT_V        0x0
+#define GM_LOCK_WAITER_WOKEN 0x2
+#define GM_LOCK_WAITER_INC   0x4
+
 typedef struct _KGUARDED_MUTEX
 {
-    LONG Count;
-    struct _KTHREAD* Owner;
+    volatile LONG Count;
+    PKTHREAD Owner;
     ULONG Contention;
     KGATE Gate;
-    union {
-        struct {
+    union
+    {
+        struct
+        {
             SHORT KernelApcDisable;
             SHORT SpecialApcDisable;
         };
         ULONG CombinedApcDisable;
     };
-} KGUARDED_MUTEX, *PKGUARDED_MUTEX, *RESTRICTED_POINTER PRKGUARDED_MUTEX;
+} KGUARDED_MUTEX, *PKGUARDED_MUTEX;
 
 typedef struct _KTIMER {
     DISPATCHER_HEADER Header;
@@ -5195,19 +5202,19 @@ NTOSAPI
 LONG
 DDKFASTAPI
 InterlockedIncrement(
-  IN PLONG VOLATILE Addend);
+  IN OUT LONG volatile *Addend);
 
 NTOSAPI
 LONG
 DDKFASTAPI
 InterlockedDecrement(
-  IN PLONG VOLATILE Addend);
+  IN OUT LONG volatile *Addend);
 
 NTOSAPI
 LONG
 DDKFASTAPI
 InterlockedCompareExchange(
-  IN OUT PLONG VOLATILE Destination,
+  IN OUT LONG volatile *Destination,
   IN LONG Exchange,
   IN LONG Comparand);
@@ -5215,14 +5222,14 @@ NTOSAPI
 LONG
 DDKFASTAPI
 InterlockedExchange(
-  IN OUT PLONG VOLATILE Target,
+  IN OUT LONG volatile *Destination,
   IN LONG Value);
 
 NTOSAPI
 LONG
 DDKFASTAPI
 InterlockedExchangeAdd(
-  IN OUT PLONG VOLATILE Addend,
+  IN OUT LONG volatile *Addend,
   IN LONG Value);
 
 /*
@@ -6315,48 +6322,52 @@ RtlxUnicodeStringToAnsiSize(
 /* Guarded Mutex routines */
 
 VOID
 FASTCALL
 KeAcquireGuardedMutex(
-    PKGUARDED_MUTEX GuardedMutex
+    IN OUT PKGUARDED_MUTEX GuardedMutex
 );
 
 VOID
 FASTCALL
 KeAcquireGuardedMutexUnsafe(
-    PKGUARDED_MUTEX GuardedMutex
+    IN OUT PKGUARDED_MUTEX GuardedMutex
 );
 
 VOID
-STDCALL
-KeEnterGuardedRegion(VOID);
+NTAPI
+KeEnterGuardedRegion(
+    VOID
+);
 
 VOID
-STDCALL
-KeLeaveGuardedRegion(VOID);
+NTAPI
+KeLeaveGuardedRegion(
+    VOID
+);
 
 VOID
 FASTCALL
 KeInitializeGuardedMutex(
-    PKGUARDED_MUTEX GuardedMutex
+    OUT PKGUARDED_MUTEX GuardedMutex
 );
 
 VOID
 FASTCALL
 KeReleaseGuardedMutexUnsafe(
-    PKGUARDED_MUTEX GuardedMutex
+    IN OUT PKGUARDED_MUTEX GuardedMutex
 );
 
 VOID
 FASTCALL
 KeReleaseGuardedMutex(
-    PKGUARDED_MUTEX GuardedMutex
+    IN OUT PKGUARDED_MUTEX GuardedMutex
 );
 
-BOOL
+BOOLEAN
 FASTCALL
 KeTryToAcquireGuardedMutex(
-    PKGUARDED_MUTEX GuardedMutex
+    IN OUT PKGUARDED_MUTEX GuardedMutex
 );
 
 /* Fast Mutex */

View file

@@ -695,8 +695,8 @@ typedef struct _KTHREAD
     {
         struct
         {
-            USHORT KernelApcDisable;
-            USHORT SpecialApcDisable;
+            SHORT KernelApcDisable;
+            SHORT SpecialApcDisable;
         };
         ULONG CombinedApcDisable;
     };

View file

@@ -3785,7 +3785,7 @@ static __inline PVOID GetFiberData(void)
 
 #if defined(__GNUC__)
 static __inline__ BOOLEAN
-InterlockedBitTestAndSet(IN LONG *Base,
+InterlockedBitTestAndSet(IN LONG volatile *Base,
                          IN LONG Bit)
 {
     LONG OldBit;
@@ -3800,7 +3800,7 @@ InterlockedBitTestAndSet(IN LONG *Base,
 }
 
 static __inline__ BOOLEAN
-InterlockedBitTestAndReset(IN LONG *Base,
+InterlockedBitTestAndReset(IN LONG volatile *Base,
                            IN LONG Bit)
 {
     LONG OldBit;

View file

@@ -709,4 +709,6 @@ KeV86Exception(
     ULONG address
 );
 
+#include "ke_x.h"
+
 #endif /* __NTOSKRNL_INCLUDE_INTERNAL_KE_H */

View file

@@ -0,0 +1,59 @@
/*
* PROJECT: ReactOS Kernel
* LICENSE: GPL - See COPYING in the top level directory
* FILE: ntoskrnl/include/ke_x.h
* PURPOSE: Internal Inlined Functions for the Kernel
* PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
*/
//
// Guarded Region Routines
//
#define KeEnterGuardedRegion() \
{ \
PKTHREAD Thread = KeGetCurrentThread(); \
\
/* Sanity checks */ \
ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL); \
ASSERT(Thread == KeGetCurrentThread()); \
ASSERT((Thread->SpecialApcDisable <= 0) && \
(Thread->SpecialApcDisable != -32768)); \
\
/* Disable Special APCs */ \
Thread->SpecialApcDisable--; \
}
#define KeLeaveGuardedRegion() \
{ \
PKTHREAD Thread = KeGetCurrentThread(); \
\
/* Sanity checks */ \
ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL); \
ASSERT(Thread == KeGetCurrentThread()); \
ASSERT(Thread->SpecialApcDisable < 0); \
\
/* Leave region and check if APCs are OK now */ \
if (!(++Thread->SpecialApcDisable)) \
{ \
/* Check for Kernel APCs on the list */ \
if (!IsListEmpty(&Thread->ApcState. \
ApcListHead[KernelMode])) \
{ \
/* Check for APC Delivery */ \
KiCheckForKernelApcDelivery(); \
} \
} \
}
//
// TODO: Guarded Mutex Routines
//
//
// TODO: Critical Region Routines
//
//
// TODO: Wait Routines
//

View file

@@ -1,10 +1,9 @@
 /*
- * COPYRIGHT:       See COPYING in the top level directory
  * PROJECT:         ReactOS Kernel
+ * LICENSE:         GPL - See COPYING in the top level directory
  * FILE:            ntoskrnl/ke/gate.c
  * PURPOSE:         Implements the Gate Dispatcher Object
- *
- * PROGRAMMERS:     Alex Ionescu (alex@relsoft.net)
+ * PROGRAMMERS:     Alex Ionescu (alex.ionescu@reactos.org)
  */
 
 /* INCLUDES *****************************************************************/
@@ -18,10 +17,8 @@
 
 VOID
 FASTCALL
-KeInitializeGate(PKGATE Gate)
+KeInitializeGate(IN PKGATE Gate)
 {
-    DPRINT("KeInitializeGate(Gate %x)\n", Gate);
-
     /* Initialize the Dispatcher Header */
     KeInitializeDispatcherHeader(&Gate->Header,
                                  GateObject,
@@ -98,7 +95,7 @@ KeWaitForGate(IN PKGATE Gate,
 
 VOID
 FASTCALL
-KeSignalGateBoostPriority(PKGATE Gate)
+KeSignalGateBoostPriority(IN PKGATE Gate)
 {
     PKTHREAD WaitThread;
     PKWAIT_BLOCK WaitBlock;

View file

@@ -1,72 +1,139 @@
 /*
- * COPYRIGHT:       See COPYING in the top level directory
  * PROJECT:         ReactOS Kernel
- * FILE:            ntoskrnl/ke/gmutex.c
- * PURPOSE:         Implements Guarded Mutex
- *
- * PROGRAMMERS:     Alex Ionescu (alex@relsoft.net) and
- *                  Filip Navara (xnavara@volny.cz)
+ * LICENSE:         GPL - See COPYING in the top level directory
+ * FILE:            ntoskrnl/ke/gate.c
+ * PURPOSE:         Implements Guarded Mutex
+ * PROGRAMMERS:     Alex Ionescu (alex.ionescu@reactos.org)
+ *                  Filip Navara (navaraf@reactos.org)
  */
 
-/* INCLUDES *****************************************************************/
+/* INCLUDES ******************************************************************/
 
+#define NTDDI_VERSION NTDDI_WS03SP1
 #include <ntoskrnl.h>
-#define NDEBUG
 #include <internal/debug.h>
 
-UCHAR
-FASTCALL
-InterlockedClearBit(PLONG Destination,
-                    LONG Bit);
-
-typedef enum _KGUARDED_MUTEX_BITS
-{
-    GM_LOCK_BIT = 1,
-    GM_LOCK_WAITER_WOKEN = 2,
-    GM_LOCK_WAITER_INC = 4
-} KGUARDED_MUTEX_BITS;
-
-/* FUNCTIONS *****************************************************************/
-
-/**
- * @name KeEnterGuardedRegion
- *
- * Enters a guarded region. This causes all (incl. special kernel) APCs
- * to be disabled.
- */
-VOID
-STDCALL
-KeEnterGuardedRegion(VOID)
-{
-    /* Disable Special APCs */
-    KeGetCurrentThread()->SpecialApcDisable--;
-}
-
-/**
- * @name KeLeaveGuardedRegion
- *
- * Leaves a guarded region and delivers pending APCs if possible.
- */
-VOID
-STDCALL
-KeLeaveGuardedRegion(VOID)
-{
-    PKTHREAD Thread = KeGetCurrentThread();
-
-    /* Boost the enable count and check if Special APCs are enabled */
-    if (++Thread->SpecialApcDisable == 0)
-    {
-        /* Check if there are Kernel APCs on the list */
-        if (!IsListEmpty(&Thread->ApcState.ApcListHead[KernelMode]))
-        {
-            /* Check for APC Delivery */
-            KiCheckForKernelApcDelivery();
-        }
-    }
-}
+/* PRIVATE FUNCTIONS *********************************************************/
+
+VOID
+FASTCALL
+KiAcquireGuardedMutexContented(IN OUT PKGUARDED_MUTEX GuardedMutex)
+{
+    ULONG BitsToRemove, BitsToAdd;
+    LONG OldValue, NewValue;
+
+    /* Increase the contention count */
+    GuardedMutex->Contention++;
+
+    /* Start by unlocking the Guarded Mutex */
+    BitsToRemove = GM_LOCK_BIT;
+    BitsToAdd = GM_LOCK_WAITER_INC;
+
+    /* Get the Count Bits */
+    OldValue = GuardedMutex->Count;
+
+    /* Start change loop */
+    for (;;)
+    {
+        /* Loop sanity checks */
+        ASSERT((BitsToRemove == GM_LOCK_BIT) ||
+               (BitsToRemove == (GM_LOCK_BIT | GM_LOCK_WAITER_WOKEN)));
+        ASSERT((BitsToAdd == GM_LOCK_WAITER_INC) ||
+               (BitsToAdd == GM_LOCK_WAITER_WOKEN));
+
+        /* Check if the Guarded Mutex is locked */
+        if (OldValue & GM_LOCK_BIT)
+        {
+            /* Sanity check */
+            ASSERT((BitsToRemove == GM_LOCK_BIT) ||
+                   ((OldValue & GM_LOCK_WAITER_WOKEN) != 0));
+
+            /* Unlock it by removing the Lock Bit */
+            NewValue = InterlockedCompareExchange(&GuardedMutex->Count,
+                                                  OldValue ^ BitsToRemove,
+                                                  OldValue);
+            if (NewValue == OldValue) break;
+
+            /* Value got changed behind our backs, start over */
+            OldValue = NewValue;
+        }
+        else
+        {
+            /* The Guarded Mutex isn't locked, so simply set the bits */
+            NewValue = InterlockedCompareExchange(&GuardedMutex->Count,
+                                                  OldValue + BitsToAdd,
+                                                  OldValue);
+            if (NewValue != OldValue)
+            {
+                /* Value got changed behind our backs, start over */
+                OldValue = NewValue;
+                continue;
+            }
+
+            /* Now we have to wait for it */
+            KeWaitForGate(&GuardedMutex->Gate, WrGuardedMutex, KernelMode);
+            ASSERT((GuardedMutex->Count & GM_LOCK_WAITER_WOKEN) != 0);
+
+            /* Ok, the wait is done, so set the new bits */
+            BitsToRemove = GM_LOCK_BIT | GM_LOCK_WAITER_WOKEN;
+            BitsToAdd = GM_LOCK_WAITER_WOKEN;
+        }
+    }
+}
+
+VOID
+FORCEINLINE
+FASTCALL
+KiAcquireGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
+{
+    BOOLEAN OldBit;
+
+    /* Remove the lock */
+    OldBit = InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V);
+    if (!OldBit)
+    {
+        /* The Guarded Mutex was already locked, enter contented case */
+        KiAcquireGuardedMutexContented(GuardedMutex);
+    }
+}
+
+VOID
+FORCEINLINE
+FASTCALL
+KiReleaseGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
+{
+    LONG OldValue;
+
+    /* Destroy the Owner */
+    GuardedMutex->Owner = NULL;
+
+    /* Add the Lock Bit */
+    OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, 1);
+    ASSERT((OldValue & GM_LOCK_BIT) == 0);
+
+    /* Check if it was already locked, but not woken */
+    if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
+    {
+        /* Update the Oldvalue to what it should be now */
+        OldValue |= GM_LOCK_BIT;
+
+        /* Remove the Woken bit */
+        if (InterlockedCompareExchange(&GuardedMutex->Count,
+                                       OldValue - GM_LOCK_WAITER_WOKEN,
+                                       OldValue) == OldValue)
+        {
+            /* Signal the Gate */
+            KeSignalGateBoostPriority(&GuardedMutex->Gate);
+        }
+    }
+}
+
+/* PUBLIC FUNCTIONS **********************************************************/
 
 VOID
 FASTCALL
-KeInitializeGuardedMutex(PKGUARDED_MUTEX GuardedMutex)
+KeInitializeGuardedMutex(OUT PKGUARDED_MUTEX GuardedMutex)
 {
     /* Setup the Initial Data */
     GuardedMutex->Count = GM_LOCK_BIT;
@@ -79,143 +146,155 @@ KeInitializeGuardedMutex(PKGUARDED_MUTEX GuardedMutex)
 
 VOID
 FASTCALL
-KiAcquireGuardedMutexContented(PKGUARDED_MUTEX GuardedMutex)
-{
-    ULONG BitsToRemove;
-    ULONG BitsToAdd;
-    LONG OldValue;
-
-    /* Increase the contention count */
-    InterlockedIncrement((PLONG)&GuardedMutex->Contention);
-
-    /* Start by unlocking the Guarded Mutex */
-    BitsToRemove = GM_LOCK_BIT;
-    BitsToAdd = GM_LOCK_WAITER_INC;
-
-    while (1)
-    {
-        /* Get the Count Bits */
-        OldValue = (volatile LONG)GuardedMutex->Count;
-
-        /* Check if the Guarded Mutex is locked */
-        if (OldValue & GM_LOCK_BIT)
-        {
-            /* Unlock it by removing the Lock Bit */
-            if (InterlockedCompareExchange(&GuardedMutex->Count,
-                                           OldValue &~ BitsToRemove,
-                                           OldValue) == OldValue)
-            {
-                /* The Guarded Mutex is now unlocked */
-                break;
-            }
-        }
-        else
-        {
-            /* The Guarded Mutex isn't locked, so simply set the bits */
-            if (InterlockedCompareExchange(&GuardedMutex->Count,
-                                           OldValue | BitsToAdd,
-                                           OldValue) != OldValue)
-            {
-                /* The Guarded Mutex value changed behind our back, start over */
-                continue;
-            }
-
-            /* Now we have to wait for it */
-            KeWaitForGate(&GuardedMutex->Gate, WrGuardedMutex, KernelMode);
-
-            /* Ok, the wait is done, so set the new bits */
-            BitsToRemove = GM_LOCK_BIT | GM_LOCK_WAITER_WOKEN;
-            BitsToAdd = GM_LOCK_WAITER_WOKEN;
-        }
-    }
-}
-
-VOID
-FASTCALL
-KeAcquireGuardedMutexUnsafe(PKGUARDED_MUTEX GuardedMutex)
-{
-    /* Remove the lock */
-    if (!InterlockedBitTestAndReset(&GuardedMutex->Count, 0))
-    {
-        /* The Guarded Mutex was already locked, enter contented case */
-        KiAcquireGuardedMutexContented(GuardedMutex);
-    }
+KeAcquireGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
+{
+    PKTHREAD Thread = KeGetCurrentThread();
+
+    /* Sanity checks */
+    ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
+           (Thread->SpecialApcDisable < 0) ||
+           (Thread->Teb == NULL) ||
+           (Thread->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
+    ASSERT(GuardedMutex->Owner != Thread);
+
+    /* Do the actual acquire */
+    KiAcquireGuardedMutex(GuardedMutex);
 
     /* Set the Owner */
-    GuardedMutex->Owner = KeGetCurrentThread();
+    GuardedMutex->Owner = Thread;
 }
 
 VOID
 FASTCALL
-KeReleaseGuardedMutexUnsafe(PKGUARDED_MUTEX GuardedMutex)
+KeReleaseGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
 {
-    LONG OldValue;
-
-    /* Destroy the Owner */
-    GuardedMutex->Owner = NULL;
-
-    /* Add the Lock Bit */
-    OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, 1);
-
-    /* Check if it was already locked, but not woken */
-    if (OldValue && !(OldValue & GM_LOCK_WAITER_WOKEN))
-    {
-        /* Update the Oldvalue to what it should be now */
-        OldValue |= GM_LOCK_BIT;
-
-        /* Remove the Woken bit */
-        if (InterlockedCompareExchange(&GuardedMutex->Count,
-                                       OldValue &~ GM_LOCK_WAITER_WOKEN,
-                                       OldValue) == OldValue)
-        {
-            /* Signal the Gate */
-            KeSignalGateBoostPriority(&GuardedMutex->Gate);
-        }
-    }
+    /* Sanity checks */
+    ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
+           (KeGetCurrentThread()->SpecialApcDisable < 0) ||
+           (KeGetCurrentThread()->Teb == NULL) ||
+           (KeGetCurrentThread()->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
+    ASSERT(GuardedMutex->Owner == KeGetCurrentThread());
+
+    /* Release the mutex */
+    KiReleaseGuardedMutex(GuardedMutex);
 }
 
 VOID
 FASTCALL
-KeAcquireGuardedMutex(PKGUARDED_MUTEX GuardedMutex)
+KeAcquireGuardedMutex(IN PKGUARDED_MUTEX GuardedMutex)
 {
+    PKTHREAD Thread = KeGetCurrentThread();
+
+    /* Sanity checks */
+    ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL);
+    ASSERT(GuardedMutex->Owner != Thread);
+
     /* Disable Special APCs */
     KeEnterGuardedRegion();
 
-    /* Do the Unsafe Acquire */
-    KeAcquireGuardedMutexUnsafe(GuardedMutex);
+    /* Do the actual acquire */
+    KiAcquireGuardedMutex(GuardedMutex);
+
+    /* Set the Owner and Special APC Disable state */
+    GuardedMutex->Owner = Thread;
+    GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
 }
 
 VOID
 FASTCALL
-KeReleaseGuardedMutex(PKGUARDED_MUTEX GuardedMutex)
+KeReleaseGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
 {
-    /* Do the actual release */
-    KeReleaseGuardedMutexUnsafe(GuardedMutex);
+    /* Sanity checks */
+    ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL);
+    ASSERT(GuardedMutex->Owner == KeGetCurrentThread());
+    ASSERT(GuardedMutex->SpecialApcDisable ==
+           KeGetCurrentThread()->SpecialApcDisable);
+
+    /* Release the mutex */
+    KiReleaseGuardedMutex(GuardedMutex);
 
     /* Re-enable APCs */
     KeLeaveGuardedRegion();
 }
 
-BOOL
+BOOLEAN
 FASTCALL
-KeTryToAcquireGuardedMutex(PKGUARDED_MUTEX GuardedMutex)
+KeTryToAcquireGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
 {
+    PKTHREAD Thread = KeGetCurrentThread();
+    BOOLEAN OldBit;
+
     /* Block APCs */
     KeEnterGuardedRegion();
 
     /* Remove the lock */
-    if (InterlockedBitTestAndReset(&GuardedMutex->Count, 0))
+    OldBit = InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V);
+    if (OldBit)
     {
         /* Re-enable APCs */
         KeLeaveGuardedRegion();
+        YieldProcessor();
 
         /* Return failure */
         return FALSE;
     }
 
-    /* Set the Owner */
-    GuardedMutex->Owner = KeGetCurrentThread();
+    /* Set the Owner and APC State */
+    GuardedMutex->Owner = Thread;
+    GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
     return TRUE;
 }
 
+/**
+ * @name KeEnterGuardedRegion
+ *
+ * Enters a guarded region. This causes all (incl. special kernel) APCs
+ * to be disabled.
+ */
+#undef KeEnterGuardedRegion
+VOID
+NTAPI
+KeEnterGuardedRegion(VOID)
+{
+    PKTHREAD Thread = KeGetCurrentThread();
+
+    /* Sanity checks */
+    ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL);
+    ASSERT(Thread == KeGetCurrentThread());
+    ASSERT((Thread->SpecialApcDisable <= 0) &&
+           (Thread->SpecialApcDisable != -32768));
+
+    /* Disable Special APCs */
+    Thread->SpecialApcDisable--;
+}
+
+/**
+ * @name KeLeaveGuardedRegion
+ *
+ * Leaves a guarded region and delivers pending APCs if possible.
+ */
+#undef KeLeaveGuardedRegion
+VOID
+NTAPI
+KeLeaveGuardedRegion(VOID)
+{
+    PKTHREAD Thread = KeGetCurrentThread();
+
+    /* Sanity checks */
+    ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL);
+    ASSERT(Thread == KeGetCurrentThread());
+    ASSERT(Thread->SpecialApcDisable < 0);
+
+    /* Boost the enable count and check if Special APCs are enabled */
+    if (!(++Thread->SpecialApcDisable))
+    {
+        /* Check if there are Kernel APCs on the list */
+        if (!IsListEmpty(&Thread->ApcState.ApcListHead[KernelMode]))
+        {
+            /* Check for APC Delivery */
+            KiCheckForKernelApcDelivery();
+        }
+    }
+}
+
 /* EOF */