/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         GPL - See COPYING in the top level directory
 * FILE:            ntoskrnl/include/internal/spinlock.h
 * PURPOSE:         Internal Inlined Functions for spinlocks, shared with HAL
 * PROGRAMMERS:     Alex Ionescu (alex.ionescu@reactos.org)
 */

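/*
 * Out-of-line spin routine used by KxAcquireSpinLock on x86 debug builds;
 * the call site below passes 5 as the Flags argument.
 */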
#if defined(_M_IX86)
VOID
NTAPI
Kii386SpinOnSpinLock(PKSPIN_LOCK SpinLock, ULONG Flags);
#endif

//
// Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
//
_Acquires_nonreentrant_lock_(SpinLock)
FORCEINLINE
VOID
KxAcquireSpinLock(
#if defined(CONFIG_SMP) || DBG
    _Inout_
#else
    _Unreferenced_parameter_
#endif
    PKSPIN_LOCK SpinLock)
{
#if DBG
    /* Make sure that we don't own the lock already */
    if (((KSPIN_LOCK)KeGetCurrentThread() | 1) == *SpinLock)
    {
        /* We do, bugcheck! */
        KeBugCheckEx(SPIN_LOCK_ALREADY_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
    }
#endif

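    /*
     * NOTE: On debug builds the lock word holds the owning KTHREAD pointer
     * with bit 0 set (see the store at the end of this function). KTHREAD
     * pointers are pointer-aligned, so bit 0 is free to double as the
     * "lock held" flag, and comparing the tagged current thread against
     * the lock word is enough to catch a recursive acquisition.
     */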
#ifdef CONFIG_SMP
    /* Try to acquire the lock */
    while (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
    {
#if defined(_M_IX86) && DBG
        /* On x86 debug builds, we use a much slower but useful routine */
        Kii386SpinOnSpinLock(SpinLock, 5);
#else
        /* It's locked... spin until it's unlocked */
        while (*(volatile KSPIN_LOCK *)SpinLock & 1)
        {
            /* Yield and keep looping */
            YieldProcessor();
        }
#endif
    }
#endif

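    /*
     * The SMP path above is a test-and-test-and-set spin: the interlocked
     * bit-test-and-set performs the atomic acquire attempt, while the inner
     * read-only loop waits for the lock to look free before retrying, so
     * the bus-locked operation is not hammered while the lock is held.
     */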
    /* Add an explicit memory barrier to prevent the compiler from reordering
       memory accesses across the borders of spinlocks */
    KeMemoryBarrierWithoutFence();
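    /* (This is a compiler-only barrier: no fence instruction is emitted.
       On SMP the interlocked operation above already provides the hardware
       ordering, and on UP none is needed.) */
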
#if DBG
    /* On debug builds, we OR in the KTHREAD */
    *SpinLock = (KSPIN_LOCK)KeGetCurrentThread() | 1;
#endif
}

//
// Spinlock Release at IRQL >= DISPATCH_LEVEL
//
_Releases_nonreentrant_lock_(SpinLock)
FORCEINLINE
VOID
KxReleaseSpinLock(
#if defined(CONFIG_SMP) || DBG
    _Inout_
#else
    _Unreferenced_parameter_
#endif
    PKSPIN_LOCK SpinLock)
{
#if DBG
    /* Make sure that the threads match */
    if (((KSPIN_LOCK)KeGetCurrentThread() | 1) != *SpinLock)
    {
        /* They don't, bugcheck */
        KeBugCheckEx(SPIN_LOCK_NOT_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
    }
#endif

#if defined(CONFIG_SMP) || DBG
    /* Clear the lock */
#ifdef _WIN64
    InterlockedAnd64((PLONG64)SpinLock, 0);
#else
    InterlockedAnd((PLONG)SpinLock, 0);
#endif
#endif
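    /*
     * The interlocked AND with 0 clears the lock bit and, on debug builds,
     * the owner tag in a single atomic full-barrier store. On UP free
     * builds the lock word is never written on acquire, so there is
     * nothing to clear and the block above compiles away entirely.
     */
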
    /* Add an explicit memory barrier to prevent the compiler from reordering
       memory accesses across the borders of spinlocks */
    KeMemoryBarrierWithoutFence();
}
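
/*
 * Illustrative sketch (not part of this header): how a caller that is
 * already at DISPATCH_LEVEL or above would typically pair these helpers,
 * in the way the KeAcquireSpinLockAtDpcLevel-style kernel/HAL wrappers
 * are expected to. "SomeListLock" is a made-up name for illustration.
 *
 *     KSPIN_LOCK SomeListLock = 0;        // zero-initialized == free
 *
 *     KxAcquireSpinLock(&SomeListLock);   // IRQL already >= DISPATCH_LEVEL
 *     // ... touch the data protected by SomeListLock ...
 *     KxReleaseSpinLock(&SomeListLock);
 */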