/*
 * PROJECT:     ReactOS Kernel
 * LICENSE:     GPL - See COPYING in the top level directory
 * FILE:        ntoskrnl/ke/amd64/spinlock.c
 * PURPOSE:     Spinlock and Queued Spinlock Support
 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
 */

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
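
/* The public kernel headers can map KeAcquireSpinLock and KeReleaseSpinLock
   to macros (KeAcquireSpinLock, for example, expands to a call to
   KeAcquireSpinLockRaiseToDpc), so undefine the macro forms here to let the
   exported functions defined below keep these names. */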
#undef KeAcquireSpinLock
#undef KeReleaseSpinLock

/* FUNCTIONS *****************************************************************/

/*
 * @implemented
 */
KIRQL
KeAcquireSpinLockRaiseToSynch(PKSPIN_LOCK SpinLock)
{
    KIRQL OldIrql;

    /* Raise to sync */
    KeRaiseIrql(SYNCH_LEVEL, &OldIrql);

    /* Acquire the lock and return */
    KxAcquireSpinLock(SpinLock);
    return OldIrql;
}
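
/* Note: on UP builds SYNCH_LEVEL is defined as DISPATCH_LEVEL, while on SMP
   builds it is a higher level (IPI_LEVEL - 2), so the synch variants only
   behave differently from the DPC-level variants on multiprocessor builds. */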

/*
 * @implemented
 */
KIRQL
NTAPI
KeAcquireSpinLockRaiseToDpc(PKSPIN_LOCK SpinLock)
{
    KIRQL OldIrql;

    /* Raise to dispatch */
    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);

    /* Acquire the lock and return */
    KxAcquireSpinLock(SpinLock);
    return OldIrql;
}
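
/* Illustrative usage of the standard spinlock API (a sketch only; MyLock and
 * the data it protects are hypothetical and not part of this file):
 *
 *     KSPIN_LOCK MyLock;
 *     KIRQL OldIrql;
 *
 *     KeInitializeSpinLock(&MyLock);
 *     OldIrql = KeAcquireSpinLockRaiseToDpc(&MyLock);
 *     ... touch the protected data at DISPATCH_LEVEL ...
 *     KeReleaseSpinLock(&MyLock, OldIrql);
 */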

/*
 * @implemented
 */
VOID
NTAPI
KeReleaseSpinLock(PKSPIN_LOCK SpinLock,
                  KIRQL OldIrql)
{
    /* Release the lock and lower IRQL back */
    KxReleaseSpinLock(SpinLock);
    KeLowerIrql(OldIrql);
}
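
/* The queued spinlock routines below do not implement true queued (per-CPU
   wait block) acquisition yet: as the HACK markers note, they fall back to
   acquiring the underlying KSPIN_LOCK referenced by the PRCB's built-in
   lock queue entry as a plain spinlock. */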

/*
 * @implemented
 */
KIRQL
KeAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
{
    KIRQL OldIrql;

    /* Raise to dispatch */
    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);

    /* Acquire the lock */
    KxAcquireSpinLock(KeGetCurrentPrcb()->LockQueue[LockNumber].Lock); // HACK
    return OldIrql;
}

/*
 * @implemented
 */
KIRQL
KeAcquireQueuedSpinLockRaiseToSynch(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
{
    KIRQL OldIrql;

    /* Raise to synch */
    KeRaiseIrql(SYNCH_LEVEL, &OldIrql);

    /* Acquire the lock */
    KxAcquireSpinLock(KeGetCurrentPrcb()->LockQueue[LockNumber].Lock); // HACK
    return OldIrql;
}

/*
 * @implemented
 */
VOID
KeAcquireInStackQueuedSpinLock(IN PKSPIN_LOCK SpinLock,
                               IN PKLOCK_QUEUE_HANDLE LockHandle)
{
    /* Set up the lock */
    LockHandle->LockQueue.Next = NULL;
    LockHandle->LockQueue.Lock = SpinLock;

    /* Raise to dispatch */
    KeRaiseIrql(DISPATCH_LEVEL, &LockHandle->OldIrql);

    /* Acquire the lock */
    KxAcquireSpinLock(LockHandle->LockQueue.Lock); // HACK
}

/*
 * @implemented
 */
VOID
KeAcquireInStackQueuedSpinLockRaiseToSynch(IN PKSPIN_LOCK SpinLock,
                                           IN PKLOCK_QUEUE_HANDLE LockHandle)
{
    /* Set up the lock */
    LockHandle->LockQueue.Next = NULL;
    LockHandle->LockQueue.Lock = SpinLock;

    /* Raise to synch */
    KeRaiseIrql(SYNCH_LEVEL, &LockHandle->OldIrql);

    /* Acquire the lock */
    KxAcquireSpinLock(LockHandle->LockQueue.Lock); // HACK
}

/*
 * @implemented
 */
VOID
KeReleaseQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
                        IN KIRQL OldIrql)
{
    /* Release the lock */
    KxReleaseSpinLock(KeGetCurrentPrcb()->LockQueue[LockNumber].Lock); // HACK

    /* Lower IRQL back */
    KeLowerIrql(OldIrql);
}
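
/* Illustrative usage of the numbered queued spinlock API (a sketch only;
 * LockQueuePfnLock is just one of the predefined queue numbers):
 *
 *     KIRQL OldIrql;
 *
 *     OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
 *     ... work at DISPATCH_LEVEL while owning that lock ...
 *     KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
 */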

/*
 * @implemented
 */
VOID
KeReleaseInStackQueuedSpinLock(IN PKLOCK_QUEUE_HANDLE LockHandle)
{
    /* Release the lock and lower IRQL back */
    KxReleaseSpinLock(LockHandle->LockQueue.Lock); // HACK
    KeLowerIrql(LockHandle->OldIrql);
}
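
/* Illustrative usage of the in-stack queued spinlock API (a sketch only;
 * MyLock is hypothetical and the KLOCK_QUEUE_HANDLE lives on the caller's
 * stack):
 *
 *     KSPIN_LOCK MyLock;
 *     KLOCK_QUEUE_HANDLE LockHandle;
 *
 *     KeInitializeSpinLock(&MyLock);
 *     KeAcquireInStackQueuedSpinLock(&MyLock, &LockHandle);
 *     ... work at DISPATCH_LEVEL while holding the lock ...
 *     KeReleaseInStackQueuedSpinLock(&LockHandle);
 */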

/*
 * @implemented
 */
BOOLEAN
KeTryToAcquireQueuedSpinLockRaiseToSynch(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
                                         IN PKIRQL OldIrql)
{
#ifndef CONFIG_SMP
    /* Simply raise to dispatch (SYNCH_LEVEL is DISPATCH_LEVEL on UP builds) */
    KeRaiseIrql(DISPATCH_LEVEL, OldIrql);

    /* Add an explicit memory barrier to prevent the compiler from reordering
       memory accesses across the borders of spinlocks */
    KeMemoryBarrierWithoutFence();

    /* Always return true on UP Machines */
    return TRUE;
#else
    /* Queued try-acquire is not implemented for SMP yet */
    UNIMPLEMENTED;
    ASSERT(FALSE);
    return FALSE;
#endif
}

/*
 * @implemented
 */
LOGICAL
KeTryToAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
                             OUT PKIRQL OldIrql)
{
#ifndef CONFIG_SMP
    /* Simply raise to dispatch */
    KeRaiseIrql(DISPATCH_LEVEL, OldIrql);

    /* Add an explicit memory barrier to prevent the compiler from reordering
       memory accesses across the borders of spinlocks */
    KeMemoryBarrierWithoutFence();

    /* Always return true on UP Machines */
    return TRUE;
#else
    /* Queued try-acquire is not implemented for SMP yet */
    UNIMPLEMENTED;
    ASSERT(FALSE);
    return FALSE;
#endif
}

/* EOF */