mirror of https://github.com/reactos/reactos.git
[NTOSKRNL/HAL]
- Add explicit memory barriers to the KxAcquireSpinLock and KxReleaseSpinLock inline functions, and to KeTryToAcquireQueuedSpinLock and KeTryToAcquireQueuedSpinLockRaiseToSynch, in the UP case. This prevents the compiler from reordering memory access instructions across the boundaries of these functions, even when they are inlined.
- Use the inline functions in the x64 spinlock functions, too.

svn path=/trunk/; revision=52078
This commit is contained in:
parent 6394fc44c7
commit b19da39918

3 changed files with 75 additions and 46 deletions
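For context on the change: on a uniprocessor (UP) build these lock routines compile to almost nothing, so once they are inlined the compiler is free to move loads and stores belonging to the protected section outside the acquire/release pair. _ReadWriteBarrier() is an MSVC compiler intrinsic that emits no instructions but forbids such movement. A minimal sketch of the failure mode and the fix follows; the names here are illustrative, not taken from the ReactOS tree:

    /* Hypothetical shared state protected by a UP "spinlock" */
    static LONG SharedCounter;

    FORCEINLINE
    VOID
    UpAcquireLock(VOID)
    {
        /* No actual locking is needed on UP at IRQL >= DISPATCH_LEVEL,
           but without a barrier the compiler may hoist the caller's
           protected accesses above this point once this is inlined */
        _ReadWriteBarrier();
    }

    FORCEINLINE
    VOID
    UpReleaseLock(VOID)
    {
        /* Likewise, keep protected accesses from sinking below here */
        _ReadWriteBarrier();
    }

    VOID
    IncrementUnderLock(VOID)
    {
        UpAcquireLock();
        SharedCounter++;    /* must stay between acquire and release */
        UpReleaseLock();
    }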
@@ -188,6 +188,10 @@ KeTryToAcquireQueuedSpinLockRaiseToSynch(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
     /* Simply raise to synch */
     KeRaiseIrql(SYNCH_LEVEL, OldIrql);
 
+    /* Add an explicit memory barrier to prevent the compiler from reordering
+       memory accesses across the borders of spinlocks */
+    _ReadWriteBarrier();
+
     /* Always return true on UP Machines */
     return TRUE;
 }
@@ -208,6 +212,10 @@ KeTryToAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
     /* Simply raise to dispatch */
     KeRaiseIrql(DISPATCH_LEVEL, OldIrql);
 
+    /* Add an explicit memory barrier to prevent the compiler from reordering
+       memory accesses across the borders of spinlocks */
+    _ReadWriteBarrier();
+
     /* Always return true on UP Machines */
     return TRUE;
 }
@@ -21,6 +21,10 @@ KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
 {
     /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
     UNREFERENCED_PARAMETER(SpinLock);
+
+    /* Add an explicit memory barrier to prevent the compiler from reordering
+       memory accesses across the borders of spinlocks */
+    _ReadWriteBarrier();
 }
 
 //
@@ -32,6 +36,10 @@ KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
 {
     /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
     UNREFERENCED_PARAMETER(SpinLock);
+
+    /* Add an explicit memory barrier to prevent the compiler from reordering
+       memory accesses across the borders of spinlocks */
+    _ReadWriteBarrier();
 }
 
 #else
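For contrast with the UP stubs above: on SMP builds the acquire path must really spin, and the interlocked operation doubles as the memory barrier, which is why no explicit _ReadWriteBarrier() is needed on that side. A rough sketch of such an SMP acquire loop, as an illustration of the technique rather than the exact code behind the #else branch:

    FORCEINLINE
    VOID
    SmpAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
    {
        /* Try to set the lock bit atomically; the interlocked operation
           is itself a full barrier, so nothing can leak past it */
        while (InterlockedBitTestAndSet((LONG volatile *)SpinLock, 0))
        {
            /* Lock is owned: spin with plain reads until it looks free */
            while (*(volatile KSPIN_LOCK *)SpinLock & 1)
                YieldProcessor();
        }
    }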
@@ -23,14 +23,14 @@
 KIRQL
 KeAcquireSpinLockRaiseToSynch(PKSPIN_LOCK SpinLock)
 {
-#ifndef CONFIG_SMP
     KIRQL OldIrql;
-    /* Simply raise to dispatch */
-    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
+
+    /* Raise to sync */
+    KeRaiseIrql(SYNCH_LEVEL, &OldIrql);
+
+    /* Acquire the lock and return */
+    KxAcquireSpinLock(SpinLock);
     return OldIrql;
-#else
-    UNIMPLEMENTED;
-#endif
 }
 
 /*
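Note the hidden bug fix in the hunk above: the old UP path raised only to DISPATCH_LEVEL even though the function's contract is SYNCH_LEVEL. The relationship between the two levels can be sketched with illustrative definitions; the real values come from the per-architecture headers, not from this snippet:

    /* Illustrative only -- actual values are architecture-specific */
    #ifndef CONFIG_SMP
    #define SYNCH_LEVEL DISPATCH_LEVEL     /* UP: the two levels coincide */
    #else
    #define SYNCH_LEVEL (IPI_LEVEL - 2)    /* MP: strictly above DISPATCH_LEVEL */
    #endif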
@@ -40,14 +40,14 @@ KIRQL
 NTAPI
 KeAcquireSpinLockRaiseToDpc(PKSPIN_LOCK SpinLock)
 {
-#ifndef CONFIG_SMP
     KIRQL OldIrql;
-    /* Simply raise to dispatch */
+
+    /* Raise to dispatch */
     KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
+
+    /* Acquire the lock and return */
+    KxAcquireSpinLock(SpinLock);
     return OldIrql;
-#else
-    UNIMPLEMENTED;
-#endif
 }
 
 /*
@@ -58,12 +58,9 @@ NTAPI
 KeReleaseSpinLock(PKSPIN_LOCK SpinLock,
                   KIRQL OldIrql)
 {
-#ifndef CONFIG_SMP
-    /* Simply lower IRQL back */
+    /* Release the lock and lower IRQL back */
+    KxReleaseSpinLock(SpinLock);
     KeLowerIrql(OldIrql);
-#else
-    UNIMPLEMENTED;
-#endif
 }
 
 /*
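These two exports implement the canonical acquire/release pattern drivers rely on. A hedged usage sketch, where MyLock and MyCount are hypothetical names:

    KSPIN_LOCK MyLock;   /* initialized elsewhere with KeInitializeSpinLock */
    ULONG MyCount;
    KIRQL OldIrql;

    /* Raise to DISPATCH_LEVEL and take the lock */
    OldIrql = KeAcquireSpinLockRaiseToDpc(&MyLock);

    /* Protected section */
    MyCount++;

    /* Drop the lock and restore the caller's IRQL */
    KeReleaseSpinLock(&MyLock, OldIrql);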
@@ -72,14 +69,14 @@ KeReleaseSpinLock(PKSPIN_LOCK SpinLock,
 KIRQL
 KeAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
 {
-#ifndef CONFIG_SMP
     KIRQL OldIrql;
-    /* Simply raise to dispatch */
+
+    /* Raise to dispatch */
     KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
+
+    /* Acquire the lock */
+    KxAcquireSpinLock(KeGetCurrentPrcb()->LockQueue[LockNumber].Lock); // HACK
     return OldIrql;
-#else
-    UNIMPLEMENTED;
-#endif
 }
 
 /*
@@ -88,14 +85,14 @@ KeAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
 KIRQL
 KeAcquireQueuedSpinLockRaiseToSynch(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
 {
-#ifndef CONFIG_SMP
     KIRQL OldIrql;
-    /* Simply raise to dispatch */
-    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
+
+    /* Raise to synch */
+    KeRaiseIrql(SYNCH_LEVEL, &OldIrql);
+
+    /* Acquire the lock */
+    KxAcquireSpinLock(KeGetCurrentPrcb()->LockQueue[LockNumber].Lock); // HACK
     return OldIrql;
-#else
-    UNIMPLEMENTED;
-#endif
 }
 
 /*
@@ -105,14 +102,18 @@ VOID
 KeAcquireInStackQueuedSpinLock(IN PKSPIN_LOCK SpinLock,
                                IN PKLOCK_QUEUE_HANDLE LockHandle)
 {
-#ifndef CONFIG_SMP
-    /* Simply raise to dispatch */
+    /* Set up the lock */
+    LockHandle->LockQueue.Next = NULL;
+    LockHandle->LockQueue.Lock = SpinLock;
+
+    /* Raise to dispatch */
     KeRaiseIrql(DISPATCH_LEVEL, &LockHandle->OldIrql);
-#else
-    UNIMPLEMENTED;
-#endif
+
+    /* Acquire the lock */
+    KxAcquireSpinLock(LockHandle->LockQueue.Lock); // HACK
 }
 
 
 /*
  * @implemented
  */
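The queue handle the caller passes in lives on the caller's own stack, which is why the rewritten UP path initializes LockQueue.Next and LockQueue.Lock before "acquiring". A simplified sketch of the structures involved, condensed from the DDK definitions:

    typedef struct _KSPIN_LOCK_QUEUE {
        struct _KSPIN_LOCK_QUEUE * volatile Next; /* next waiter in the queue */
        PKSPIN_LOCK volatile Lock;                /* the lock being waited on */
    } KSPIN_LOCK_QUEUE, *PKSPIN_LOCK_QUEUE;

    typedef struct _KLOCK_QUEUE_HANDLE {
        KSPIN_LOCK_QUEUE LockQueue; /* per-acquisition queue node */
        KIRQL OldIrql;              /* IRQL to restore on release */
    } KLOCK_QUEUE_HANDLE, *PKLOCK_QUEUE_HANDLE;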
@@ -120,14 +121,18 @@ VOID
 KeAcquireInStackQueuedSpinLockRaiseToSynch(IN PKSPIN_LOCK SpinLock,
                                            IN PKLOCK_QUEUE_HANDLE LockHandle)
 {
-#ifndef CONFIG_SMP
-    /* Simply raise to synch */
+    /* Set up the lock */
+    LockHandle->LockQueue.Next = NULL;
+    LockHandle->LockQueue.Lock = SpinLock;
+
+    /* Raise to synch */
     KeRaiseIrql(SYNCH_LEVEL, &LockHandle->OldIrql);
-#else
-    UNIMPLEMENTED;
-#endif
+
+    /* Acquire the lock */
+    KxAcquireSpinLock(LockHandle->LockQueue.Lock); // HACK
 }
 
 
 /*
  * @implemented
  */
@@ -135,28 +140,26 @@ VOID
 KeReleaseQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
                         IN KIRQL OldIrql)
 {
-#ifndef CONFIG_SMP
-    /* Simply lower IRQL back */
+    /* Release the lock */
+    KxReleaseSpinLock(KeGetCurrentPrcb()->LockQueue[LockNumber].Lock); // HACK
+
+    /* Lower IRQL back */
     KeLowerIrql(OldIrql);
-#else
-    UNIMPLEMENTED;
-#endif
 }
 
 
 /*
  * @implemented
  */
 VOID
 KeReleaseInStackQueuedSpinLock(IN PKLOCK_QUEUE_HANDLE LockHandle)
 {
-#ifndef CONFIG_SMP
     /* Simply lower IRQL back */
+    KxReleaseSpinLock(LockHandle->LockQueue.Lock); // HACK
     KeLowerIrql(LockHandle->OldIrql);
-#else
-    UNIMPLEMENTED;
-#endif
 }
 
 
 /*
  * @implemented
  */
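Putting the in-stack variants together, the usage pattern looks like this (a hedged sketch; MyLock is a hypothetical name):

    KSPIN_LOCK MyLock;             /* initialized with KeInitializeSpinLock */
    KLOCK_QUEUE_HANDLE LockHandle; /* queue node + saved IRQL, on our stack */

    KeAcquireInStackQueuedSpinLock(&MyLock, &LockHandle);
    /* ... protected section ... */
    KeReleaseInStackQueuedSpinLock(&LockHandle);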
@@ -168,10 +171,15 @@ KeTryToAcquireQueuedSpinLockRaiseToSynch(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
     /* Simply raise to dispatch */
     KeRaiseIrql(DISPATCH_LEVEL, OldIrql);
 
+    /* Add an explicit memory barrier to prevent the compiler from reordering
+       memory accesses across the borders of spinlocks */
+    _ReadWriteBarrier();
+
     /* Always return true on UP Machines */
     return TRUE;
 #else
     UNIMPLEMENTED;
+    ASSERT(FALSE);
 #endif
 }
@@ -186,10 +194,15 @@ KeTryToAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
     /* Simply raise to dispatch */
     KeRaiseIrql(DISPATCH_LEVEL, OldIrql);
 
+    /* Add an explicit memory barrier to prevent the compiler from reordering
+       memory accesses across the borders of spinlocks */
+    _ReadWriteBarrier();
+
     /* Always return true on UP Machines */
     return TRUE;
 #else
     UNIMPLEMENTED;
+    ASSERT(FALSE);
 #endif
 }
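Finally, the try-acquire flavor never blocks, which is why the UP implementation can simply raise IRQL, issue the barrier, and report success. A hedged usage sketch against one of the numbered system lock queues:

    KIRQL OldIrql;

    /* Attempt the lock; on UP this always succeeds */
    if (KeTryToAcquireQueuedSpinLock(LockQueueDispatcherLock, &OldIrql))
    {
        /* ... protected section ... */
        KeReleaseQueuedSpinLock(LockQueueDispatcherLock, OldIrql);
    }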