[NTOSKRNL/HAL]

- Add explicit memory barriers to the KxAcquireSpinLock and KxReleaseSpinLock inline functions, and to KeTryToAcquireQueuedSpinLock and KeTryToAcquireQueuedSpinLockRaiseToSynch, in the UP case. This prevents the compiler from reordering memory accesses across the boundaries of these functions, even when they are inlined.
- Use the inline functions in the x64 spinlock functions, too.

svn path=/trunk/; revision=52078
commit b19da39918 (parent 6394fc44c7)
Timo Kreuzer, 2011-06-04 12:33:54 +00:00
3 changed files with 75 additions and 46 deletions
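To illustrate what the barriers buy on UP (a sketch under assumptions, not code from this commit): on UP builds KxAcquireSpinLock/KxReleaseSpinLock expand to no machine instructions at all, so _ReadWriteBarrier(), a compiler-only fence, is the only thing pinning protected accesses inside the raise/lower window. KeRaiseIrql, KeLowerIrql, and _ReadWriteBarrier() below are the real DDK/MSVC names used in the diff; the SharedCount variable and ExampleIncrement routine are hypothetical.

#include <ntddk.h>

static LONG SharedCount;    /* hypothetical shared state */

VOID
ExampleIncrement(VOID)
{
    KIRQL OldIrql;

    /* On UP, raising to DISPATCH_LEVEL is the whole lock: the dispatcher
       cannot preempt us, so no atomic lock word is needed */
    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);

    /* Compiler-only fence: emits no instruction, but forbids the
       optimizer from hoisting the increment above the raise */
    _ReadWriteBarrier();

    SharedCount++;          /* protected access stays inside the window */

    _ReadWriteBarrier();    /* ...and is not sunk below the lower */
    KeLowerIrql(OldIrql);
}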


@@ -188,6 +188,10 @@ KeTryToAcquireQueuedSpinLockRaiseToSynch(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
     /* Simply raise to synch */
     KeRaiseIrql(SYNCH_LEVEL, OldIrql);
 
+    /* Add an explicit memory barrier to prevent the compiler from reordering
+       memory accesses across the borders of spinlocks */
+    _ReadWriteBarrier();
+
     /* Always return true on UP Machines */
     return TRUE;
 }
@@ -208,6 +212,10 @@ KeTryToAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
     /* Simply raise to dispatch */
     KeRaiseIrql(DISPATCH_LEVEL, OldIrql);
 
+    /* Add an explicit memory barrier to prevent the compiler from reordering
+       memory accesses across the borders of spinlocks */
+    _ReadWriteBarrier();
+
     /* Always return true on UP Machines */
     return TRUE;
 }


@@ -21,6 +21,10 @@ KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
 {
     /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
     UNREFERENCED_PARAMETER(SpinLock);
+
+    /* Add an explicit memory barrier to prevent the compiler from reordering
+       memory accesses across the borders of spinlocks */
+    _ReadWriteBarrier();
 }
 
 //
@@ -32,6 +36,10 @@ KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
 {
     /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
     UNREFERENCED_PARAMETER(SpinLock);
+
+    /* Add an explicit memory barrier to prevent the compiler from reordering
+       memory accesses across the borders of spinlocks */
+    _ReadWriteBarrier();
 }
 
 #else


@@ -23,14 +23,14 @@
 KIRQL
 KeAcquireSpinLockRaiseToSynch(PKSPIN_LOCK SpinLock)
 {
 #ifndef CONFIG_SMP
     KIRQL OldIrql;
 
-    /* Simply raise to dispatch */
-    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
+    /* Raise to sync */
+    KeRaiseIrql(SYNCH_LEVEL, &OldIrql);
 
     /* Acquire the lock and return */
     KxAcquireSpinLock(SpinLock);
     return OldIrql;
 #else
     UNIMPLEMENTED;
 #endif
 }
 
 /*
@@ -40,14 +40,14 @@ KIRQL
 NTAPI
 KeAcquireSpinLockRaiseToDpc(PKSPIN_LOCK SpinLock)
 {
 #ifndef CONFIG_SMP
     KIRQL OldIrql;
 
-    /* Simply raise to dispatch */
+    /* Raise to dispatch */
     KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
 
     /* Acquire the lock and return */
     KxAcquireSpinLock(SpinLock);
     return OldIrql;
 #else
     UNIMPLEMENTED;
 #endif
 }
 
 /*
@@ -58,12 +58,9 @@ NTAPI
 KeReleaseSpinLock(PKSPIN_LOCK SpinLock,
                   KIRQL OldIrql)
 {
-#ifndef CONFIG_SMP
-    /* Simply lower IRQL back */
+    /* Release the lock and lower IRQL back */
+    KxReleaseSpinLock(SpinLock);
     KeLowerIrql(OldIrql);
-#else
-    UNIMPLEMENTED;
-#endif
 }
 
 /*
@@ -72,14 +69,14 @@ KeReleaseSpinLock(PKSPIN_LOCK SpinLock,
 KIRQL
 KeAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
 {
 #ifndef CONFIG_SMP
     KIRQL OldIrql;
 
-    /* Simply raise to dispatch */
+    /* Raise to dispatch */
     KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
 
     /* Acquire the lock */
     KxAcquireSpinLock(KeGetCurrentPrcb()->LockQueue[LockNumber].Lock); // HACK
     return OldIrql;
 #else
     UNIMPLEMENTED;
 #endif
 }
 
 /*
@@ -88,14 +85,14 @@ KeAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
 KIRQL
 KeAcquireQueuedSpinLockRaiseToSynch(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
 {
 #ifndef CONFIG_SMP
     KIRQL OldIrql;
 
-    /* Simply raise to dispatch */
-    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
+    /* Raise to synch */
+    KeRaiseIrql(SYNCH_LEVEL, &OldIrql);
 
     /* Acquire the lock */
     KxAcquireSpinLock(KeGetCurrentPrcb()->LockQueue[LockNumber].Lock); // HACK
     return OldIrql;
 #else
     UNIMPLEMENTED;
 #endif
 }
 
 /*
@@ -105,14 +102,18 @@ VOID
 KeAcquireInStackQueuedSpinLock(IN PKSPIN_LOCK SpinLock,
                                IN PKLOCK_QUEUE_HANDLE LockHandle)
 {
-#ifndef CONFIG_SMP
-    /* Simply raise to dispatch */
+    /* Set up the lock */
+    LockHandle->LockQueue.Next = NULL;
+    LockHandle->LockQueue.Lock = SpinLock;
+
+    /* Raise to dispatch */
     KeRaiseIrql(DISPATCH_LEVEL, &LockHandle->OldIrql);
-#else
-    UNIMPLEMENTED;
-#endif
+
+    /* Acquire the lock */
+    KxAcquireSpinLock(LockHandle->LockQueue.Lock); // HACK
 }
 
 /*
  * @implemented
  */
@@ -120,14 +121,18 @@ VOID
 KeAcquireInStackQueuedSpinLockRaiseToSynch(IN PKSPIN_LOCK SpinLock,
                                            IN PKLOCK_QUEUE_HANDLE LockHandle)
 {
-#ifndef CONFIG_SMP
-    /* Simply raise to synch */
+    /* Set up the lock */
+    LockHandle->LockQueue.Next = NULL;
+    LockHandle->LockQueue.Lock = SpinLock;
+
+    /* Raise to synch */
     KeRaiseIrql(SYNCH_LEVEL, &LockHandle->OldIrql);
-#else
-    UNIMPLEMENTED;
-#endif
+
+    /* Acquire the lock */
+    KxAcquireSpinLock(LockHandle->LockQueue.Lock); // HACK
 }
 
 /*
  * @implemented
  */
@@ -135,28 +140,26 @@ VOID
 KeReleaseQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
                         IN KIRQL OldIrql)
 {
-#ifndef CONFIG_SMP
-    /* Simply lower IRQL back */
+    /* Release the lock */
+    KxReleaseSpinLock(KeGetCurrentPrcb()->LockQueue[LockNumber].Lock); // HACK
+
+    /* Lower IRQL back */
     KeLowerIrql(OldIrql);
-#else
-    UNIMPLEMENTED;
-#endif
 }
 
 /*
  * @implemented
  */
 VOID
 KeReleaseInStackQueuedSpinLock(IN PKLOCK_QUEUE_HANDLE LockHandle)
 {
-#ifndef CONFIG_SMP
     /* Simply lower IRQL back */
+    KxReleaseSpinLock(LockHandle->LockQueue.Lock); // HACK
     KeLowerIrql(LockHandle->OldIrql);
-#else
-    UNIMPLEMENTED;
-#endif
 }
 
 /*
  * @implemented
  */
@@ -168,10 +171,15 @@ KeTryToAcquireQueuedSpinLockRaiseToSynch(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
     /* Simply raise to dispatch */
     KeRaiseIrql(DISPATCH_LEVEL, OldIrql);
 
+    /* Add an explicit memory barrier to prevent the compiler from reordering
+       memory accesses across the borders of spinlocks */
+    _ReadWriteBarrier();
+
     /* Always return true on UP Machines */
     return TRUE;
 #else
     UNIMPLEMENTED;
+    ASSERT(FALSE);
 #endif
 }
@@ -186,10 +194,15 @@ KeTryToAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
     /* Simply raise to dispatch */
     KeRaiseIrql(DISPATCH_LEVEL, OldIrql);
 
+    /* Add an explicit memory barrier to prevent the compiler from reordering
+       memory accesses across the borders of spinlocks */
+    _ReadWriteBarrier();
+
     /* Always return true on UP Machines */
     return TRUE;
 #else
     UNIMPLEMENTED;
+    ASSERT(FALSE);
 #endif
 }