Merge from amd64-branch:

44886 KxAcquireSpinLock: check for an already owned lock only on debug builds, fix the check in the inner loop, and rather than calling Kii386SpinOnSpinLock inside the inner loop, simplify the code. Stubplement (stub-implement) Kii386SpinOnSpinLock in C.
44893 Move the spinlock inline functions into their own header, so they can be shared with the HAL.

svn path=/trunk/; revision=45196
Timo Kreuzer, 2010-01-21 22:34:01 +00:00
commit f4519c0c43
4 changed files with 108 additions and 94 deletions
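In short: on debug builds the lock word stores the owning KTHREAD pointer ORed with 1, not the literal value 1, so the old inner wait loop could fall through while the lock was still held by another thread. The fix tests the lock bit instead. A minimal sketch of the change to the wait condition (full context in the diffs below):

    /* Old, broken wait: only matches when the lock word is exactly 1 */
    while (*(volatile KSPIN_LOCK *)SpinLock == 1) YieldProcessor();

    /* Fixed wait: tests bit 0, correct even when a KTHREAD is ORed in */
    while (*(volatile KSPIN_LOCK *)SpinLock & 1) YieldProcessor();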

@@ -102,27 +102,6 @@ KeGetPreviousMode(VOID)
 }

 #ifndef CONFIG_SMP

-//
-// Spinlock Acquire at IRQL >= DISPATCH_LEVEL
-//
-FORCEINLINE
-VOID
-KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
-{
-    /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
-    UNREFERENCED_PARAMETER(SpinLock);
-}
-
-//
-// Spinlock Release at IRQL >= DISPATCH_LEVEL
-//
-FORCEINLINE
-VOID
-KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
-{
-    /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
-    UNREFERENCED_PARAMETER(SpinLock);
-}
-
 //
 // This routine protects against multiple CPU acquires, it's meaningless on UP.
@@ -303,72 +282,6 @@ KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
 #else

-//
-// Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
-//
-FORCEINLINE
-VOID
-KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
-{
-    /* Make sure that we don't own the lock already */
-    if (((KSPIN_LOCK)KeGetCurrentThread() | 1) == *SpinLock)
-    {
-        /* We do, bugcheck! */
-        KeBugCheckEx(SPIN_LOCK_ALREADY_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
-    }
-
-    /* Start acquire loop */
-    for (;;)
-    {
-        /* Try to acquire it */
-        if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
-        {
-            /* Value changed... wait until it's unlocked */
-            while (*(volatile KSPIN_LOCK *)SpinLock == 1)
-            {
-#if DBG
-                /* On debug builds, we use a much slower but useful routine */
-                //Kii386SpinOnSpinLock(SpinLock, 5);
-
-                /* FIXME: Do normal yield for now */
-                YieldProcessor();
-#else
-                /* Otherwise, just yield and keep looping */
-                YieldProcessor();
-#endif
-            }
-        }
-        else
-        {
-#if DBG
-            /* On debug builds, we OR in the KTHREAD */
-            *SpinLock = (KSPIN_LOCK)KeGetCurrentThread() | 1;
-#endif
-            /* All is well, break out */
-            break;
-        }
-    }
-}
-
-//
-// Spinlock Release at IRQL >= DISPATCH_LEVEL
-//
-FORCEINLINE
-VOID
-KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
-{
-#if DBG
-    /* Make sure that the threads match */
-    if (((KSPIN_LOCK)KeGetCurrentThread() | 1) != *SpinLock)
-    {
-        /* They don't, bugcheck */
-        KeBugCheckEx(SPIN_LOCK_NOT_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
-    }
-#endif
-
-    /* Clear the lock */
-    InterlockedAnd((PLONG)SpinLock, 0);
-}
-
 FORCEINLINE
 VOID
 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)

@@ -83,6 +83,7 @@
 #include "../kdbg/kdb.h"
 #endif
 #include "dbgk.h"
+#include "spinlock.h"
 #include "tag.h"
 #include "test.h"
 #include "inbv.h"

@@ -0,0 +1,95 @@
+/*
+ * PROJECT:         ReactOS Kernel
+ * LICENSE:         GPL - See COPYING in the top level directory
+ * FILE:            ntoskrnl/include/spinlock.h
+ * PURPOSE:         Internal Inlined Functions for spinlocks, shared with HAL
+ * PROGRAMMERS:     Alex Ionescu (alex.ionescu@reactos.org)
+ */
+
+VOID
+NTAPI
+Kii386SpinOnSpinLock(PKSPIN_LOCK SpinLock, ULONG Flags);
+
+#ifndef CONFIG_SMP
+
+//
+// Spinlock Acquire at IRQL >= DISPATCH_LEVEL
+//
+FORCEINLINE
+VOID
+KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
+{
+    /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
+    UNREFERENCED_PARAMETER(SpinLock);
+}
+
+//
+// Spinlock Release at IRQL >= DISPATCH_LEVEL
+//
+FORCEINLINE
+VOID
+KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
+{
+    /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
+    UNREFERENCED_PARAMETER(SpinLock);
+}
+
+#else
+
+//
+// Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
+//
+FORCEINLINE
+VOID
+KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
+{
+#if DBG
+    /* Make sure that we don't own the lock already */
+    if (((KSPIN_LOCK)KeGetCurrentThread() | 1) == *SpinLock)
+    {
+        /* We do, bugcheck! */
+        KeBugCheckEx(SPIN_LOCK_ALREADY_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
+    }
+#endif
+
+    /* Try to acquire the lock */
+    while (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
+    {
+#if defined(_M_IX86) && DBG
+        /* On x86 debug builds, we use a much slower but useful routine */
+        Kii386SpinOnSpinLock(SpinLock, 5);
+#else
+        /* It's locked... spin until it's unlocked */
+        while (*(volatile KSPIN_LOCK *)SpinLock & 1)
+        {
+            /* Yield and keep looping */
+            YieldProcessor();
+        }
+#endif
+    }
+
+#if DBG
+    /* On debug builds, we OR in the KTHREAD */
+    *SpinLock = (KSPIN_LOCK)KeGetCurrentThread() | 1;
+#endif
+}
+
+//
+// Spinlock Release at IRQL >= DISPATCH_LEVEL
+//
+FORCEINLINE
+VOID
+KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
+{
+#if DBG
+    /* Make sure that the threads match */
+    if (((KSPIN_LOCK)KeGetCurrentThread() | 1) != *SpinLock)
+    {
+        /* They don't, bugcheck */
+        KeBugCheckEx(SPIN_LOCK_NOT_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
+    }
+#endif
+
+    /* Clear the lock */
+    InterlockedAnd((PLONG)SpinLock, 0);
+}
+
+#endif
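
With the inlines in a shared header, both the kernel and (per r44893's intent) the HAL can expand them inside their exported spinlock routines. A hedged sketch of such a wrapper: KeAcquireSpinLockAtDpcLevel and KeReleaseSpinLockFromDpcLevel are the standard NT exports, but the bodies shown here are an illustration of the pattern, not the exact ReactOS source:

    #include "spinlock.h"

    /* Caller is already at IRQL >= DISPATCH_LEVEL, so no IRQL raise is needed */
    VOID
    NTAPI
    KeAcquireSpinLockAtDpcLevel(IN PKSPIN_LOCK SpinLock)
    {
        /* Expands to nothing on UP builds, to the spin loop on SMP builds */
        KxAcquireSpinLock(SpinLock);
    }

    VOID
    NTAPI
    KeReleaseSpinLockFromDpcLevel(IN PKSPIN_LOCK SpinLock)
    {
        KxReleaseSpinLock(SpinLock);
    }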

@@ -456,15 +456,20 @@ KeTestSpinLock(IN PKSPIN_LOCK SpinLock)
 }

 #ifdef _M_IX86
 /*
  * @unimplemented
  */
 VOID
 NTAPI
-Kii386SpinOnSpinLock(IN PKSPIN_LOCK SpinLock,
-                     IN ULONG Flags)
+Kii386SpinOnSpinLock(PKSPIN_LOCK SpinLock, ULONG Flags)
 {
-    UNIMPLEMENTED;
-    while (TRUE);
+    // FIXME: Handle flags
+    UNREFERENCED_PARAMETER(Flags);
+
+    /* Spin until it's unlocked */
+    while (*(volatile KSPIN_LOCK *)SpinLock & 1)
+    {
+        // FIXME: Check for timeout
+        /* Yield and keep looping */
+        YieldProcessor();
+    }
 }
 #endif
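
The two FIXMEs bound what this stubplementation leaves out: the Flags argument is ignored and the spin never times out. One hypothetical way the timeout check could look; the spin budget, its value, and the DbgPrint warning are assumptions for illustration, not part of this commit:

    VOID
    NTAPI
    Kii386SpinOnSpinLock(PKSPIN_LOCK SpinLock, ULONG Flags)
    {
        ULONG Spins = 0;
        const ULONG SpinBudget = 10000000; /* Hypothetical spin budget */

        // FIXME: Handle flags
        UNREFERENCED_PARAMETER(Flags);

        /* Spin until the lock bit clears */
        while (*(volatile KSPIN_LOCK *)SpinLock & 1)
        {
            /* Hypothetical timeout: warn once if the owner never releases */
            if (++Spins == SpinBudget)
            {
                DbgPrint("Possible deadlock: spinlock %p held too long\n", SpinLock);
            }

            /* Yield and keep looping */
            YieldProcessor();
        }
    }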