UP/MP cleanup:
- Remove KxAcquireSpinLock from halp.h; use the one from ntoskrnl instead
- Use generic/spinlock.c in hal_generic_up and hal_generic_mp
- Delete mp/spinlock.c
- Move the CMOS spinlock functions to spinlock.c so they are compiled for both UP and MP (see the usage sketch below)
- Move the wrappers around the fastcall functions to misc.c, as they are identical on UP and MP

svn path=/trunk/; revision=46446
Timo Kreuzer 2010-03-25 18:37:59 +00:00
parent e928936a69
commit 929bbc7b7c
8 changed files with 99 additions and 422 deletions
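
For context, a minimal usage sketch of the two CMOS lock routines this commit moves into generic/spinlock.c: HalpAcquireSystemHardwareSpinLock() disables interrupts, saves EFLAGS, and takes the shared hardware lock; the caller then touches the CMOS index/data ports; HalpReleaseCmosSpinLock() releases the lock and restores EFLAGS. The helper name HalpReadCmosByte and the 0x70/0x71 port numbers are illustrative assumptions, not part of this commit.

/* Hypothetical usage sketch - not part of this commit */
#include <hal.h>

UCHAR
NTAPI
HalpReadCmosByte(UCHAR Register)   /* illustrative name */
{
    UCHAR Value;

    /* Take the shared hardware lock; interrupts are disabled and
       EFLAGS is saved inside the acquire routine */
    HalpAcquireSystemHardwareSpinLock();

    /* Select the CMOS register on the index port, then read the data port
       (0x70/0x71 are the conventional CMOS ports, assumed here) */
    WRITE_PORT_UCHAR((PUCHAR)0x70, Register);
    Value = READ_PORT_UCHAR((PUCHAR)0x71);

    /* Drop the lock and restore the saved EFLAGS */
    HalpReleaseCmosSpinLock();

    return Value;
}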

View file

@@ -15,44 +15,9 @@
/* GLOBALS *******************************************************************/
KSPIN_LOCK HalpSystemHardwareLock;
UCHAR HalpCmosCenturyOffset;
ULONG HalpSystemHardwareFlags;
/* PRIVATE FUNCTIONS **********************************************************/
VOID
NTAPI
HalpAcquireSystemHardwareSpinLock(VOID)
{
ULONG Flags;
/* Get flags and disable interrupts */
Flags = __readeflags();
_disable();
/* Acquire the lock */
KxAcquireSpinLock(&HalpSystemHardwareLock);
/* We have the lock, save the flags now */
HalpSystemHardwareFlags = Flags;
}
VOID
NTAPI
HalpReleaseCmosSpinLock(VOID)
{
ULONG Flags;
/* Get the flags */
Flags = HalpSystemHardwareFlags;
/* Release the lock */
KxReleaseSpinLock(&HalpSystemHardwareLock);
/* Restore the flags */
__writeeflags(Flags);
}
/* PRIVATE FUNCTIONS *********************************************************/
FORCEINLINE
UCHAR

View file

@@ -245,3 +245,60 @@ KeFlushWriteBuffer(VOID)
//
return;
}
#ifdef _M_IX86
/* x86 fastcall wrappers */
#undef KeRaiseIrql
/*
* @implemented
*/
VOID
NTAPI
KeRaiseIrql(KIRQL NewIrql,
PKIRQL OldIrql)
{
/* Call the fastcall function */
*OldIrql = KfRaiseIrql(NewIrql);
}
#undef KeLowerIrql
/*
* @implemented
*/
VOID
NTAPI
KeLowerIrql(KIRQL NewIrql)
{
/* Call the fastcall function */
KfLowerIrql(NewIrql);
}
#undef KeAcquireSpinLock
/*
* @implemented
*/
VOID
NTAPI
KeAcquireSpinLock(PKSPIN_LOCK SpinLock,
PKIRQL OldIrql)
{
/* Call the fastcall function */
*OldIrql = KfAcquireSpinLock(SpinLock);
}
#undef KeReleaseSpinLock
/*
* @implemented
*/
VOID
NTAPI
KeReleaseSpinLock(PKSPIN_LOCK SpinLock,
KIRQL NewIrql)
{
/* Call the fastcall function */
KfReleaseSpinLock(SpinLock, NewIrql);
}
#endif

View file

@@ -8,29 +8,25 @@
/* INCLUDES ******************************************************************/
/* Enable this (and the define in irq.S) to make UP HAL work for MP Kernel */
/* #define CONFIG_SMP */
/* This file is compiled twice. Once for UP and once for MP */
#include <hal.h>
#define NDEBUG
#include <debug.h>
#include <internal/spinlock.h>
#undef KeAcquireSpinLock
#undef KeReleaseSpinLock
/* GLOBALS *******************************************************************/
ULONG HalpSystemHardwareFlags;
KSPIN_LOCK HalpSystemHardwareLock;
/* FUNCTIONS *****************************************************************/
/*
* @implemented
*/
VOID
NTAPI
KeAcquireSpinLock(PKSPIN_LOCK SpinLock,
PKIRQL OldIrql)
{
/* Call the fastcall function */
*OldIrql = KfAcquireSpinLock(SpinLock);
}
#ifdef _M_IX86
/*
* @implemented
@@ -49,18 +45,6 @@ KeAcquireSpinLockRaiseToSynch(PKSPIN_LOCK SpinLock)
return OldIrql;
}
/*
* @implemented
*/
VOID
NTAPI
KeReleaseSpinLock(PKSPIN_LOCK SpinLock,
KIRQL NewIrql)
{
/* Call the fastcall function */
KfReleaseSpinLock(SpinLock, NewIrql);
}
/*
* @implemented
*/
@@ -228,27 +212,38 @@ KeTryToAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
return TRUE;
}
#undef KeRaiseIrql
/*
* @implemented
*/
#endif
VOID
NTAPI
KeRaiseIrql(KIRQL NewIrql,
PKIRQL OldIrql)
HalpAcquireSystemHardwareSpinLock(VOID)
{
/* Call the fastcall function */
*OldIrql = KfRaiseIrql(NewIrql);
ULONG Flags;
/* Get flags and disable interrupts */
Flags = __readeflags();
_disable();
/* Acquire the lock */
KxAcquireSpinLock(&HalpSystemHardwareLock);
/* We have the lock, save the flags now */
HalpSystemHardwareFlags = Flags;
}
#undef KeLowerIrql
/*
* @implemented
*/
VOID
NTAPI
KeLowerIrql(KIRQL NewIrql)
HalpReleaseCmosSpinLock(VOID)
{
/* Call the fastcall function */
KfLowerIrql(NewIrql);
ULONG Flags;
/* Get the flags */
Flags = HalpSystemHardwareFlags;
/* Release the lock */
KxReleaseSpinLock(&HalpSystemHardwareLock);
/* Restore the flags */
__writeeflags(Flags);
}

View file

@@ -7,6 +7,9 @@
<define name="_NTHALDLL_" />
<define name="_NTHAL_" />
<define name="CONFIG_SMP" />
<directory name="generic">
<file>spinlock.c</file>
</directory>
<directory name="mp">
<file>apic.c</file>
<file>halinit_mp.c</file>
@@ -14,7 +17,6 @@
<file>ipi_mp.c</file>
<file>mpconfig.c</file>
<file>processor_mp.c</file>
<file>spinlock.c</file>
<file>halmp.rc</file>
</directory>
</module>

View file

@@ -6,10 +6,12 @@
<include base="ntoskrnl">include</include>
<define name="_NTHALDLL_" />
<define name="_NTHAL_" />
<directory name="generic">
<file>spinlock.c</file>
</directory>
<directory name="generic">
<file>pic.c</file>
<file>processor.c</file>
<file>spinlock.c</file>
</directory>
</module>
</group>

View file

@@ -681,100 +681,7 @@ HalpReleaseCmosSpinLock(
VOID
);
//
// This is duplicated from ke_x.h
//
#ifdef CONFIG_SMP
//
// Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
//
FORCEINLINE
VOID
KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
{
/* Make sure that we don't own the lock already */
if (((KSPIN_LOCK)KeGetCurrentThread() | 1) == *SpinLock)
{
/* We do, bugcheck! */
KeBugCheckEx(SPIN_LOCK_ALREADY_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
}
for (;;)
{
/* Try to acquire it */
if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
{
/* Value changed... wait until it's locked */
while (*(volatile KSPIN_LOCK *)SpinLock == 1)
{
#ifdef DBG
/* On debug builds, we use a much slower but useful routine */
//Kii386SpinOnSpinLock(SpinLock, 5);
/* FIXME: Do normal yield for now */
YieldProcessor();
#else
/* Otherwise, just yield and keep looping */
YieldProcessor();
#endif
}
}
else
{
#ifdef DBG
/* On debug builds, we OR in the KTHREAD */
*SpinLock = (KSPIN_LOCK)KeGetCurrentThread() | 1;
#endif
/* All is well, break out */
break;
}
}
}
//
// Spinlock Release at IRQL >= DISPATCH_LEVEL
//
FORCEINLINE
VOID
KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
{
#ifdef DBG
/* Make sure that the threads match */
if (((KSPIN_LOCK)KeGetCurrentThread() | 1) != *SpinLock)
{
/* They don't, bugcheck */
KeBugCheckEx(SPIN_LOCK_NOT_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
}
#endif
/* Clear the lock */
InterlockedAnd((PLONG)SpinLock, 0);
}
#else
//
// Spinlock Acquire at IRQL >= DISPATCH_LEVEL
//
FORCEINLINE
VOID
KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
{
/* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
UNREFERENCED_PARAMETER(SpinLock);
}
//
// Spinlock Release at IRQL >= DISPATCH_LEVEL
//
FORCEINLINE
VOID
KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
{
/* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
UNREFERENCED_PARAMETER(SpinLock);
}
#endif
VOID
FASTCALL

View file

@@ -144,29 +144,6 @@ KfLowerIrql (KIRQL NewIrql)
}
/**********************************************************************
* NAME EXPORTED
* KeLowerIrql
*
* DESCRIPTION
* Restores the irq level on the current processor
*
* ARGUMENTS
* NewIrql = Irql to lower to
*
* RETURN VALUE
* None
*
* NOTES
*/
#undef KeLowerIrql
VOID NTAPI
KeLowerIrql (KIRQL NewIrql)
{
KfLowerIrql (NewIrql);
}
/**********************************************************************
* NAME EXPORTED
* KfRaiseIrql
@@ -215,33 +192,6 @@ KfRaiseIrql (KIRQL NewIrql)
return OldIrql;
}
/**********************************************************************
* NAME EXPORTED
* KeRaiseIrql
*
* DESCRIPTION
* Raises the hardware priority (irql)
*
* ARGUMENTS
* NewIrql = Irql to raise to
* OldIrql (OUT) = Caller supplied storage for the previous irql
*
* RETURN VALUE
* None
*
* NOTES
* Calls KfRaiseIrql
*/
#undef KeRaiseIrql
VOID NTAPI
KeRaiseIrql (KIRQL NewIrql,
PKIRQL OldIrql)
{
*OldIrql = KfRaiseIrql (NewIrql);
}
/**********************************************************************
* NAME EXPORTED
* KeRaiseIrqlToDpcLevel

View file

@@ -1,201 +0,0 @@
/*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
* FILE: hal/halx86/mp/spinlock.c
* PURPOSE: Implements spinlocks
* PROGRAMMER: David Welch (welch@cwcom.net)
* Eric Kohl
* UPDATE HISTORY:
* 09/06/2000 Created
*/
/*
* NOTE: On a uniprocessor machine spinlocks are implemented by raising
* the irq level
*/
/* INCLUDES ****************************************************************/
#include <hal.h>
#define NDEBUG
#include <debug.h>
/* Hmm, needed for KDBG := 1. Why? */
#undef KeGetCurrentIrql
/* FUNCTIONS ***************************************************************/
#undef KeAcquireSpinLock
VOID NTAPI
KeAcquireSpinLock (
PKSPIN_LOCK SpinLock,
PKIRQL OldIrql
)
/*
* FUNCTION: Acquires a spinlock
* ARGUMENTS:
* SpinLock = Spinlock to acquire
* OldIrql (OUT) = Caller supplied storage for the previous irql
*/
{
*OldIrql = KfAcquireSpinLock(SpinLock);
}
KIRQL FASTCALL
KeAcquireSpinLockRaiseToSynch (
PKSPIN_LOCK SpinLock
)
{
KIRQL OldIrql;
OldIrql = KfRaiseIrql(CLOCK2_LEVEL);
KiAcquireSpinLock(SpinLock);
return OldIrql;
}
#undef KeReleaseSpinLock
VOID NTAPI
KeReleaseSpinLock (
PKSPIN_LOCK SpinLock,
KIRQL NewIrql
)
/*
* FUNCTION: Releases a spinlock
* ARGUMENTS:
* SpinLock = Spinlock to release
* NewIrql = Irql level before acquiring the spinlock
*/
{
KfReleaseSpinLock(SpinLock, NewIrql);
}
LOGICAL
FASTCALL
KeTryToAcquireQueuedSpinLock(
KSPIN_LOCK_QUEUE_NUMBER LockNumber,
PKIRQL OldIrql)
{
UNIMPLEMENTED;
return FALSE;
}
BOOLEAN
FASTCALL
KeTryToAcquireQueuedSpinLockRaiseToSynch(
KSPIN_LOCK_QUEUE_NUMBER LockNumber,
PKIRQL OldIrql)
{
UNIMPLEMENTED;
return FALSE;
}
KIRQL FASTCALL
KfAcquireSpinLock (
PKSPIN_LOCK SpinLock
)
{
KIRQL OldIrql;
ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
OldIrql = KfRaiseIrql(DISPATCH_LEVEL);
KiAcquireSpinLock(SpinLock);
return OldIrql;
}
VOID FASTCALL
KfReleaseSpinLock (
PKSPIN_LOCK SpinLock,
KIRQL NewIrql
)
/*
* FUNCTION: Releases a spinlock
* ARGUMENTS:
* SpinLock = Spinlock to release
* NewIrql = Irql level before acquiring the spinlock
*/
{
ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL || KeGetCurrentIrql() == SYNCH_LEVEL);
KiReleaseSpinLock(SpinLock);
KfLowerIrql(NewIrql);
}
/*
* @unimplemented
*/
VOID
FASTCALL
KeAcquireInStackQueuedSpinLock(
IN PKSPIN_LOCK SpinLock,
IN PKLOCK_QUEUE_HANDLE LockHandle
)
{
UNIMPLEMENTED;
}
/*
* @unimplemented
*/
VOID
FASTCALL
KeAcquireInStackQueuedSpinLockRaiseToSynch(
IN PKSPIN_LOCK SpinLock,
IN PKLOCK_QUEUE_HANDLE LockHandle
)
{
UNIMPLEMENTED;
}
/*
* @unimplemented
*/
VOID
FASTCALL
KeReleaseInStackQueuedSpinLock(
IN PKLOCK_QUEUE_HANDLE LockHandle
)
{
UNIMPLEMENTED;
}
/*
* @unimplemented
*/
KIRQL
FASTCALL
KeAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER Number)
{
UNIMPLEMENTED;
return 0;
}
/*
* @unimplemented
*/
KIRQL
FASTCALL
KeAcquireQueuedSpinLockRaiseToSynch(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
{
UNIMPLEMENTED;
return 0;
}
/*
* @unimplemented
*/
VOID
FASTCALL
KeReleaseQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER Number,
IN KIRQL OldIrql)
{
UNIMPLEMENTED;
}
/* EOF */