- Forgot to commit previous HAL changes.

- There is no reason to implement the Interlocked* APIs in assembly in the kernel -- just have them call the intrinsics.
- Also fix the way we were overloading the exported APIs; we now use the intrinsics all the time.
- Convert the loader block address to virtual, and walk the addresses inside it, converting them to virtual too once FreeLDR no longer uses them -- the kernel will delete the physical->virtual identity mapping but will eventually still need to touch the loader block (see the sketch after this list).
- Implement MmGetPhysicalAddress and MiGetUserPageDirectoryCount for ARM.
- We now reach the point where we start initializing the boot drivers' memory information.
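As a minimal sketch of the loader-block fixup described above -- PaToVa, KSEG0_BASE, ConvertLoaderBlock, and the particular fields touched are illustrative assumptions, not the code from this commit:

/* Assumes ntoskrnl-style types and a flat RAM mapping at KSEG0_BASE;
   PaToVa is a hypothetical helper that rebases a physical pointer
   into its kernel-virtual alias. */
#define KSEG0_BASE 0x80000000

static PVOID
PaToVa(IN PVOID Pa)
{
    return (PVOID)((ULONG_PTR)Pa | KSEG0_BASE);
}

VOID
ConvertLoaderBlock(IN OUT PLOADER_PARAMETER_BLOCK *LoaderBlock)
{
    PLOADER_PARAMETER_BLOCK Block;

    /* Rebase the block itself first... */
    *LoaderBlock = PaToVa(*LoaderBlock);
    Block = *LoaderBlock;

    /* ...then every embedded pointer the kernel still needs once the
       physical->virtual identity mapping is torn down */
    Block->Extension = PaToVa(Block->Extension);
    Block->NlsData = PaToVa(Block->NlsData);

    /* List heads (LoadOrderListHead and friends) need their
       Flink/Blink links rebased the same way */
}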

svn path=/trunk/; revision=32654
ReactOS Portable Systems Group 2008-03-11 04:42:54 +00:00
parent a6b9a98b82
commit 3d10a2eabc
8 changed files with 342 additions and 279 deletions

View file

@@ -26,6 +26,11 @@
#undef ExAcquireFastMutex
#undef ExReleaseFastMutex
#undef ExTryToAcquireFastMutex
#undef KeAcquireSpinLock
#undef KeGetCurrentIrql
#undef KeLowerIrql
#undef KeRaiseIrql
#undef KeReleaseSpinLock
/* DATA **********************************************************************/
@@ -730,64 +735,6 @@ IoMapTransfer(
return Address;
}
#undef KeAcquireSpinLock
VOID
NTAPI
KeAcquireSpinLock(
PKSPIN_LOCK SpinLock,
PKIRQL OldIrql)
{
UNIMPLEMENTED;
}
KIRQL
FASTCALL
KeAcquireSpinLockRaiseToSynch(
PKSPIN_LOCK SpinLock)
{
UNIMPLEMENTED;
return 0;
}
/*
* @implemented
*/
VOID
FASTCALL
KeAcquireInStackQueuedSpinLock(IN PKSPIN_LOCK SpinLock,
IN PKLOCK_QUEUE_HANDLE LockHandle)
{
/* Simply raise to dispatch */
LockHandle->OldIrql = KeSwapIrql(DISPATCH_LEVEL);
}
/*
* @implemented
*/
VOID
FASTCALL
KeAcquireInStackQueuedSpinLockRaiseToSynch(IN PKSPIN_LOCK SpinLock,
IN PKLOCK_QUEUE_HANDLE LockHandle)
{
/* Simply raise to synch */
LockHandle->OldIrql = KeSwapIrql(SYNCH_LEVEL);
}
/*
* @implemented
*/
VOID
FASTCALL
KeReleaseInStackQueuedSpinLock(IN PKLOCK_QUEUE_HANDLE LockHandle)
{
/* Simply lower IRQL back */
KeSwapIrql(LockHandle->OldIrql);
}
VOID
NTAPI
KeFlushWriteBuffer(VOID)
@@ -795,26 +742,6 @@ KeFlushWriteBuffer(VOID)
UNIMPLEMENTED;
}
#undef KeGetCurrentIrql
KIRQL
NTAPI
KeGetCurrentIrql(VOID)
{
UNIMPLEMENTED;
return (KIRQL)0;
}
#undef KeLowerIrql
VOID
NTAPI
KeLowerIrql(
KIRQL NewIrql)
{
UNIMPLEMENTED;
}
LARGE_INTEGER
NTAPI
KeQueryPerformanceCounter(
@@ -829,47 +756,6 @@ KeQueryPerformanceCounter(
return Value;
}
#undef KeRaiseIrql
VOID
NTAPI
KeRaiseIrql(
KIRQL NewIrql,
PKIRQL OldIrql)
{
UNIMPLEMENTED;
}
KIRQL
NTAPI
KeRaiseIrqlToDpcLevel(VOID)
{
UNIMPLEMENTED;
return (KIRQL)0;
}
KIRQL
NTAPI
KeRaiseIrqlToSynchLevel(VOID)
{
UNIMPLEMENTED;
return (KIRQL)0;
}
#undef KeReleaseSpinLock
VOID
NTAPI
KeReleaseSpinLock(
PKSPIN_LOCK SpinLock,
KIRQL NewIrql)
{
UNIMPLEMENTED;
}
VOID
NTAPI
KeStallExecutionProcessor(
@@ -878,42 +764,6 @@ KeStallExecutionProcessor(
UNIMPLEMENTED;
}
LOGICAL
FASTCALL
KeTryToAcquireQueuedSpinLock(
KSPIN_LOCK_QUEUE_NUMBER LockNumber,
PKIRQL OldIrql)
{
UNIMPLEMENTED;
return FALSE;
}
BOOLEAN
FASTCALL
KeTryToAcquireQueuedSpinLockRaiseToSynch(
KSPIN_LOCK_QUEUE_NUMBER LockNumber,
PKIRQL OldIrql)
{
UNIMPLEMENTED;
return FALSE;
}
KIRQL
FASTCALL
KfAcquireSpinLock(
PKSPIN_LOCK SpinLock)
{
UNIMPLEMENTED;
return (KIRQL)0;
}
VOID
FASTCALL
KfLowerIrql(
@@ -933,17 +783,6 @@ KfRaiseIrql(
return (KIRQL)0;
}
VOID
FASTCALL
KfReleaseSpinLock(
PKSPIN_LOCK SpinLock,
KIRQL NewIrql)
{
UNIMPLEMENTED;
}
VOID
NTAPI
READ_PORT_BUFFER_UCHAR(
@@ -1071,31 +910,21 @@ WRITE_PORT_USHORT(
}
KIRQL
FASTCALL
KeAcquireQueuedSpinLock(IN PKLOCK_QUEUE_HANDLE LockHandle)
{
UNIMPLEMENTED;
return (KIRQL)0;
}
KIRQL
FASTCALL
KeAcquireQueuedSpinLockRaiseToSynch(IN PKLOCK_QUEUE_HANDLE LockHandle)
KeSwapIrql(IN KIRQL Irql)
{
UNIMPLEMENTED;
return (KIRQL)0;
}
VOID
FASTCALL
KeReleaseQueuedSpinLock(IN PKLOCK_QUEUE_HANDLE LockHandle,
IN KIRQL OldIrql)
{
UNIMPLEMENTED;
return 0;
}
KIRQL
KeSwapIrql(IN KIRQL Irql)
KeRaiseIrqlToDpcLevel(VOID)
{
UNIMPLEMENTED;
return 0;
}
KIRQL
KeRaiseIrqlToSynchLevel(VOID)
{
UNIMPLEMENTED;
return 0;
@@ -1163,4 +992,195 @@ HalSweepIcache(VOID)
KeArmFlushIcache();
}
/*
* @implemented
*/
KIRQL
NTAPI
KeGetCurrentIrql(VOID)
{
/* Return IRQL */
return PCR->CurrentIrql;
}
/*
* @implemented
*/
VOID
NTAPI
KeLowerIrql(KIRQL NewIrql)
{
/* Call the fastcall function */
KfLowerIrql(NewIrql);
}
/*
* @implemented
*/
VOID
NTAPI
KeRaiseIrql(KIRQL NewIrql,
PKIRQL OldIrql)
{
/* Call the fastcall function */
*OldIrql = KfRaiseIrql(NewIrql);
}
/*
* @implemented
*/
VOID
NTAPI
KeAcquireSpinLock(PKSPIN_LOCK SpinLock,
PKIRQL OldIrql)
{
/* Call the fastcall function */
*OldIrql = KfAcquireSpinLock(SpinLock);
}
/*
* @implemented
*/
KIRQL
FASTCALL
KeAcquireSpinLockRaiseToSynch(PKSPIN_LOCK SpinLock)
{
/* Simply raise to dispatch */
return KfRaiseIrql(DISPATCH_LEVEL);
}
/*
* @implemented
*/
VOID
NTAPI
KeReleaseSpinLock(PKSPIN_LOCK SpinLock,
KIRQL NewIrql)
{
/* Call the fastcall function */
KfReleaseSpinLock(SpinLock, NewIrql);
}
/*
* @implemented
*/
KIRQL
FASTCALL
KfAcquireSpinLock(PKSPIN_LOCK SpinLock)
{
/* Simply raise to dispatch */
return KfRaiseIrql(DISPATCH_LEVEL);
}
/*
* @implemented
*/
VOID
FASTCALL
KfReleaseSpinLock(PKSPIN_LOCK SpinLock,
KIRQL OldIrql)
{
/* Simply lower IRQL back */
KfLowerIrql(OldIrql);
}
/*
* @implemented
*/
KIRQL
FASTCALL
KeAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
{
/* Simply raise to dispatch */
return KfRaiseIrql(DISPATCH_LEVEL);
}
/*
* @implemented
*/
KIRQL
FASTCALL
KeAcquireQueuedSpinLockRaiseToSynch(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
{
/* Simply raise to dispatch */
return KfRaiseIrql(DISPATCH_LEVEL);
}
/*
* @implemented
*/
VOID
FASTCALL
KeAcquireInStackQueuedSpinLock(IN PKSPIN_LOCK SpinLock,
IN PKLOCK_QUEUE_HANDLE LockHandle)
{
/* Simply raise to dispatch */
LockHandle->OldIrql = KfRaiseIrql(DISPATCH_LEVEL);
}
/*
* @implemented
*/
VOID
FASTCALL
KeAcquireInStackQueuedSpinLockRaiseToSynch(IN PKSPIN_LOCK SpinLock,
IN PKLOCK_QUEUE_HANDLE LockHandle)
{
/* Simply raise to synch */
LockHandle->OldIrql = KfRaiseIrql(SYNCH_LEVEL);
}
/*
* @implemented
*/
VOID
FASTCALL
KeReleaseQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
IN KIRQL OldIrql)
{
/* Simply lower IRQL back */
KfLowerIrql(OldIrql);
}
/*
* @implemented
*/
VOID
FASTCALL
KeReleaseInStackQueuedSpinLock(IN PKLOCK_QUEUE_HANDLE LockHandle)
{
/* Simply lower IRQL back */
KfLowerIrql(LockHandle->OldIrql);
}
/*
* @implemented
*/
BOOLEAN
FASTCALL
KeTryToAcquireQueuedSpinLockRaiseToSynch(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
IN PKIRQL OldIrql)
{
/* Simply raise to dispatch */
*OldIrql = KfRaiseIrql(DISPATCH_LEVEL);
/* Always return true on UP Machines */
return TRUE;
}
/*
* @implemented
*/
LOGICAL
FASTCALL
KeTryToAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
OUT PKIRQL OldIrql)
{
/* Simply raise to dispatch */
*OldIrql = KfRaiseIrql(DISPATCH_LEVEL);
/* Always return true on UP Machines */
return TRUE;
}
/* EOF */
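A hedged usage sketch, not part of the diff: on this uniprocessor HAL nothing ever spins, so the acquire/release pairs above reduce to IRQL moves and the KSPIN_LOCK storage is never written.

    KIRQL OldIrql;
    static KSPIN_LOCK Lock;                  /* kept only for API compatibility */

    KeAcquireSpinLock(&Lock, &OldIrql);      /* == OldIrql = KfRaiseIrql(DISPATCH_LEVEL) */
    /* ... touch shared state; DPCs on this processor are held off ... */
    KeReleaseSpinLock(&Lock, OldIrql);       /* == KfLowerIrql(OldIrql) */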

View file

@@ -0,0 +1,75 @@
/*
* PROJECT: ReactOS Kernel
* LICENSE: GPL - See COPYING in the top level directory
* FILE: ntoskrnl/ex/exinrin.c
* PURPOSE: Exported kernel functions which are now intrinsics
* PROGRAMMERS: ReactOS Portable Systems Group
*/
/* INCLUDES *******************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
#undef InterlockedIncrement
#undef InterlockedDecrement
#undef InterlockedCompareExchange
#undef InterlockedExchangeAdd
#undef InterlockedExchange
/* FUNCTIONS ******************************************************************/
LONG
FASTCALL
InterlockedIncrement(IN LONG volatile *Addend)
{
//
// Call the intrinsic
//
return _InterlockedIncrement(Addend);
}
LONG
FASTCALL
InterlockedDecrement(IN LONG volatile *Addend)
{
//
// Call the intrinsic
//
return _InterlockedDecrement(Addend);
}
LONG
FASTCALL
InterlockedCompareExchange(IN OUT LONG volatile *Destination,
IN LONG Exchange,
IN LONG Comperand)
{
//
// Call the intrinsic
//
return _InterlockedCompareExchange(Destination, Exchange, Comperand);
}
LONG
FASTCALL
InterlockedExchange(IN OUT LONG volatile *Destination,
IN LONG Value)
{
//
// Call the intrinsic
//
return _InterlockedExchange(Destination, Value);
}
LONG
FASTCALL
InterlockedExchangeAdd(IN OUT LONG volatile *Addend,
IN LONG Increment)
{
//
// Call the intrinsic
//
return _InterlockedExchangeAdd(Addend, Increment);
}

View file

@@ -558,54 +558,6 @@ _ExpInterlockedPopEntrySListEnd@0:
/* Return */
ret
/*ULONG
*FASTCALL
*InterlockedIncrement(IN PLONG Addend)
*/
.global @InterlockedIncrement@4
@InterlockedIncrement@4:
/* Do the op */
mov eax, 1
LOCK xadd dword ptr [ecx], eax
/* Return */
inc eax
ret
/*ULONG
*FASTCALL
*InterlockedDecrement(IN PLONG Addend)
*/
.global @InterlockedDecrement@4
@InterlockedDecrement@4:
/* Do the op */
mov eax, -1
LOCK xadd dword ptr [ecx], eax
/* Return */
dec eax
ret
/*PVOID
*FASTCALL
*InterlockedCompareExchange(IN OUT PVOID *Destination,
* IN PVOID Exchange,
* IN PVOID Comperand)
*/
.global @InterlockedCompareExchange@12
@InterlockedCompareExchange@12:
/* Get comperand */
mov eax, [esp+4]
/* Do the op */
LOCK cmpxchg dword ptr [ecx], edx
/* Return */
ret 4
/*PVOID
*FASTCALL
*ExfInterlockedCompareExchange64(IN PLONGLONG Destination,
@@ -669,21 +621,6 @@ _ExpInterlockedPopEntrySListEnd@0:
/* Return */
ret 8
/*PVOID
*FASTCALL
*InterlockedExchangeAdd(IN OUT PLONG Addend,
* IN LONG Increment)
*/
.global @InterlockedExchangeAdd@8
@InterlockedExchangeAdd@8:
/* Do the op */
LOCK xadd dword ptr [ecx], edx
/* Return */
mov eax, edx
ret
/*** Non-586 functions ***/
/*PSINGLE_LIST_ENTRY

View file

@@ -22,6 +22,16 @@
#undef PsGetCurrentProcess
#define PsGetCurrentProcess _PsGetCurrentProcess
//
// We are very lazy on ARM -- we just import intrinsics
// Question: Why wasn't this done for x86 too? (see fastintrlck.asm)
//
#define InterlockedDecrement _InterlockedDecrement
#define InterlockedIncrement _InterlockedIncrement
#define InterlockedCompareExchange _InterlockedCompareExchange
#define InterlockedExchange _InterlockedExchange
#define InterlockedExchangeAdd _InterlockedExchangeAdd
#include "ke.h"
#include "i386/mm.h"
#include "i386/fpu.h"
@@ -55,15 +65,6 @@
#include "hal.h"
#include "arch/intrin_i.h"
//
// We are very lazy on ARM -- we just import intrinsics
// Question: Why wasn't this done for x86 too? (see fastintrlck.asm)
//
#define InterlockedDecrement _InterlockedDecrement
#define InterlockedIncrement _InterlockedIncrement
#define InterlockedCompareExchange _InterlockedCompareExchange
#define InterlockedExchangeAdd _InterlockedExchangeAdd
#include <pshpack1.h>
/*
* Defines a descriptor as it appears in the processor tables

View file

@@ -31,11 +31,6 @@ GENERATE_ARM_STUB Exfi386InterlockedIncrementLong
GENERATE_ARM_STUB Exi386InterlockedDecrementLong
GENERATE_ARM_STUB Exi386InterlockedExchangeUlong
GENERATE_ARM_STUB Exi386InterlockedIncrementLong
GENERATE_ARM_STUB InterlockedCompareExchange
GENERATE_ARM_STUB InterlockedDecrement
GENERATE_ARM_STUB InterlockedExchange
GENERATE_ARM_STUB InterlockedExchangeAdd
GENERATE_ARM_STUB InterlockedIncrement
GENERATE_ARM_STUB InterlockedPopEntrySList
GENERATE_ARM_STUB InterlockedPushEntrySList

View file

@@ -11,6 +11,7 @@
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
/* GLOBALS ********************************************************************/
#undef UNIMPLEMENTED
@@ -551,15 +552,50 @@ MmSetPageProtect(IN PEPROCESS Process,
*/
PHYSICAL_ADDRESS
NTAPI
MmGetPhysicalAddress(IN PVOID vaddr)
MmGetPhysicalAddress(IN PVOID Address)
{
PHYSICAL_ADDRESS p;
PHYSICAL_ADDRESS PhysicalAddress = {{0}};
PMMPTE PointerPte;
DPRINT1("MmGetPhysicalAddress(%lx)\n", Address);
//
// TODO
// Early boot PCR check
//
UNIMPLEMENTED;
return p;
if (Address == PCR)
{
//
// ARM Hack while we still use a section PTE
//
PointerPte = MiGetPdeAddress(PCR);
ASSERT(PointerPte->u.Hard.L1.Section.Type == SectionPte);
PhysicalAddress.QuadPart = PointerPte->u.Hard.L1.Section.BaseAddress;
PhysicalAddress.QuadPart <<= CPT_SHIFT;
DPRINT1("Base: %p\n", PhysicalAddress.LowPart);
PhysicalAddress.LowPart += BYTE_OFFSET(Address);
return PhysicalAddress;
}
//
// Get the PTE
//
PointerPte = MiGetPteAddress(Address);
if (PointerPte->u.Hard.L1.Fault.Type == FaultPte)
{
//
// Invalid address
//
DPRINT1("Address invalid: %p\n", Address);
return PhysicalAddress;
}
//
// Return the information
//
ASSERT(PointerPte->u.Hard.L2.Small.Type != SmallPte);
PhysicalAddress.QuadPart = PointerPte->u.Hard.L2.Small.BaseAddress;
PhysicalAddress.QuadPart <<= PAGE_SHIFT;
PhysicalAddress.LowPart += BYTE_OFFSET(Address);
return PhysicalAddress;
}
PVOID
@@ -691,10 +727,9 @@ NTAPI
MiGetUserPageDirectoryCount(VOID)
{
//
// TODO
// Return the index
//
UNIMPLEMENTED;
return 0;
return MiGetPdeOffset(MmSystemRangeStart);
}
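/* Hedged arithmetic check, not from the commit: assuming ARM 1MB L1
   sections (so MiGetPdeOffset(Va) == Va >> 20) and the usual 2GB/2GB
   split with MmSystemRangeStart == 0x80000000:
       0x80000000 >> 20 == 0x800 == 2048
   PDE slots 0..2047 cover user space, so the index of the first system
   PDE doubles as the user page-directory entry count. */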
VOID

View file

@@ -181,6 +181,7 @@
<file>efi.c</file>
<file>event.c</file>
<file>evtpair.c</file>
<file>exintrin.c</file>
<file>fmutex.c</file>
<file>handle.c</file>
<file>harderr.c</file>

View file

@@ -1,17 +1,16 @@
/*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/rtl/libsupp.c
* PURPOSE: RTL Support Routines
* PROGRAMMERS: Alex Ionescu (alex@relsoft.net)
* Gunnar Dalsnes
* PROJECT: ReactOS Kernel
* LICENSE: GPL - See COPYING in the top level directory
* FILE: ntoskrnl/rtl/arm/rtlexcpt.c
* PURPOSE: ARM Exception Helper Routines for Stack Walking
* PROGRAMMERS: ReactOS Portable Systems Group
*/
/* INCLUDES ******************************************************************/
/* INCLUDES *******************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <internal/debug.h>
#include <debug.h>
/* FUNCTIONS *****************************************************************/