[NTOS]: Move more functions from the i386 ARM3 directory to the portable/shared directory, since they apply to all architectures.

[NTOS]: Refactor the mapping of the PTEs for the PFN database into MiMapPfnDatabase and move the code to portable directory.
[NTOS]: Move and refactor some other definitions, and make some numbers more portable by defining arch-specific subvalues.
[NTOS]: Make the PFN database actually 2 PFN databases: MmPfnDatabase[0], which is the ReactOS mapping of PHYSICAL_PAGE structures, and MmPfnDatabase[1], which will be the ARM3 mapping of MMPFN structures. The latter is as yet unused, but memory for it is now being reserved.

svn path=/trunk/; revision=45566
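To make the new layout concrete, here is a minimal sketch (not code from the commit) of the indexing convention the message describes, based on the declarations and the MI_PFN_TO_PFNENTRY macro visible in the diffs below; MiSketchGetArm3Entry and SomePfn are made-up names, and the usual NT kernel headers are assumed:

/* Two parallel databases, indexed by page frame number:
   [0] holds the ReactOS PHYSICAL_PAGE entries, [1] the (still unused) ARM3 MMPFN entries */
extern PMMPFN MmPfnDatabase[2];

static PMMPFN
MiSketchGetArm3Entry(IN PFN_NUMBER SomePfn)
{
    /* ReactOS-side entry: the same arithmetic MiGetPfnEntry performs,
       assuming SomePfn is covered by MiPfnBitMap */
    PMMPFN RosEntry = &MmPfnDatabase[0][SomePfn];
    ASSERT(RosEntry == MiGetPfnEntry(SomePfn));

    /* ARM3-side entry: what the new MI_PFN_TO_PFNENTRY(SomePfn) expands to */
    return &MmPfnDatabase[1][SomePfn];
}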
Sir Richard 2010-02-11 00:01:32 +00:00
parent dd6b443e89
commit c7f7e3bac7
6 changed files with 367 additions and 288 deletions

View file

@ -365,7 +365,7 @@ typedef struct _MMPFN
} u4;
} MMPFN, *PMMPFN;
extern PMMPFN MmPfnDatabase;
extern PMMPFN MmPfnDatabase[2];
typedef struct _MMPFNLIST
{
@ -1098,7 +1098,7 @@ MiGetPfnEntry(IN PFN_TYPE Pfn)
if ((MiPfnBitMap.Buffer) && !(RtlTestBit(&MiPfnBitMap, Pfn))) return NULL;
/* Get the entry */
Page = &MmPfnDatabase[Pfn];
Page = &MmPfnDatabase[0][Pfn];
/* Make sure it's valid */
ASSERT_PFN(Page);
@ -1114,7 +1114,7 @@ MiGetPfnEntryIndex(IN PMMPFN Pfn1)
//
// This will return the Page Frame Number (PFN) from the MMPFN
//
return Pfn1 - MmPfnDatabase;
return Pfn1 - MmPfnDatabase[0];
}
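/*
 * Illustrative note (not part of the diff): the two accessors above remain
 * exact inverses after the change. For a page frame number that is covered
 * by MiPfnBitMap:
 *
 *     PMMPFN Entry = MiGetPfnEntry(Pfn);          // == &MmPfnDatabase[0][Pfn]
 *     PFN_TYPE Index = MiGetPfnEntryIndex(Entry); // == Entry - MmPfnDatabase[0]
 *     ASSERT(Index == Pfn);
 */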
PFN_TYPE

View file

@ -18,42 +18,10 @@
/* GLOBALS ********************************************************************/
//
// Before we have a PFN database, memory comes straight from our physical memory
// blocks, which is nice because it's guaranteed contiguous and also because once
// we take a page from here, the system doesn't see it anymore.
// However, once the fun is over, those pages must be re-integrated back into
// PFN society life, and that requires us keeping a copy of the original layout
// so that we can parse it later.
//
PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;
/* Template PTE and PDE for a kernel page */
MMPTE ValidKernelPde = {.u.Hard.Valid = 1, .u.Hard.Write = 1, .u.Hard.Dirty = 1, .u.Hard.Accessed = 1};
MMPTE ValidKernelPte = {.u.Hard.Valid = 1, .u.Hard.Write = 1, .u.Hard.Dirty = 1, .u.Hard.Accessed = 1};
/*
* For each page's worth of bytes of L2 cache in a given set/way line, the zero and
* free lists are organized in what is called a "color".
*
* This array points to the two lists, so it can be thought of as a multi-dimensional
* array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
* we describe the array in pointer form instead.
*
* On a final note, the color tables themselves are right after the PFN database.
*/
C_ASSERT(FreePageList == 1);
PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];
/* Make the code cleaner with some definitions for size multiples */
#define _1KB (1024)
#define _1MB (1000 * _1KB)
/* Architecture specific size of a PDE directory, and size of a page table */
#define PDE_SIZE (4096 * sizeof(MMPDE))
#define PT_SIZE (1024 * sizeof(MMPTE))
/* PRIVATE FUNCTIONS **********************************************************/
VOID
@ -171,109 +139,6 @@ MiComputeNonPagedPoolVa(IN ULONG FreePages)
}
}
VOID
NTAPI
MiComputeColorInformation(VOID)
{
ULONG L2Associativity;
/* Check if no setting was provided already */
if (!MmSecondaryColors)
{
/* Get L2 cache information */
L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
/* The number of colors is the number of cache bytes by set/way */
MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
if (L2Associativity) MmSecondaryColors /= L2Associativity;
}
/* Now convert cache bytes into pages */
MmSecondaryColors >>= PAGE_SHIFT;
if (!MmSecondaryColors)
{
/* If there was no cache data from the KPCR, use the default colors */
MmSecondaryColors = MI_SECONDARY_COLORS;
}
else
{
/* Otherwise, make sure there aren't too many colors */
if (MmSecondaryColors > MI_MAX_SECONDARY_COLORS)
{
/* Set the maximum */
MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
}
/* Make sure there aren't too few colors */
if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
{
/* Set the default */
MmSecondaryColors = MI_SECONDARY_COLORS;
}
/* Finally make sure the colors are a power of two */
if (MmSecondaryColors & (MmSecondaryColors - 1))
{
/* Set the default */
MmSecondaryColors = MI_SECONDARY_COLORS;
}
}
/* Compute the mask and store it */
MmSecondaryColorMask = MmSecondaryColors - 1;
KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
}
VOID
NTAPI
MiInitializeColorTables(VOID)
{
ULONG i;
PMMPTE PointerPte, LastPte;
MMPTE TempPte = ValidKernelPte;
/* The color table starts after the PFN database */
MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1];
/* Loop the PTEs. We have two color tables for each secondary color */
PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
(2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES))
- 1);
while (PointerPte <= LastPte)
{
/* Check for valid PTE */
if (PointerPte->u.Hard.Valid == 0)
{
/* Get a page and map it */
TempPte.u.Hard.PageFrameNumber = MxGetNextPage(1);
ASSERT(TempPte.u.Hard.Valid == 1);
*PointerPte = TempPte;
/* Zero out the page */
RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
}
/* Next */
PointerPte++;
}
/* Now set the address of the next list, right after this one */
MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
/* Now loop the lists to set them up */
for (i = 0; i < MmSecondaryColors; i++)
{
/* Set both free and zero lists for each color */
MmFreePagesByColor[ZeroedPageList][i].Flink = 0xFFFFFFFF;
MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)0xFFFFFFFF;
MmFreePagesByColor[ZeroedPageList][i].Count = 0;
MmFreePagesByColor[FreePageList][i].Flink = 0xFFFFFFFF;
MmFreePagesByColor[FreePageList][i].Blink = (PVOID)0xFFFFFFFF;
MmFreePagesByColor[FreePageList][i].Count = 0;
}
}
NTSTATUS
NTAPI
MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
@ -286,7 +151,6 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
MMPTE TempPde, TempPte;
PVOID NonPagedPoolExpansionVa;
ULONG OldCount;
PFN_NUMBER FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
/* Check for kernel stack size that's too big */
if (MmLargeStackSize > (KERNEL_LARGE_STACK_SIZE / _1KB))
@ -436,10 +300,11 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
MiComputeColorInformation();
//
// Calculate the number of bytes for the PFN database, and the color tables,
// and then convert to pages
// Calculate the number of bytes for the PFN database, double it for ARM3,
// then add the color tables and convert to pages
//
MxPfnAllocation = (MmHighestPhysicalPage + 1) * sizeof(MMPFN);
MxPfnAllocation <<= 1;
MxPfnAllocation += (MmSecondaryColors * sizeof(MMCOLOR_TABLES) * 2);
MxPfnAllocation >>= PAGE_SHIFT;
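//
// Worked example (hypothetical 512 MB machine, sizeof(MMPFN) assumed to be the
// 24-byte x86 layout): MmHighestPhysicalPage + 1 = 0x20000 pages, so
//   0x20000 * 24 bytes                 = 3 MB  for the ReactOS database
//   doubled for the ARM3 database      = 6 MB
//   + 2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES), only a few KB
//   >> PAGE_SHIFT                      = roughly 1536 pages reserved up front
//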
@ -513,13 +378,19 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
// with the old memory manager, so we'll create a "Shadow PFN Database"
// instead, and arbitrarily start it at 0xB0000000.
//
MmPfnDatabase = (PVOID)0xB0000000;
ASSERT(((ULONG_PTR)MmPfnDatabase & ((4 * 1024 * 1024) - 1)) == 0);
// We actually create two PFN databases: one for ReactOS, which starts here,
// and one for ARM3, which starts right after it. The MxPfnAllocation
// variable holds the size of both (the color tables come after the
// ARM3 PFN database).
//
MmPfnDatabase[0] = (PVOID)0xB0000000;
MmPfnDatabase[1] = &MmPfnDatabase[0][MmHighestPhysicalPage];
ASSERT(((ULONG_PTR)MmPfnDatabase[0] & ((4 * 1024 * 1024) - 1)) == 0);
//
// Non paged pool comes after the PFN database
//
MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmPfnDatabase +
MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmPfnDatabase[0] +
(MxPfnAllocation << PAGE_SHIFT));
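//
// Resulting virtual layout, reusing the hypothetical 0x20000-page machine from
// the sizing example above (only 0xB0000000 itself is fixed; the rest scales
// with the amount of RAM):
//
//   0xB0000000   MmPfnDatabase[0]     ReactOS PHYSICAL_PAGE entries
//   + ~3 MB      MmPfnDatabase[1]     ARM3 MMPFN entries (reserved, unused)
//   + ~6 MB      color tables         2 * MmSecondaryColors MMCOLOR_TABLES entries
//   then         MmNonPagedPoolStart  = MmPfnDatabase[0] + (MxPfnAllocation << PAGE_SHIFT)
//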
//
@ -567,7 +438,7 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
//
// Now we need pages for the page tables which will map initial NP
//
StartPde = MiAddressToPde(MmPfnDatabase);
StartPde = MiAddressToPde(MmPfnDatabase[0]);
EndPde = MiAddressToPde((PVOID)((ULONG_PTR)MmNonPagedPoolStart +
MmSizeOfNonPagedPoolInBytes - 1));
while (StartPde <= EndPde)
@ -629,134 +500,16 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
//
MiInitializeArmPool();
//
// Get current page data, since we won't be using MxGetNextPage as it
// would corrupt our state
//
FreePage = MxFreeDescriptor->BasePage;
FreePageCount = MxFreeDescriptor->PageCount;
PagesLeft = 0;
//
// Loop the memory descriptors
//
NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
while (NextEntry != &KeLoaderBlock->MemoryDescriptorListHead)
{
//
// Get the descriptor
//
MdBlock = CONTAINING_RECORD(NextEntry,
MEMORY_ALLOCATION_DESCRIPTOR,
ListEntry);
if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
(MdBlock->MemoryType == LoaderBBTMemory) ||
(MdBlock->MemoryType == LoaderSpecialMemory))
{
//
// These pages are not part of the PFN database
//
NextEntry = MdBlock->ListEntry.Flink;
continue;
}
//
// Next, check if this is our special free descriptor we've found
//
if (MdBlock == MxFreeDescriptor)
{
//
// Use the real numbers instead
//
BasePage = MxOldFreeDescriptor.BasePage;
PageCount = MxOldFreeDescriptor.PageCount;
}
else
{
//
// Use the descriptor's numbers
//
BasePage = MdBlock->BasePage;
PageCount = MdBlock->PageCount;
}
//
// Get the PTEs for this range
//
PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
//
// Loop them
//
while (PointerPte <= LastPte)
{
//
// We'll only touch PTEs that aren't already valid
//
if (PointerPte->u.Hard.Valid == 0)
{
//
// Use the next free page
//
TempPte.u.Hard.PageFrameNumber = FreePage;
ASSERT(FreePageCount != 0);
//
// Consume free pages
//
FreePage++;
FreePageCount--;
if (!FreePageCount)
{
//
// Out of memory
//
KeBugCheckEx(INSTALL_MORE_MEMORY,
MmNumberOfPhysicalPages,
FreePageCount,
MxOldFreeDescriptor.PageCount,
1);
}
//
// Write out this PTE
//
PagesLeft++;
ASSERT(PointerPte->u.Hard.Valid == 0);
ASSERT(TempPte.u.Hard.Valid == 1);
*PointerPte = TempPte;
//
// Zero this page
//
RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
}
//
// Next!
//
PointerPte++;
}
//
// Do the next address range
//
NextEntry = MdBlock->ListEntry.Flink;
}
//
// Now update the free descriptors to consume the pages we used up during
// the PFN allocation loop
//
MxFreeDescriptor->BasePage = FreePage;
MxFreeDescriptor->PageCount = FreePageCount;
/* Map the PFN database pages */
MiMapPfnDatabase(LoaderBlock);
/* Initialize the color tables */
MiInitializeColorTables();
/* Build the PFN Database */
//MiInitializePfnDatabase(LoaderBlock);
/* Call back into shitMM to setup the PFN database */
/* Call back into shitMM to setup the ReactOS PFN database */
MmInitializePageList();
//

View file

@ -38,6 +38,26 @@
#define MM_HIGHEST_VAD_ADDRESS \
(PVOID)((ULONG_PTR)MM_HIGHEST_USER_ADDRESS - (16 * PAGE_SIZE))
/* Make the code cleaner with some definitions for size multiples */
#define _1KB (1024)
#define _1MB (1000 * _1KB)
/* Size of a PDE directory, and size of a page table */
#define PDE_SIZE (PDE_COUNT * sizeof(MMPDE))
#define PT_SIZE (PTE_COUNT * sizeof(MMPTE))
/* Architecture specific count of PDEs in a directory, and count of PTEs in a PT */
#ifdef _M_IX86
#define PD_COUNT 1
#define PDE_COUNT 4096
#define PTE_COUNT 1024
#elif _M_ARM
#define PD_COUNT 1
#define PDE_COUNT 1024
#define PTE_COUNT 256
#else
#error Define these please!
#endif
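/*
 * Illustrative arithmetic only (assuming 4-byte PTEs and PDEs, i.e. non-PAE x86
 * and the ARM page-table format used here): the size macros above expand to
 *   x86: PT_SIZE = 1024 * 4 = 4096 bytes,  PDE_SIZE = 4096 * 4 = 16384 bytes
 *   ARM: PT_SIZE =  256 * 4 = 1024 bytes,  PDE_SIZE = 1024 * 4 =  4096 bytes
 */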
//
// FIXFIX: These should go in ex.h after the pool merge
@ -190,11 +210,10 @@ extern ULONG MmSecondaryColors;
extern ULONG MmSecondaryColorMask;
extern ULONG MmNumberOfSystemPtes;
extern ULONG MmMaximumNonPagedPoolPercent;
extern ULONG MmLargeStackSize;
//
// Actual (registry-configurable) size of a GUI thread's stack
//
ULONG MmLargeStackSize;
#define MI_PFN_TO_PFNENTRY(x) (&MmPfnDatabase[1][x])
#define MI_PFNENTRY_TO_PFN(x) (Pfn - &MmPfnDatabase[1])
NTSTATUS
NTAPI
@ -209,6 +228,30 @@ MiInitMachineDependent(
IN PLOADER_PARAMETER_BLOCK LoaderBlock
);
VOID
NTAPI
MiComputeColorInformation(
VOID
);
VOID
NTAPI
MiMapPfnDatabase(
IN PLOADER_PARAMETER_BLOCK LoaderBlock
);
VOID
NTAPI
MiInitializeColorTables(
VOID
);
VOID
NTAPI
MiInitializePfnDatabase(
IN PLOADER_PARAMETER_BLOCK LoaderBlock
);
PFN_NUMBER
NTAPI
MxGetNextPage(

View file

@ -234,6 +234,30 @@ ULONG MmSecondaryColorMask;
//
ULONG MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
//
// Before we have a PFN database, memory comes straight from our physical memory
// blocks, which is nice because it's guaranteed contiguous and also because once
// we take a page from here, the system doesn't see it anymore.
// However, once the fun is over, those pages must be re-integrated back into
// PFN society life, and that requires us keeping a copy of the original layout
// so that we can parse it later.
//
PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;
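/*
 * Sketch (illustrative, not code from this file) of how the pair above is used:
 * MxGetNextPage() hands out page frames straight from MxFreeDescriptor and
 * advances it, while MxOldFreeDescriptor keeps the untouched original so that
 * the consumed range can be accounted for later (see MiMapPfnDatabase and
 * MmInitializePageList).
 */
#if 0 /* not compiled; a rough equivalent of the allocation pattern */
PFN_NUMBER
MxGetNextPageSketch(IN PFN_NUMBER PageCount)
{
    PFN_NUMBER Pfn;

    /* Bail out hard if the free descriptor cannot satisfy the request */
    if (MxFreeDescriptor->PageCount < PageCount)
    {
        KeBugCheckEx(INSTALL_MORE_MEMORY,
                     MmNumberOfPhysicalPages,
                     MxFreeDescriptor->PageCount,
                     MxOldFreeDescriptor.PageCount,
                     1);
    }

    /* Consume the pages from the front of the free descriptor */
    Pfn = MxFreeDescriptor->BasePage;
    MxFreeDescriptor->BasePage += PageCount;
    MxFreeDescriptor->PageCount -= PageCount;
    return Pfn;
}
#endif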
/*
* For each page's worth of bytes of L2 cache in a given set/way line, the zero and
* free lists are organized in what is called a "color".
*
* This array points to the two lists, so it can be thought of as a multi-dimensional
* array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
* we describe the array in pointer form instead.
*
* On a final note, the color tables themselves are right after the PFN database.
*/
C_ASSERT(FreePageList == 1);
PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];
/* PRIVATE FUNCTIONS **********************************************************/
//
@ -283,6 +307,264 @@ MxGetNextPage(IN PFN_NUMBER PageCount)
return Pfn;
}
VOID
NTAPI
MiComputeColorInformation(VOID)
{
ULONG L2Associativity;
/* Check if no setting was provided already */
if (!MmSecondaryColors)
{
/* Get L2 cache information */
L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
/* The number of colors is the number of cache bytes by set/way */
MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
if (L2Associativity) MmSecondaryColors /= L2Associativity;
}
/* Now convert cache bytes into pages */
MmSecondaryColors >>= PAGE_SHIFT;
if (!MmSecondaryColors)
{
/* If there was no cache data from the KPCR, use the default colors */
MmSecondaryColors = MI_SECONDARY_COLORS;
}
else
{
/* Otherwise, make sure there aren't too many colors */
if (MmSecondaryColors > MI_MAX_SECONDARY_COLORS)
{
/* Set the maximum */
MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
}
/* Make sure there aren't too few colors */
if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
{
/* Set the default */
MmSecondaryColors = MI_SECONDARY_COLORS;
}
/* Finally make sure the colors are a power of two */
if (MmSecondaryColors & (MmSecondaryColors - 1))
{
/* Set the default */
MmSecondaryColors = MI_SECONDARY_COLORS;
}
}
/* Compute the mask and store it */
MmSecondaryColorMask = MmSecondaryColors - 1;
KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
}
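/*
 * Worked example (hypothetical cache geometry): a 512 KB, 8-way L2 cache gives
 * 512 KB / 8 = 64 KB per way; 64 KB >> PAGE_SHIFT = 16 colors, which is already
 * a power of two and within the MI_MIN/MI_MAX bounds, so MmSecondaryColorMask
 * ends up as 15 (0xF).
 */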
VOID
NTAPI
MiInitializeColorTables(VOID)
{
ULONG i;
PMMPTE PointerPte, LastPte;
MMPTE TempPte = ValidKernelPte;
/* The color table starts after the ARM3 PFN database */
MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[1][MmHighestPhysicalPage + 1];
/* Loop the PTEs. We have two color tables for each secondary color */
PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
(2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES))
- 1);
while (PointerPte <= LastPte)
{
/* Check for valid PTE */
if (PointerPte->u.Hard.Valid == 0)
{
/* Get a page and map it */
TempPte.u.Hard.PageFrameNumber = MxGetNextPage(1);
ASSERT(TempPte.u.Hard.Valid == 1);
*PointerPte = TempPte;
/* Zero out the page */
RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
}
/* Next */
PointerPte++;
}
/* Now set the address of the next list, right after this one */
MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
/* Now loop the lists to set them up */
for (i = 0; i < MmSecondaryColors; i++)
{
/* Set both free and zero lists for each color */
MmFreePagesByColor[ZeroedPageList][i].Flink = 0xFFFFFFFF;
MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)0xFFFFFFFF;
MmFreePagesByColor[ZeroedPageList][i].Count = 0;
MmFreePagesByColor[FreePageList][i].Flink = 0xFFFFFFFF;
MmFreePagesByColor[FreePageList][i].Blink = (PVOID)0xFFFFFFFF;
MmFreePagesByColor[FreePageList][i].Count = 0;
}
}
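/*
 * Illustrative use (nothing consumes these lists yet at this point): a page's
 * color is its frame number masked with MmSecondaryColorMask, so the zeroed-list
 * head for a hypothetical frame number Pfn would be
 *
 *     &MmFreePagesByColor[ZeroedPageList][Pfn & MmSecondaryColorMask]
 */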
BOOLEAN
NTAPI
MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
IN PFN_NUMBER Pfn)
{
PLIST_ENTRY NextEntry;
PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
/* Loop the memory descriptors */
NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
{
/* Get the memory descriptor */
MdBlock = CONTAINING_RECORD(NextEntry,
MEMORY_ALLOCATION_DESCRIPTOR,
ListEntry);
/* Check if this PFN could be part of the block */
if (Pfn >= (MdBlock->BasePage))
{
/* Check if it really is part of the block */
if (Pfn < (MdBlock->BasePage + MdBlock->PageCount))
{
/* Check if the block is actually memory we don't map */
if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
(MdBlock->MemoryType == LoaderBBTMemory) ||
(MdBlock->MemoryType == LoaderSpecialMemory))
{
/* We don't need PFN database entries for this memory */
break;
}
/* This is memory we want to map */
return TRUE;
}
}
else
{
/* Blocks are ordered, so if it's not here, it doesn't exist */
break;
}
/* Get to the next descriptor */
NextEntry = MdBlock->ListEntry.Flink;
}
/* Check if this PFN is actually from our free memory descriptor */
if ((Pfn >= MxOldFreeDescriptor.BasePage) &&
(Pfn < MxOldFreeDescriptor.BasePage + MxOldFreeDescriptor.PageCount))
{
/* We use these pages for initial mappings, so we do want to count them */
return TRUE;
}
/* Otherwise this isn't memory that we describe or care about */
return FALSE;
}
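/*
 * Illustrative use (not in this commit): counting the frames that will need a
 * real PFN database entry by testing every possible frame number.
 *
 *     PFN_NUMBER Pfn, RegularPages = 0;
 *     for (Pfn = 0; Pfn <= MmHighestPhysicalPage; Pfn++)
 *     {
 *         if (MiIsRegularMemory(LoaderBlock, Pfn)) RegularPages++;
 *     }
 */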
VOID
NTAPI
MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
ULONG FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
PLIST_ENTRY NextEntry;
PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
PMMPTE PointerPte, LastPte;
MMPTE TempPte = ValidKernelPte;
/* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
FreePage = MxFreeDescriptor->BasePage;
FreePageCount = MxFreeDescriptor->PageCount;
PagesLeft = 0;
/* Loop the memory descriptors */
NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
{
/* Get the descriptor */
MdBlock = CONTAINING_RECORD(NextEntry,
MEMORY_ALLOCATION_DESCRIPTOR,
ListEntry);
if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
(MdBlock->MemoryType == LoaderBBTMemory) ||
(MdBlock->MemoryType == LoaderSpecialMemory))
{
/* These pages are not part of the PFN database */
NextEntry = MdBlock->ListEntry.Flink;
continue;
}
/* Next, check if this is our special free descriptor we've found */
if (MdBlock == MxFreeDescriptor)
{
/* Use the real numbers instead */
BasePage = MxOldFreeDescriptor.BasePage;
PageCount = MxOldFreeDescriptor.PageCount;
}
else
{
/* Use the descriptor's numbers */
BasePage = MdBlock->BasePage;
PageCount = MdBlock->PageCount;
}
/* Get the PTEs for this range */
PointerPte = MiAddressToPte(&MmPfnDatabase[0][BasePage]);
LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[0][BasePage + PageCount]) - 1);
DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
/* Loop them */
while (PointerPte <= LastPte)
{
/* We'll only touch PTEs that aren't already valid */
if (PointerPte->u.Hard.Valid == 0)
{
/* Use the next free page */
TempPte.u.Hard.PageFrameNumber = FreePage;
ASSERT(FreePageCount != 0);
/* Consume free pages */
FreePage++;
FreePageCount--;
if (!FreePageCount)
{
/* Out of memory */
KeBugCheckEx(INSTALL_MORE_MEMORY,
MmNumberOfPhysicalPages,
FreePageCount,
MxOldFreeDescriptor.PageCount,
1);
}
/* Write out this PTE */
PagesLeft++;
ASSERT(PointerPte->u.Hard.Valid == 0);
ASSERT(TempPte.u.Hard.Valid == 1);
*PointerPte = TempPte;
/* Zero this page */
RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
}
/* Next! */
PointerPte++;
}
/* Do the next address range */
NextEntry = MdBlock->ListEntry.Flink;
}
/* Now update the free descriptors to consume the pages we used up during the PFN allocation loop */
MxFreeDescriptor->BasePage = FreePage;
MxFreeDescriptor->PageCount = FreePageCount;
}
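/*
 * Worked example (hypothetical descriptor, sizeof(MMPFN) assumed to be 24 bytes):
 * a block with BasePage 0x100 and PageCount 0x200 covers PFN entries from
 * 0xB0000000 + 0x100 * 24 = 0xB0001800 up to 0xB0000000 + 0x300 * 24 - 1 =
 * 0xB00047FF, i.e. the four virtual pages 0xB0001000..0xB0004FFF, so at most
 * four physical pages are pulled from MxFreeDescriptor to back this range.
 */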
PFN_NUMBER
NTAPI
MiPagesInLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
@ -788,7 +1070,7 @@ MmArmInitSystem(IN ULONG Phase,
// Sync us up with ReactOS Mm
//
MiSyncARM3WithROS(MmNonPagedSystemStart, (PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
MiSyncARM3WithROS(MmPfnDatabase, (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1));
MiSyncARM3WithROS(MmPfnDatabase[0], (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1));
MiSyncARM3WithROS((PVOID)HYPER_SPACE, (PVOID)(HYPER_SPACE + PAGE_SIZE - 1));
//

View file

@ -48,7 +48,8 @@
#define PHYSICAL_PAGE MMPFN
#define PPHYSICAL_PAGE PMMPFN
PPHYSICAL_PAGE MmPfnDatabase;
/* The first array contains ReactOS PFNs, the second contains ARM3 PFNs */
PPHYSICAL_PAGE MmPfnDatabase[2];
ULONG MmAvailablePages;
ULONG MmResidentAvailablePages;
@ -59,7 +60,7 @@ SIZE_T MmSharedCommit;
SIZE_T MmDriverCommit;
SIZE_T MmProcessCommit;
SIZE_T MmPagedPoolCommit;
SIZE_T MmPeakCommitment;
SIZE_T MmPeakCommitment;
SIZE_T MmtotalCommitLimitMaximum;
MMPFNLIST MmZeroedPageListHead;
@ -101,7 +102,7 @@ MmGetLRUFirstUserPage(VOID)
PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
ASSERT_PFN(PageDescriptor);
KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
return PageDescriptor - MmPfnDatabase;
return PageDescriptor - MmPfnDatabase[0];
}
VOID
@ -142,7 +143,7 @@ MmGetLRUNextUserPage(PFN_TYPE PreviousPfn)
}
PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
return PageDescriptor - MmPfnDatabase;
return PageDescriptor - MmPfnDatabase[0];
}
VOID
@ -758,9 +759,9 @@ MmInitializePageList(VOID)
for (i = 0; i < Md->PageCount; i++)
{
/* Mark it as a free page */
MmPfnDatabase[Md->BasePage + i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
MmPfnDatabase[0][Md->BasePage + i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
InsertTailList(&FreeUnzeroedPageListHead,
&MmPfnDatabase[Md->BasePage + i].ListEntry);
&MmPfnDatabase[0][Md->BasePage + i].ListEntry);
UnzeroedPageCount++;
MmAvailablePages++;
}
@ -771,7 +772,7 @@ MmInitializePageList(VOID)
for (i = 0; i < Md->PageCount; i++)
{
/* Everything else is used memory */
MmPfnDatabase[Md->BasePage + i] = UsedPage;
MmPfnDatabase[0][Md->BasePage + i] = UsedPage;
NrSystemPages++;
}
}
@ -781,10 +782,10 @@ MmInitializePageList(VOID)
for (i = MxOldFreeDescriptor.BasePage; i < MxFreeDescriptor->BasePage; i++)
{
/* Ensure this page was not added previously */
ASSERT(MmPfnDatabase[i].Flags.Type == 0);
ASSERT(MmPfnDatabase[0][i].Flags.Type == 0);
/* Mark it as used kernel memory */
MmPfnDatabase[i] = UsedPage;
MmPfnDatabase[0][i] = UsedPage;
NrSystemPages++;
}
@ -1082,7 +1083,7 @@ MmAllocPage(ULONG Consumer, SWAPENTRY SwapEntry)
MmAvailablePages--;
PfnOffset = PageDescriptor - MmPfnDatabase;
PfnOffset = PageDescriptor - MmPfnDatabase[0];
if ((NeedClear) && (Consumer != MC_SYSTEM))
{
MiZeroPage(PfnOffset);
@ -1155,7 +1156,7 @@ MmZeroPageThreadMain(PVOID Ignored)
/* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
Pfn = PageDescriptor - MmPfnDatabase;
Pfn = PageDescriptor - MmPfnDatabase[0];
Status = MiZeroPage(Pfn);
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

View file

@ -109,7 +109,7 @@ MiInitSystemMemoryAreas()
//
// Protect the PFN database
//
BaseAddress = MmPfnDatabase;
BaseAddress = MmPfnDatabase[0];
Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
MEMORY_AREA_OWNED_BY_ARM3 | MEMORY_AREA_STATIC,
&BaseAddress,
@ -292,8 +292,8 @@ MiDbgDumpAddressSpace(VOID)
(ULONG_PTR)MmPagedPoolBase + MmPagedPoolSize,
"Paged Pool");
DPRINT1(" 0x%p - 0x%p\t%s\n",
MmPfnDatabase,
(ULONG_PTR)MmPfnDatabase + (MxPfnAllocation << PAGE_SHIFT),
MmPfnDatabase[0],
(ULONG_PTR)MmPfnDatabase[0] + (MxPfnAllocation << PAGE_SHIFT),
"PFN Database");
DPRINT1(" 0x%p - 0x%p\t%s\n",
MmNonPagedPoolStart,