- This is a HIGH RISK patch. It has been tested on multiple emulators and configurations but requires broader input.

- Implement several changes to PFN database management:
  - The PTEs for the PFN database are now created by ARM3. Unlike the old code, which created a PTE for every page on the machine, ARM3 only creates PTEs for the pages that should be in the PFN database.
  - A second, related change is which pages should be in the PFN database in the first place. Previously, reserved or otherwise non-existent memory regions (i.e. holes) would get a PFN entry created and marked as "BIOS". This is wasteful and not compatible with Windows: no PFN entries should be created for such regions at all.
    - So we removed BIOS PFN entries, and now only create PTEs for valid pages as listed in the physical memory ranges.
    - This allows machines with "holes" in their physical address space to avoid wasting dozens of MB of nonpaged pool
    - It also saves memory on regular machines, since 1-4 MB worth of memory will no longer be in the DB
  - To keep track of pages that are invalid/unknown/ignored, there is now a "PFN Bitmap". This bitmap has one bit set for each valid PFN in the database.
    - Accordingly, MiGetPfnEntry now also validates that, if a PFN Bitmap exists, the requested PFN is actually present in the database (see the sketch after this list).
  - This introduces a major functional change: device pages, reserved pages, and other BIOS pages cannot be referenced, shared, or managed in any meaningful way.
    - We have attempted to fix parts of the OS that depended on this, but there may still be bugs.
      - A known issue may be an assertion during reboot and/or shutdown in the hyperspace mapping function. It is currently safe to simply "cont" in the debugger a couple of times.
        - We are working on a fix.
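- As an illustration of how the PFN Bitmap separates real RAM from holes, here is a minimal conceptual sketch (hypothetical names and memory layout, not the actual ReactOS code; assumes the usual NTDDK definitions of RTL_BITMAP and the Rtl bitmap routines):

      /* One bit per page frame; example machine with highest PFN 0x1FFFF (512 MB) */
      RTL_BITMAP SketchBitMap;
      ULONG SketchBuffer[(0x20000 + 31) / 32];

      VOID
      SketchBuildBitmap(VOID)
      {
          /* Start with every page frame marked "not in the database" */
          RtlInitializeBitMap(&SketchBitMap, SketchBuffer, 0x20000);
          RtlClearAllBits(&SketchBitMap);

          /* Two hypothetical RAM runs with a hole in between */
          RtlSetBits(&SketchBitMap, 0x0001, 0x009F);   /* 4 KB .. 640 KB */
          RtlSetBits(&SketchBitMap, 0x0100, 0x1F00);   /* 1 MB .. 32 MB  */
      }

      BOOLEAN
      SketchPfnIsRam(IN ULONG Pfn)
      {
          /* The same kind of test MiGetPfnEntry now performs before touching the database */
          if (Pfn >= 0x20000) return FALSE;        /* beyond the highest PFN   */
          return RtlTestBit(&SketchBitMap, Pfn);   /* a clear bit means a hole */
      }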

svn path=/trunk/; revision=42220
ReactOS Portable Systems Group 2009-07-25 21:35:31 +00:00
parent fba1aea89e
commit 1093de04f2
4 changed files with 234 additions and 79 deletions

View file

@ -1045,10 +1045,14 @@ PMMPFN
MiGetPfnEntry(IN PFN_TYPE Pfn)
{
PMMPFN Page;
extern RTL_BITMAP MiPfnBitMap;
/* Make sure the PFN number is valid */
if (Pfn > MmHighestPhysicalPage) return NULL;
/* Make sure this page actually has a PFN entry */
if ((MiPfnBitMap.Buffer) && !(RtlTestBit(&MiPfnBitMap, Pfn))) return NULL;
/* Get the entry */
Page = &MmPfnDatabase[Pfn];

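As a usage note (illustrative only, not part of the diff): any caller that can legitimately be handed an I/O-space PFN or a hole must now tolerate a NULL return, roughly along these lines:

    /* Hypothetical helper showing the pattern callers follow now */
    BOOLEAN
    NTAPI
    SketchPfnIsManaged(IN PFN_TYPE Pfn)
    {
        PMMPFN Page = MiGetPfnEntry(Pfn);

        /* NULL now means device memory, a hole, or an out-of-range PFN */
        if (!Page) return FALSE;

        /* Only pages that do have a PFN entry can be referenced, shared, or managed */
        return TRUE;
    }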
View file

@ -170,6 +170,20 @@ ULONG MmNumberOfSystemPtes;
//
ULONG MxPfnAllocation;
//
// Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
// of pages that are not actually valid physical memory, such as ACPI reserved
// regions, BIOS address ranges, or holes in physical memory address space which
// could indicate device-mapped I/O memory.
//
// In fact, the lack of a PFN entry for a page usually indicates that this is
// I/O space instead.
//
// A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
// a bit to each. If the bit is set, then the page is valid physical RAM.
//
RTL_BITMAP MiPfnBitMap;
//
// This structure describes the different pieces of RAM-backed address space
//
@ -596,6 +610,9 @@ MmArmInitSystem(IN ULONG Phase,
ULONG OldCount;
BOOLEAN IncludeType[LoaderMaximum];
ULONG i;
PVOID Bitmap;
PPHYSICAL_MEMORY_RUN Run;
PFN_NUMBER FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
BoundaryAddressMultiple.QuadPart = 0;
if (Phase == 0)
@ -1108,9 +1125,138 @@ MmArmInitSystem(IN ULONG Phase,
// Now go ahead and initialize the ARM³ nonpaged pool
//
MiInitializeArmPool();
//
// Get current page data, since we won't be using MxGetNextPage as it
// would corrupt our state
//
FreePage = MxFreeDescriptor->BasePage;
FreePageCount = MxFreeDescriptor->PageCount;
PagesLeft = 0;
//
// Loop the memory descriptors
//
NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
while (NextEntry != &KeLoaderBlock->MemoryDescriptorListHead)
{
//
// Get the descriptor
//
MdBlock = CONTAINING_RECORD(NextEntry,
MEMORY_ALLOCATION_DESCRIPTOR,
ListEntry);
if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
(MdBlock->MemoryType == LoaderBBTMemory) ||
(MdBlock->MemoryType == LoaderSpecialMemory))
{
//
// These pages are not part of the PFN database
//
NextEntry = MdBlock->ListEntry.Flink;
continue;
}
//
// Next, check if this is our special free descriptor we've found
//
if (MdBlock == MxFreeDescriptor)
{
//
// Use the real numbers instead
//
BasePage = MxOldFreeDescriptor.BasePage;
PageCount = MxOldFreeDescriptor.PageCount;
}
else
{
//
// Use the descriptor's numbers
//
BasePage = MdBlock->BasePage;
PageCount = MdBlock->PageCount;
}
//
// Get the PTEs for this range
//
PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
//
// Loop them
//
while (PointerPte <= LastPte)
{
//
// We'll only touch PTEs that aren't already valid
//
if (PointerPte->u.Hard.Valid == 0)
{
//
// Use the next free page
//
TempPte.u.Hard.PageFrameNumber = FreePage;
ASSERT(FreePageCount != 0);
//
// Consume free pages
//
FreePage++;
FreePageCount--;
if (!FreePageCount)
{
//
// Out of memory
//
KeBugCheckEx(INSTALL_MORE_MEMORY,
MmNumberOfPhysicalPages,
FreePageCount,
MxOldFreeDescriptor.PageCount,
1);
}
//
// Write out this PTE
//
PagesLeft++;
ASSERT(PointerPte->u.Hard.Valid == 0);
ASSERT(TempPte.u.Hard.Valid == 1);
*PointerPte = TempPte;
//
// Zero this page
//
RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
}
//
// Next!
//
PointerPte++;
}
//
// Do the next address range
//
NextEntry = MdBlock->ListEntry.Flink;
}
//
// Now update the free descriptors to consume the pages we used up during
// the PFN allocation loop
//
MxFreeDescriptor->BasePage = FreePage;
MxFreeDescriptor->PageCount = FreePageCount;
}
else if (Phase == 1) // IN BETWEEN, THE PFN DATABASE IS NOW CREATED
{
//
// Reset the descriptor back so we can create the correct memory blocks
//
*MxFreeDescriptor = MxOldFreeDescriptor;
//
// Initialize the nonpaged pool
//
@ -1197,16 +1343,57 @@ MmArmInitSystem(IN ULONG Phase,
//
MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
IncludeType);
//
// Allocate enough buffer for the PFN bitmap
// Align it up to a 32-bit boundary
//
Bitmap = ExAllocatePoolWithTag(NonPagedPool,
(((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
' mM');
if (!Bitmap)
{
//
// This is critical
//
KeBugCheckEx(INSTALL_MORE_MEMORY,
MmNumberOfPhysicalPages,
MmLowestPhysicalPage,
MmHighestPhysicalPage,
0x101);
}
//
// Initialize it and clear all the bits to begin with
//
RtlInitializeBitMap(&MiPfnBitMap,
Bitmap,
MmHighestPhysicalPage + 1);
RtlClearAllBits(&MiPfnBitMap);
//
// Loop physical memory runs
//
for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i++)
{
//
// Dump it for debugging
// Get the run
//
PPHYSICAL_MEMORY_RUN Run;
Run = &MmPhysicalMemoryBlock->Run[i];
DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
Run->BasePage << PAGE_SHIFT,
(Run->BasePage + Run->PageCount) << PAGE_SHIFT);
//
// Make sure it has pages inside it
//
if (Run->PageCount)
{
//
// Set the bits in the PFN bitmap
//
RtlSetBits(&MiPfnBitMap, Run->BasePage, Run->PageCount);
}
}
//

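For reference, the pool allocation in the hunk above reserves one bit per page frame and rounds the buffer up to whole ULONGs; with a hypothetical highest PFN the sizing works out as follows:

    /* Worked example (hypothetical machine, 512 MB of RAM, 4 KB pages):
     *   MmHighestPhysicalPage = 0x1FFFF
     *   bits needed           = 0x1FFFF + 1        = 131072
     *   ULONGs needed         = (131072 + 31) / 32 = 4096
     *   bytes allocated       = 4096 * 4           = 16384   (16 KB of nonpaged pool)
     */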
View file

@ -133,6 +133,7 @@ extern PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
extern MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;
extern ULONG MxPfnAllocation;
extern MM_PAGED_POOL_INFO MmPagedPoolInfo;
extern RTL_BITMAP MiPfnBitMap;
VOID
NTAPI

View file

@ -25,7 +25,6 @@
#define MM_PHYSICAL_PAGE_FREE (0x1)
#define MM_PHYSICAL_PAGE_USED (0x2)
#define MM_PHYSICAL_PAGE_BIOS (0x3)
/* GLOBALS ****************************************************************/
@ -98,6 +97,7 @@ MmInsertLRULastUserPage(PFN_TYPE Pfn)
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
Page = MiGetPfnEntry(Pfn);
ASSERT(Page);
ASSERT(Page->Flags.Type == MM_PHYSICAL_PAGE_USED);
ASSERT(Page->Flags.Consumer == MC_USER);
InsertTailList(&UserPageListHead, &Page->ListEntry);
@ -115,6 +115,7 @@ MmGetLRUNextUserPage(PFN_TYPE PreviousPfn)
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
Page = MiGetPfnEntry(PreviousPfn);
ASSERT(Page);
ASSERT(Page->Flags.Type == MM_PHYSICAL_PAGE_USED);
ASSERT(Page->Flags.Consumer == MC_USER);
NextListEntry = (PLIST_ENTRY)Page->ListEntry.Flink;
@ -495,6 +496,7 @@ MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
// Get the PFN entry for this page
//
Pfn1 = MiGetPfnEntry(Page);
ASSERT(Pfn1);
//
// Make sure it's free and if this is our first pass, zeroed
@ -591,6 +593,7 @@ MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
// Get the PFN entry for the page and check if we should zero it out
//
Pfn1 = MiGetPfnEntry(Page);
ASSERT(Pfn1);
if (Pfn1->Flags.Zero == 0) MiZeroPage(Page);
}
@ -610,7 +613,7 @@ MmDumpPfnDatabase(VOID)
PPHYSICAL_PAGE Pfn1;
PCHAR State = "????", Consumer = "Unknown";
KIRQL OldIrql;
ULONG Totals[5] = {0}, BiosPages = 0, FreePages = 0;
ULONG Totals[5] = {0}, FreePages = 0;
KeRaiseIrql(HIGH_LEVEL, &OldIrql);
@ -620,6 +623,7 @@ MmDumpPfnDatabase(VOID)
for (i = 0; i <= MmHighestPhysicalPage; i++)
{
Pfn1 = MiGetPfnEntry(i);
if (!Pfn1) continue;
//
// Get the consumer
@ -669,13 +673,6 @@ MmDumpPfnDatabase(VOID)
Consumer = "Free";
FreePages++;
break;
case MM_PHYSICAL_PAGE_BIOS:
State = "BIOS";
Consumer = "System Reserved";
BiosPages++;
break;
}
//
@ -696,64 +693,25 @@ MmDumpPfnDatabase(VOID)
DbgPrint("File System Cache: %d pages\t[%d KB]\n", Totals[MC_CACHE], (Totals[MC_CACHE] << PAGE_SHIFT) / 1024);
DbgPrint("Process Working Set: %d pages\t[%d KB]\n", Totals[MC_USER], (Totals[MC_USER] << PAGE_SHIFT) / 1024);
DbgPrint("System: %d pages\t[%d KB]\n", Totals[MC_SYSTEM], (Totals[MC_SYSTEM] << PAGE_SHIFT) / 1024);
DbgPrint("BIOS: %d pages\t[%d KB]\n", BiosPages, (BiosPages << PAGE_SHIFT) / 1024);
DbgPrint("Free: %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
KeLowerIrql(OldIrql);
}
PFN_NUMBER
NTAPI
MxGetNextPage(IN PFN_NUMBER PageCount);
VOID
NTAPI
MmInitializePageList(VOID)
{
ULONG i;
NTSTATUS Status;
PFN_TYPE Pfn = 0;
PHYSICAL_PAGE UsedPage;
PLIST_ENTRY NextEntry;
PMEMORY_ALLOCATION_DESCRIPTOR Md;
PLIST_ENTRY NextEntry;
/* Initialize the page lists */
InitializeListHead(&UserPageListHead);
InitializeListHead(&FreeUnzeroedPageListHead);
InitializeListHead(&FreeZeroedPageListHead);
/* Loop every page required to hold the PFN database */
for (i = 0; i < MxPfnAllocation; i++)
{
PVOID Address = (char*)MmPfnDatabase + (i * PAGE_SIZE);
/* Check if FreeLDR has already allocated it for us */
if (!MmIsPagePresent(NULL, Address))
{
/* Use one of our highest usable pages */
Pfn = MxGetNextPage(1);
/* Set the PFN */
Status = MmCreateVirtualMappingForKernel(Address,
PAGE_READWRITE,
&Pfn,
1);
if (!NT_SUCCESS(Status))
{
DPRINT1("Unable to create virtual mapping\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
}
else
{
/* Setting the page protection is necessary to set the global bit */
MmSetPageProtect(NULL, Address, PAGE_READWRITE);
}
}
/* Clear the PFN database */
RtlZeroMemory(MmPfnDatabase, (MmHighestPhysicalPage + 1) * sizeof(PHYSICAL_PAGE));
/* This is what a used page looks like */
RtlZeroMemory(&UsedPage, sizeof(UsedPage));
UsedPage.Flags.Type = MM_PHYSICAL_PAGE_USED;
@ -779,17 +737,10 @@ MmInitializePageList(VOID)
(Md->MemoryType == LoaderSpecialMemory) ||
(Md->MemoryType == LoaderBad))
{
/* Loop every page part of the block but valid in the database */
for (i = 0; i < Md->PageCount; i++)
{
/* Skip memory we ignore completely */
if ((Md->BasePage + i) > MmHighestPhysicalPage) break;
/* These are pages reserved by the BIOS/ROMs */
MmPfnDatabase[Md->BasePage + i].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
MmPfnDatabase[Md->BasePage + i].Flags.Consumer = MC_NPPOOL;
MmStats.NrSystemPages++;
}
//
// We do not build PFN entries for this
//
continue;
}
else if ((Md->MemoryType == LoaderFree) ||
(Md->MemoryType == LoaderLoadedProgram) ||
@ -872,6 +823,8 @@ MmMarkPageMapped(PFN_TYPE Pfn)
{
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
Page = MiGetPfnEntry(Pfn);
if (Page)
{
if (Page->Flags.Type == MM_PHYSICAL_PAGE_FREE)
{
DPRINT1("Mapping non-used page\n");
@ -879,6 +832,7 @@ MmMarkPageMapped(PFN_TYPE Pfn)
}
Page->MapCount++;
Page->ReferenceCount++;
}
KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
}
}
@ -894,6 +848,8 @@ MmMarkPageUnmapped(PFN_TYPE Pfn)
{
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
Page = MiGetPfnEntry(Pfn);
if (Page)
{
if (Page->Flags.Type == MM_PHYSICAL_PAGE_FREE)
{
DPRINT1("Unmapping non-used page\n");
@ -906,6 +862,7 @@ MmMarkPageUnmapped(PFN_TYPE Pfn)
}
Page->MapCount--;
Page->ReferenceCount--;
}
KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
}
}
@ -952,6 +909,7 @@ MmReferencePageUnsafe(PFN_TYPE Pfn)
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
Page = MiGetPfnEntry(Pfn);
ASSERT(Page);
if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
{
DPRINT1("Referencing non-used page\n");
@ -983,6 +941,7 @@ MmGetReferenceCountPage(PFN_TYPE Pfn)
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
Page = MiGetPfnEntry(Pfn);
ASSERT(Page);
if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
{
DPRINT1("Getting reference count for free page\n");
@ -1017,6 +976,7 @@ MmDereferencePage(PFN_TYPE Pfn)
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
Page = MiGetPfnEntry(Pfn);
ASSERT(Page);
if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
{
@ -1088,6 +1048,7 @@ MmGetLockCountPage(PFN_TYPE Pfn)
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
Page = MiGetPfnEntry(Pfn);
ASSERT(Page);
if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
{
DPRINT1("Getting lock count for free page\n");
@ -1112,6 +1073,7 @@ MmLockPageUnsafe(PFN_TYPE Pfn)
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
Page = MiGetPfnEntry(Pfn);
ASSERT(Page);
if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
{
DPRINT1("Locking free page\n");
@ -1143,6 +1105,7 @@ MmUnlockPage(PFN_TYPE Pfn)
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
Page = MiGetPfnEntry(Pfn);
ASSERT(Page);
if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
{
DPRINT1("Unlocking free page\n");
@ -1173,7 +1136,7 @@ MmAllocPage(ULONG Consumer, SWAPENTRY SwapEntry)
/* Check if this allocation is for the PFN DB itself */
if (MmStats.NrTotalPages == 0)
{
MxGetNextPage(1);
ASSERT(FALSE);
}
DPRINT1("MmAllocPage(): Out of memory\n");