Remove more dead code from MmInit1. This nugget was trying to free memory allocations between 0x80F50000 and 0x80600000?

[FORMATTING] Make MmInitializePageList readable by human beings.
Remove code in MmInitializePageList which was just repeating what MmInit1 had already done (why?). Remove alignment code which was duplicated (why??).

svn path=/trunk/; revision=32354
ReactOS Portable Systems Group 2008-02-14 04:44:51 +00:00
parent ad3e1b7948
commit 7ea9f649ca
2 changed files with 204 additions and 245 deletions


@@ -319,247 +319,216 @@ MmInitializePageList(ULONG_PTR FirstPhysKernelAddress,
                      ULONG_PTR LastKernelAddress,
                      PADDRESS_RANGE BIOSMemoryMap,
                      ULONG AddressRangeCount)
-/*
- * FUNCTION: Initializes the page list with all pages free
- * except those known to be reserved and those used by the kernel
- * ARGUMENTS:
- *         FirstKernelAddress = First physical address used by the kernel
- *         LastKernelAddress = Last physical address used by the kernel
- */
 {
    ULONG i;
    ULONG Reserved;
    NTSTATUS Status;
    PFN_TYPE LastPage;
    PFN_TYPE FirstUninitializedPage;
    ULONG PdeStart = PsGetCurrentProcess()->Pcb.DirectoryTableBase.LowPart;
-   DPRINT("MmInitializePageList(FirstPhysKernelAddress %x, "
-          "LastPhysKernelAddress %x, "
-          "MemorySizeInPages %x, LastKernelAddress %x)\n",
-          FirstPhysKernelAddress,
-          LastPhysKernelAddress,
-          MemorySizeInPages,
-          LastKernelAddress);
-   for (i = 0; i < MC_MAXIMUM; i++)
-   {
-      InitializeListHead(&UsedPageListHeads[i]);
-   }
    KeInitializeSpinLock(&PageListLock);
+   for (i = 0; i < MC_MAXIMUM; i++) InitializeListHead(&UsedPageListHeads[i]);
    InitializeListHead(&FreeUnzeroedPageListHead);
    InitializeListHead(&FreeZeroedPageListHead);
    InitializeListHead(&BiosPageListHead);
    LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);
+   LastPhysKernelAddress = (ULONG_PTR)PAGE_ROUND_UP(LastPhysKernelAddress);
    MmPageArraySize = MemorySizeInPages;
-   Reserved =
-      PAGE_ROUND_UP((MmPageArraySize * sizeof(PHYSICAL_PAGE))) / PAGE_SIZE;
    MmPageArray = (PHYSICAL_PAGE *)LastKernelAddress;
+   Reserved = PAGE_ROUND_UP((MmPageArraySize * sizeof(PHYSICAL_PAGE))) / PAGE_SIZE;
    DPRINT("Reserved %d\n", Reserved);
-   LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);
    LastKernelAddress = ((ULONG_PTR)LastKernelAddress + (Reserved * PAGE_SIZE));
-   LastPhysKernelAddress = (ULONG_PTR)PAGE_ROUND_UP(LastPhysKernelAddress);
    LastPhysKernelAddress = (ULONG_PTR)LastPhysKernelAddress + (Reserved * PAGE_SIZE);
-   MmStats.NrTotalPages = 0;
-   MmStats.NrSystemPages = 0;
-   MmStats.NrUserPages = 0;
-   MmStats.NrReservedPages = 0;
-   MmStats.NrFreePages = 0;
-   MmStats.NrLockedPages = 0;
    /* Preinitialize the Balancer because we need some pages for pte's */
    MmInitializeBalancer(MemorySizeInPages, 0);
    FirstUninitializedPage = (ULONG_PTR)LastPhysKernelAddress / PAGE_SIZE;
    LastPage = MmPageArraySize;
    for (i = 0; i < Reserved; i++)
    {
       PVOID Address = (char*)MmPageArray + (i * PAGE_SIZE);
       ULONG j, start, end;
       if (!MmIsPagePresent(NULL, Address))
       {
          PFN_TYPE Pfn;
          Pfn = 0;
          while (Pfn == 0 && LastPage > FirstUninitializedPage)
          {
             /* Allocate the page from the upper end of the RAM */
             if (MiIsPfnRam(BIOSMemoryMap, AddressRangeCount, --LastPage))
             {
                Pfn = LastPage;
             }
          }
          if (Pfn == 0)
          {
             Pfn = MmAllocPage(MC_NPPOOL, 0);
             if (Pfn == 0)
             {
                KEBUGCHECK(0);
             }
          }
          Status = MmCreateVirtualMappingForKernel(Address,
                                                   PAGE_READWRITE,
                                                   &Pfn,
                                                   1);
          if (!NT_SUCCESS(Status))
          {
             DPRINT1("Unable to create virtual mapping\n");
             KEBUGCHECK(0);
          }
       }
       else
       {
          /* Setting the page protection is necessary to set the global bit on IA32 */
          MmSetPageProtect(NULL, Address, PAGE_READWRITE);
       }
       memset(Address, 0, PAGE_SIZE);
       start = ((ULONG_PTR)Address - (ULONG_PTR)MmPageArray) / sizeof(PHYSICAL_PAGE);
       end = ((ULONG_PTR)Address - (ULONG_PTR)MmPageArray + PAGE_SIZE) / sizeof(PHYSICAL_PAGE);
       for (j = start; j < end && j < LastPage; j++)
       {
          if (MiIsPfnRam(BIOSMemoryMap, AddressRangeCount, j))
          {
             if (j == 0)
             {
                /*
                 * Page zero is reserved for the IVT
                 */
                MmPageArray[0].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
                MmPageArray[0].Flags.Consumer = MC_NPPOOL;
                MmPageArray[0].Flags.Zero = 0;
                MmPageArray[0].ReferenceCount = 0;
                InsertTailList(&BiosPageListHead,
                               &MmPageArray[0].ListEntry);
                MmStats.NrReservedPages++;
             }
             else if (j == 1)
             {
                /*
                 * Page one is reserved for the initial KPCR
                 */
                MmPageArray[1].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
                MmPageArray[1].Flags.Consumer = MC_NPPOOL;
                MmPageArray[1].Flags.Zero = 0;
                MmPageArray[1].ReferenceCount = 0;
                InsertTailList(&BiosPageListHead,
                               &MmPageArray[1].ListEntry);
                MmStats.NrReservedPages++;
             }
             else if (j == 2)
             {
                /*
                 * Page two is reserved for the KUSER_SHARED_DATA
                 */
                MmPageArray[2].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
                MmPageArray[2].Flags.Consumer = MC_NPPOOL;
                MmPageArray[2].Flags.Zero = 0;
                MmPageArray[2].ReferenceCount = 0;
                InsertTailList(&BiosPageListHead,
                               &MmPageArray[2].ListEntry);
                MmStats.NrReservedPages++;
             }
             /* Protect the Page Directory. This will be changed in r3 */
             else if (j >= (PdeStart / PAGE_SIZE) && j < (MmFreeLdrPageDirectoryEnd / PAGE_SIZE))
             {
                MmPageArray[j].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
                MmPageArray[j].Flags.Zero = 0;
                MmPageArray[j].Flags.Consumer = MC_NPPOOL;
                MmPageArray[j].ReferenceCount = 1;
                InsertTailList(&BiosPageListHead,
                               &MmPageArray[j].ListEntry);
                MmStats.NrReservedPages++;
             }
             else if (j >= 0xa0000 / PAGE_SIZE && j < 0x100000 / PAGE_SIZE)
             {
                MmPageArray[j].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
                MmPageArray[j].Flags.Zero = 0;
                MmPageArray[j].Flags.Consumer = MC_NPPOOL;
                MmPageArray[j].ReferenceCount = 1;
                InsertTailList(&BiosPageListHead,
                               &MmPageArray[j].ListEntry);
                MmStats.NrReservedPages++;
             }
             else if (j >= (ULONG)FirstPhysKernelAddress/PAGE_SIZE &&
                      j < (ULONG)LastPhysKernelAddress/PAGE_SIZE)
             {
                MmPageArray[j].Flags.Type = MM_PHYSICAL_PAGE_USED;
                MmPageArray[j].Flags.Zero = 0;
                MmPageArray[j].Flags.Consumer = MC_NPPOOL;
                /* Reference count 2, because we're having ReferenceCount track
                   MapCount as well. */
                MmPageArray[j].ReferenceCount = 2;
                MmPageArray[j].MapCount = 1;
                InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                               &MmPageArray[j].ListEntry);
                MmStats.NrSystemPages++;
             }
             else
             {
                MmPageArray[j].Flags.Type = MM_PHYSICAL_PAGE_FREE;
                MmPageArray[j].Flags.Zero = 0;
                MmPageArray[j].ReferenceCount = 0;
                InsertTailList(&FreeUnzeroedPageListHead,
                               &MmPageArray[j].ListEntry);
                UnzeroedPageCount++;
                MmStats.NrFreePages++;
             }
          }
          else
          {
             MmPageArray[j].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
             MmPageArray[j].Flags.Consumer = MC_NPPOOL;
             MmPageArray[j].Flags.Zero = 0;
             MmPageArray[j].ReferenceCount = 0;
             InsertTailList(&BiosPageListHead,
                            &MmPageArray[j].ListEntry);
             MmStats.NrReservedPages++;
          }
       }
       FirstUninitializedPage = j;
    }
    /* Add the pages from the upper end to the list */
    for (i = LastPage; i < MmPageArraySize; i++)
    {
       if (MiIsPfnRam(BIOSMemoryMap, AddressRangeCount, i))
       {
          MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_USED;
          MmPageArray[i].Flags.Zero = 0;
          MmPageArray[i].Flags.Consumer = MC_NPPOOL;
          MmPageArray[i].ReferenceCount = 2;
          MmPageArray[i].MapCount = 1;
          InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                         &MmPageArray[i].ListEntry);
          MmStats.NrSystemPages++;
       }
       else
       {
          MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
          MmPageArray[i].Flags.Consumer = MC_NPPOOL;
          MmPageArray[i].Flags.Zero = 0;
          MmPageArray[i].ReferenceCount = 0;
          InsertTailList(&BiosPageListHead,
                         &MmPageArray[i].ListEntry);
          MmStats.NrReservedPages++;
       }
    }
    KeInitializeEvent(&ZeroPageThreadEvent, NotificationEvent, TRUE);
-   MmStats.NrTotalPages = MmStats.NrFreePages + MmStats.NrSystemPages +
-                          MmStats.NrReservedPages + MmStats.NrUserPages;
+   MmStats.NrTotalPages = MmStats.NrFreePages + MmStats.NrSystemPages + MmStats.NrReservedPages + MmStats.NrUserPages;
    MmInitializeBalancer(MmStats.NrFreePages, MmStats.NrSystemPages + MmStats.NrReservedPages);
    return((PVOID)LastKernelAddress);
 }
 VOID
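
The "duplicated alignment code" the commit message complains about is visible in the hunk above: the old routine applied PAGE_ROUND_UP to LastKernelAddress twice (and rounded LastPhysKernelAddress separately, further down), while the new version rounds both addresses exactly once, right after the list heads are set up. The removal relies only on the fact that rounding an already page-aligned value again is a no-op. A minimal standalone sketch of that property follows; the PAGE_ROUND_UP definition here is the conventional one written out locally, not taken from the ReactOS headers.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096u
    #define PAGE_ROUND_UP(x) \
        (((uintptr_t)(x) + PAGE_SIZE - 1) & ~((uintptr_t)PAGE_SIZE - 1))

    int main(void)
    {
        /* Arbitrary example address; any value works. */
        uintptr_t LastKernelAddress = 0x80f4a123u;

        uintptr_t once  = PAGE_ROUND_UP(LastKernelAddress);
        uintptr_t twice = PAGE_ROUND_UP(once);   /* second round-up changes nothing */

        printf("once  = 0x%lx\n", (unsigned long)once);
        printf("twice = 0x%lx\n", (unsigned long)twice);
        return once == twice ? 0 : 1;
    }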


@@ -289,7 +289,6 @@ MmInit1(ULONG_PTR FirstKrnlPhysAddr,
             ULONG MaxMem)
 {
    ULONG kernel_len;
-   ULONG_PTR MappingAddress;
    PLDR_DATA_TABLE_ENTRY LdrEntry;
    /* Dump memory descriptors */
@@ -353,15 +352,6 @@ MmInit1(ULONG_PTR FirstKrnlPhysAddr,
    /* Unmap low memory */
    MmDeletePageTable(NULL, 0);
-   /* Unmap FreeLDR's 6MB allocation */
-   DPRINT("Invalidating between %p and %p\n", LastKernelAddress, KSEG0_BASE + 0x00600000);
-   for (MappingAddress = LastKernelAddress;
-        MappingAddress < KSEG0_BASE + 0x00600000;
-        MappingAddress += PAGE_SIZE)
-   {
-      MmRawDeleteVirtualMapping((PVOID)MappingAddress);
-   }
    /* Intialize memory areas */
    MmInitVirtualMemory(LastKernelAddress, kernel_len);
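
The block removed from MmInit1 is the "dead code" the commit message calls out: the loop was supposed to unmap FreeLDR's 6MB allocation, but by this point LastKernelAddress already sits around 0x80F50000, which is above the KSEG0_BASE + 0x00600000 (0x80600000) upper bound, so the loop condition is false on entry and MmRawDeleteVirtualMapping is never called. A small standalone sketch using the values quoted in the commit message, with KSEG0_BASE assumed to be the usual x86 value of 0x80000000:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE  4096u
    #define KSEG0_BASE 0x80000000u   /* assumed: the usual x86 kernel base */

    int main(void)
    {
        /* Value quoted in the commit message for LastKernelAddress. */
        uintptr_t LastKernelAddress = 0x80F50000u;
        uintptr_t MappingAddress;
        unsigned iterations = 0;

        for (MappingAddress = LastKernelAddress;
             MappingAddress < KSEG0_BASE + 0x00600000;   /* 0x80600000 */
             MappingAddress += PAGE_SIZE)
        {
            /* The removed code called MmRawDeleteVirtualMapping() here. */
            iterations++;
        }

        printf("iterations = %u\n", iterations);   /* prints 0: the loop never ran */
        return 0;
    }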