reactos/ntoskrnl/mm/mminit.c

/*
* PROJECT: ReactOS Kernel
* LICENSE: GPL - See COPYING in the top level directory
* FILE: ntoskrnl/mm/mminit.c
* PURPOSE: Memory Manager Initialization
* PROGRAMMERS:
*/
/* INCLUDES ******************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
#define MODULE_INVOLVED_IN_ARM3
#include "ARM3/miarm.h"
/* GLOBALS *******************************************************************/
VOID NTAPI MiInitializeUserPfnBitmap(VOID);
HANDLE MpwThreadHandle;
KEVENT MpwThreadEvent;
BOOLEAN Mm64BitPhysicalAddress = FALSE;
ULONG MmReadClusterSize;
//
// 0 or 1 toggles paging of the executive on/off, 2 is undocumented
//
UCHAR MmDisablePagingExecutive = 1; // Paging of the executive is forced off
PMMPTE MmSharedUserDataPte;
PMMSUPPORT MmKernelAddressSpace;
/* PRIVATE FUNCTIONS *********************************************************/
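//
// Carve out static memory areas for each ARM3-owned region below so the
// legacy ReactOS Mm keeps its allocations out of them
//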
VOID
INIT_FUNCTION
NTAPI
MiInitSystemMemoryAreas(VOID)
{
PVOID BaseAddress;
PHYSICAL_ADDRESS BoundaryAddressMultiple;
PMEMORY_AREA MArea;
NTSTATUS Status;
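// A boundary multiple of zero places no alignment restriction on these areas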
BoundaryAddressMultiple.QuadPart = 0;
//
// Create the memory area to define the PTE base
//
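// (on non-PAE x86 this spans 4MB: 1024 page tables of 4KB of PTEs each)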
BaseAddress = (PVOID)PTE_BASE;
Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
MEMORY_AREA_OWNED_BY_ARM3 | MEMORY_AREA_STATIC,
&BaseAddress,
4 * 1024 * 1024,
PAGE_READWRITE,
&MArea,
TRUE,
0,
BoundaryAddressMultiple);
ASSERT(Status == STATUS_SUCCESS);
//
// Create the memory area to define Hyperspace
//
BaseAddress = (PVOID)HYPER_SPACE;
Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
MEMORY_AREA_OWNED_BY_ARM3 | MEMORY_AREA_STATIC,
&BaseAddress,
4 * 1024 * 1024,
PAGE_READWRITE,
&MArea,
TRUE,
0,
BoundaryAddressMultiple);
ASSERT(Status == STATUS_SUCCESS);
//
// Protect the PFN database
//
BaseAddress = MmPfnDatabase;
Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
MEMORY_AREA_OWNED_BY_ARM3 | MEMORY_AREA_STATIC,
&BaseAddress,
(MxPfnAllocation << PAGE_SHIFT),
PAGE_READWRITE,
&MArea,
TRUE,
0,
BoundaryAddressMultiple);
ASSERT(Status == STATUS_SUCCESS);
//
// ReactOS requires a memory area to keep the initial NP area off-bounds
//
BaseAddress = MmNonPagedPoolStart;
Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
MEMORY_AREA_OWNED_BY_ARM3 | MEMORY_AREA_STATIC,
&BaseAddress,
MmSizeOfNonPagedPoolInBytes,
PAGE_READWRITE,
&MArea,
TRUE,
0,
BoundaryAddressMultiple);
ASSERT(Status == STATUS_SUCCESS);
//
// And we need one more for the system NP
//
BaseAddress = MmNonPagedSystemStart;
Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
MEMORY_AREA_OWNED_BY_ARM3 | MEMORY_AREA_STATIC,
&BaseAddress,
(ULONG_PTR)MmNonPagedPoolEnd -
(ULONG_PTR)MmNonPagedSystemStart,
PAGE_READWRITE,
&MArea,
TRUE,
0,
BoundaryAddressMultiple);
ASSERT(Status == STATUS_SUCCESS);
//
// We also need one for system view space
//
BaseAddress = MiSystemViewStart;
Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
MEMORY_AREA_OWNED_BY_ARM3 | MEMORY_AREA_STATIC,
&BaseAddress,
MmSystemViewSize,
PAGE_READWRITE,
&MArea,
TRUE,
0,
BoundaryAddressMultiple);
ASSERT(Status == STATUS_SUCCESS);
//
// And another for session space
//
BaseAddress = MmSessionBase;
Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
MEMORY_AREA_OWNED_BY_ARM3 | MEMORY_AREA_STATIC,
&BaseAddress,
(ULONG_PTR)MiSessionSpaceEnd -
(ULONG_PTR)MmSessionBase,
PAGE_READWRITE,
&MArea,
TRUE,
0,
BoundaryAddressMultiple);
ASSERT(Status == STATUS_SUCCESS);
//
// One more for ARM paged pool
//
BaseAddress = MmPagedPoolStart;
Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
MEMORY_AREA_OWNED_BY_ARM3 | MEMORY_AREA_STATIC,
&BaseAddress,
MmSizeOfPagedPoolInBytes,
PAGE_READWRITE,
&MArea,
TRUE,
0,
BoundaryAddressMultiple);
ASSERT(Status == STATUS_SUCCESS);
//
// Next, the KPCR
//
BaseAddress = (PVOID)PCR;
Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
MEMORY_AREA_OWNED_BY_ARM3 | MEMORY_AREA_STATIC,
&BaseAddress,
PAGE_SIZE * KeNumberProcessors,
PAGE_READWRITE,
&MArea,
TRUE,
0,
BoundaryAddressMultiple);
ASSERT(Status == STATUS_SUCCESS);
//
// Now the KUSER_SHARED_DATA
//
BaseAddress = (PVOID)KI_USER_SHARED_DATA;
Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
MEMORY_AREA_OWNED_BY_ARM3 | MEMORY_AREA_STATIC,
&BaseAddress,
PAGE_SIZE,
PAGE_READWRITE,
&MArea,
TRUE,
0,
BoundaryAddressMultiple);
ASSERT(Status == STATUS_SUCCESS);
//
// And the debugger mapping
//
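// (KD maps target physical pages through this PTE; on x86 it lives at
// 0xFFBFF000, the last PTE of the nonpaged pool's PDE)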
BaseAddress = MI_DEBUG_MAPPING;
Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
MEMORY_AREA_OWNED_BY_ARM3 | MEMORY_AREA_STATIC,
&BaseAddress,
PAGE_SIZE,
PAGE_READWRITE,
&MArea,
TRUE,
0,
BoundaryAddressMultiple);
ASSERT(Status == STATUS_SUCCESS);
#if defined(_X86_)
//
// Finally, reserve the 2 pages we currently make use of for HAL mappings
//
BaseAddress = (PVOID)0xFFC00000;
Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
MEMORY_AREA_OWNED_BY_ARM3 | MEMORY_AREA_STATIC,
&BaseAddress,
PAGE_SIZE * 2,
PAGE_READWRITE,
&MArea,
TRUE,
0,
BoundaryAddressMultiple);
ASSERT(Status == STATUS_SUCCESS);
#endif
}
VOID
NTAPI
INIT_FUNCTION
MiDbgDumpAddressSpace(VOID)
{
//
// Print the memory layout
//
DPRINT1(" 0x%p - 0x%p\t%s\n",
MmSystemRangeStart,
(ULONG_PTR)MmSystemRangeStart + MmBootImageSize,
"Boot Loaded Image");
DPRINT1(" 0x%p - 0x%p\t%s\n",
MmPfnDatabase,
(ULONG_PTR)MmPfnDatabase + (MxPfnAllocation << PAGE_SHIFT),
"PFN Database");
DPRINT1(" 0x%p - 0x%p\t%s\n",
MmNonPagedPoolStart,
(ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes,
"ARM³ Non Paged Pool");
DPRINT1(" 0x%p - 0x%p\t%s\n",
MiSystemViewStart,
(ULONG_PTR)MiSystemViewStart + MmSystemViewSize,
"System View Space");
DPRINT1(" 0x%p - 0x%p\t%s\n",
MmSessionBase,
MiSessionSpaceEnd,
"Session Space");
DPRINT1(" 0x%p - 0x%p\t%s\n",
PTE_BASE, PDE_BASE,
"Page Tables");
DPRINT1(" 0x%p - 0x%p\t%s\n",
PDE_BASE, HYPER_SPACE,
"Page Directories");
DPRINT1(" 0x%p - 0x%p\t%s\n",
HYPER_SPACE, HYPER_SPACE + (4 * 1024 * 1024),
"Hyperspace");
DPRINT1(" 0x%p - 0x%p\t%s\n",
MmPagedPoolStart,
(ULONG_PTR)MmPagedPoolStart + MmSizeOfPagedPoolInBytes,
"ARM³ Paged Pool");
DPRINT1(" 0x%p - 0x%p\t%s\n",
MmNonPagedSystemStart, MmNonPagedPoolExpansionStart,
"System PTE Space");
DPRINT1(" 0x%p - 0x%p\t%s\n",
MmNonPagedPoolExpansionStart, MmNonPagedPoolEnd,
"Non Paged Pool Expansion PTE Space");
}
NTSTATUS
NTAPI
MmMpwThreadMain(PVOID Ignored)
{
NTSTATUS Status;
ULONG PagesWritten;
LARGE_INTEGER Timeout;
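/* Negative means relative, in 100ns units: 50,000,000 * 100ns = a 5 second writeback interval */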
Timeout.QuadPart = -50000000;
for(;;)
{
Status = KeWaitForSingleObject(&MpwThreadEvent,
Executive,
KernelMode,
FALSE,
&Timeout);
if (!NT_SUCCESS(Status))
{
DbgPrint("MpwThread: Wait failed\n");
KeBugCheck(MEMORY_MANAGEMENT);
return(STATUS_UNSUCCESSFUL);
}
PagesWritten = 0;
#ifndef NEWCC
// XXX arty -- we flush when evicting pages or destroying cache
// sections.
CcRosFlushDirtyPages(128, &PagesWritten);
#endif
}
}
NTSTATUS
NTAPI
INIT_FUNCTION
MmInitMpwThread(VOID)
{
KPRIORITY Priority;
NTSTATUS Status;
CLIENT_ID MpwThreadId;
KeInitializeEvent(&MpwThreadEvent, SynchronizationEvent, FALSE);
Status = PsCreateSystemThread(&MpwThreadHandle,
THREAD_ALL_ACCESS,
NULL,
NULL,
&MpwThreadId,
(PKSTART_ROUTINE) MmMpwThreadMain,
NULL);
if (!NT_SUCCESS(Status))
{
return(Status);
}
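/* Boost the writer into the realtime priority range so dirty pages get flushed promptly */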
Priority = 27;
NtSetInformationThread(MpwThreadHandle,
ThreadPriority,
&Priority,
sizeof(Priority));
return(STATUS_SUCCESS);
}
NTSTATUS
NTAPI
INIT_FUNCTION
MmInitBsmThread(VOID)
{
NTSTATUS Status;
OBJECT_ATTRIBUTES ObjectAttributes;
HANDLE ThreadHandle;
/* Create the thread */
InitializeObjectAttributes(&ObjectAttributes, NULL, 0, NULL, NULL);
Status = PsCreateSystemThread(&ThreadHandle,
THREAD_ALL_ACCESS,
&ObjectAttributes,
NULL,
NULL,
KeBalanceSetManager,
NULL);
/* Close the handle and return status */
ZwClose(ThreadHandle);
return Status;
}
BOOLEAN
NTAPI
INIT_FUNCTION
MmInitSystem(IN ULONG Phase,
IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
extern MMPTE ValidKernelPte;
PMMPTE PointerPte;
MMPTE TempPte = ValidKernelPte;
PFN_NUMBER PageFrameNumber;
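/* Phase 0 is handled by ARM3's MmArmInitSystem; only Phase 1 reaches this path */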
/* Initialize the kernel address space */
ASSERT(Phase == 1);
KeInitializeGuardedMutex(&PsIdleProcess->AddressCreationLock);
MmKernelAddressSpace = &PsIdleProcess->Vm;
/* Initialize system memory areas */
MiInitSystemMemoryAreas();
/* Dump the address space */
MiDbgDumpAddressSpace();
MmInitGlobalKernelPageDirectory();
MiInitializeUserPfnBitmap();
MmInitializeMemoryConsumer(MC_USER, MmTrimUserMemory);
MmInitializeRmapList();
MmInitializePageOp();
MmInitSectionImplementation();
MmInitPagingFile();
//
// Create a PTE to double-map the shared data section. We allocate it
// from paged pool so that we can't fault when trying to touch the PTE
// itself (to map it), since paged pool addresses will already be mapped
// by the fault handler.
//
MmSharedUserDataPte = ExAllocatePoolWithTag(PagedPool,
sizeof(MMPTE),
' mM');
if (!MmSharedUserDataPte) return FALSE;
//
// Now get the PTE for shared data, and read the PFN that holds it
//
PointerPte = MiAddressToPte((PVOID)KI_USER_SHARED_DATA);
ASSERT(PointerPte->u.Hard.Valid == 1);
PageFrameNumber = PFN_FROM_PTE(PointerPte);
/* Build the PTE and write it */
MI_MAKE_HARDWARE_PTE_KERNEL(&TempPte,
PointerPte,
MM_READONLY,
PageFrameNumber);
*MmSharedUserDataPte = TempPte;
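/* The fault handler later copies this PTE to satisfy user-mode faults on the read-only KUSER_SHARED_DATA alias */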
/* Setup the memory threshold events */
if (!MiInitializeMemoryEvents()) return FALSE;
/*
* Initialize the balancer thread
*/
MiInitBalancerThread();
/*
* Initialize the modified page writer.
*/
MmInitMpwThread();
/* Initialize the balance set manager */
MmInitBsmThread();
return TRUE;
}