reactos/ntoskrnl/mm/i386/page.c

/*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/i386/page.c
* PURPOSE: Low level memory management manipulation
*
* PROGRAMMERS: David Welch (welch@cwcom.net)
*/
/* INCLUDES ***************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
#include <mm/ARM3/miarm.h>
#define ADDR_TO_PDE_OFFSET MiAddressToPdeOffset
#define ADDR_TO_PAGE_TABLE(v) (((ULONG)(v)) / (1024 * PAGE_SIZE))
/* GLOBALS *****************************************************************/
#define PA_BIT_PRESENT (0)
#define PA_BIT_READWRITE (1)
#define PA_BIT_USER (2)
#define PA_BIT_WT (3)
#define PA_BIT_CD (4)
#define PA_BIT_ACCESSED (5)
#define PA_BIT_DIRTY (6)
#define PA_BIT_GLOBAL (8)
#define PA_PRESENT (1 << PA_BIT_PRESENT)
#define PA_READWRITE (1 << PA_BIT_READWRITE)
#define PA_USER (1 << PA_BIT_USER)
#define PA_DIRTY (1 << PA_BIT_DIRTY)
#define PA_WT (1 << PA_BIT_WT)
#define PA_CD (1 << PA_BIT_CD)
#define PA_ACCESSED (1 << PA_BIT_ACCESSED)
#define PA_GLOBAL (1 << PA_BIT_GLOBAL)
#define IS_HYPERSPACE(v) (((ULONG)(v) >= HYPER_SPACE && (ULONG)(v) <= HYPER_SPACE_END))
#define PTE_TO_PFN(X) ((X) >> PAGE_SHIFT)
#define PFN_TO_PTE(X) ((X) << PAGE_SHIFT)
#define PAGE_MASK(x) ((x)&(~0xfff))
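/* Conventions used in this file for software (non-present) PTEs:
* - A swap (page file) entry is stored as (SWAPENTRY << 1) with the present
*   bit clear and bit 0x800 (the "swap bit") set.
* - A non-present PTE with a nonzero PFN and the swap bit clear marks a
*   valid page that has been protected to PAGE_NOACCESS (see
*   MmDeleteVirtualMapping and MmSetPageProtect). */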
const
ULONG
MmProtectToPteMask[32] =
{
//
// These are the base MM_ protection flags
//
0,
PTE_READONLY | PTE_ENABLE_CACHE,
PTE_EXECUTE | PTE_ENABLE_CACHE,
PTE_EXECUTE_READ | PTE_ENABLE_CACHE,
PTE_READWRITE | PTE_ENABLE_CACHE,
PTE_WRITECOPY | PTE_ENABLE_CACHE,
PTE_EXECUTE_READWRITE | PTE_ENABLE_CACHE,
PTE_EXECUTE_WRITECOPY | PTE_ENABLE_CACHE,
//
// These OR in the MM_NOCACHE flag
//
0,
PTE_READONLY | PTE_DISABLE_CACHE,
PTE_EXECUTE | PTE_DISABLE_CACHE,
PTE_EXECUTE_READ | PTE_DISABLE_CACHE,
PTE_READWRITE | PTE_DISABLE_CACHE,
PTE_WRITECOPY | PTE_DISABLE_CACHE,
PTE_EXECUTE_READWRITE | PTE_DISABLE_CACHE,
PTE_EXECUTE_WRITECOPY | PTE_DISABLE_CACHE,
//
// These OR in the MM_DECOMMIT flag, which doesn't seem supported on x86/64/ARM
//
0,
PTE_READONLY | PTE_ENABLE_CACHE,
PTE_EXECUTE | PTE_ENABLE_CACHE,
PTE_EXECUTE_READ | PTE_ENABLE_CACHE,
PTE_READWRITE | PTE_ENABLE_CACHE,
PTE_WRITECOPY | PTE_ENABLE_CACHE,
PTE_EXECUTE_READWRITE | PTE_ENABLE_CACHE,
PTE_EXECUTE_WRITECOPY | PTE_ENABLE_CACHE,
//
// These OR in the MM_NOACCESS flag, which seems to enable WriteCombining?
//
0,
PTE_READONLY | PTE_WRITECOMBINED_CACHE,
PTE_EXECUTE | PTE_WRITECOMBINED_CACHE,
PTE_EXECUTE_READ | PTE_WRITECOMBINED_CACHE,
PTE_READWRITE | PTE_WRITECOMBINED_CACHE,
PTE_WRITECOPY | PTE_WRITECOMBINED_CACHE,
PTE_EXECUTE_READWRITE | PTE_WRITECOMBINED_CACHE,
PTE_EXECUTE_WRITECOPY | PTE_WRITECOMBINED_CACHE,
};
const
ULONG MmProtectToValue[32] =
{
PAGE_NOACCESS,
PAGE_READONLY,
PAGE_EXECUTE,
PAGE_EXECUTE_READ,
PAGE_READWRITE,
PAGE_WRITECOPY,
PAGE_EXECUTE_READWRITE,
PAGE_EXECUTE_WRITECOPY,
PAGE_NOACCESS,
PAGE_NOCACHE | PAGE_READONLY,
PAGE_NOCACHE | PAGE_EXECUTE,
PAGE_NOCACHE | PAGE_EXECUTE_READ,
PAGE_NOCACHE | PAGE_READWRITE,
PAGE_NOCACHE | PAGE_WRITECOPY,
PAGE_NOCACHE | PAGE_EXECUTE_READWRITE,
PAGE_NOCACHE | PAGE_EXECUTE_WRITECOPY,
PAGE_NOACCESS,
PAGE_GUARD | PAGE_READONLY,
PAGE_GUARD | PAGE_EXECUTE,
PAGE_GUARD | PAGE_EXECUTE_READ,
PAGE_GUARD | PAGE_READWRITE,
PAGE_GUARD | PAGE_WRITECOPY,
PAGE_GUARD | PAGE_EXECUTE_READWRITE,
PAGE_GUARD | PAGE_EXECUTE_WRITECOPY,
PAGE_NOACCESS,
PAGE_WRITECOMBINE | PAGE_READONLY,
PAGE_WRITECOMBINE | PAGE_EXECUTE,
PAGE_WRITECOMBINE | PAGE_EXECUTE_READ,
PAGE_WRITECOMBINE | PAGE_READWRITE,
PAGE_WRITECOMBINE | PAGE_WRITECOPY,
PAGE_WRITECOMBINE | PAGE_EXECUTE_READWRITE,
PAGE_WRITECOMBINE | PAGE_EXECUTE_WRITECOPY
};
/* FUNCTIONS ***************************************************************/
static BOOLEAN MmUnmapPageTable(PULONG Pt, KIRQL OldIrql);
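/* Invalidate the TLB entry for Address after its PTE has changed. The
* invalidation is only done when the PTE belongs to the current address
* space: either Pt points into the recursive page table mapping
* (MmUnmapPageTable returns TRUE) or Address is a kernel address.
* Hyperspace mappings of another process's page tables are only unmapped. */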
VOID
MiFlushTlb(PULONG Pt, PVOID Address, KIRQL OldIrql)
{
if ((Pt && MmUnmapPageTable(Pt, OldIrql)) || Address >= MmSystemRangeStart)
{
KeInvalidateTlbEntry(Address);
}
}
static ULONG
ProtectToPTE(ULONG flProtect)
{
ULONG Attributes = 0;
if (flProtect & (PAGE_NOACCESS|PAGE_GUARD))
{
Attributes = 0;
}
else if (flProtect & PAGE_IS_WRITABLE)
{
Attributes = PA_PRESENT | PA_READWRITE;
}
else if (flProtect & (PAGE_IS_READABLE | PAGE_IS_EXECUTABLE))
{
Attributes = PA_PRESENT;
}
else
{
DPRINT1("Unknown main protection type.\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
if (!(flProtect & PAGE_SYSTEM))
{
Attributes = Attributes | PA_USER;
}
if (flProtect & PAGE_NOCACHE)
{
Attributes = Attributes | PA_CD;
}
if (flProtect & PAGE_WRITETHROUGH)
{
Attributes = Attributes | PA_WT;
}
return(Attributes);
}
NTSTATUS
NTAPI
MiFillSystemPageDirectory(IN PVOID Base,
IN SIZE_T NumberOfBytes);
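/* Return a pointer to the PTE that maps Address in the given process.
* - For a user address in another process, the page directory and page
*   table are mapped temporarily through hyperspace (Create must be FALSE);
*   the caller releases the mapping with MmUnmapPageTable.
* - For the current process, the recursive page table mapping is used.
* - For kernel addresses, the PDE is synchronized with the system page
*   directory and, if Create is TRUE, allocated on demand.
* Returns NULL if the page table does not exist and Create is FALSE. */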
static PULONG
MmGetPageTableForProcess(PEPROCESS Process, PVOID Address, BOOLEAN Create, PKIRQL OldIrql)
{
PFN_NUMBER Pfn;
PULONG Pt;
PMMPDE PointerPde;
if (Address < MmSystemRangeStart)
{
/* We should have a process for user land addresses */
ASSERT(Process != NULL);
if(Process != PsGetCurrentProcess())
{
PMMPDE PdeBase;
ULONG PdeOffset = MiGetPdeOffset(Address);
ASSERT(!Create);
PdeBase = MiMapPageInHyperSpace(PsGetCurrentProcess(),
PTE_TO_PFN(Process->Pcb.DirectoryTableBase[0]),
OldIrql);
if (PdeBase == NULL)
{
KeBugCheck(MEMORY_MANAGEMENT);
}
PointerPde = PdeBase + PdeOffset;
if (PointerPde->u.Hard.Valid == 0)
{
MiUnmapPageInHyperSpace(PsGetCurrentProcess(), PdeBase, *OldIrql);
return NULL;
}
Pfn = PointerPde->u.Hard.PageFrameNumber;
MiUnmapPageInHyperSpace(PsGetCurrentProcess(), PdeBase, *OldIrql);
Pt = MiMapPageInHyperSpace(PsGetCurrentProcess(), Pfn, OldIrql);
if (Pt == NULL)
{
KeBugCheck(MEMORY_MANAGEMENT);
}
return Pt + MiAddressToPteOffset(Address);
}
/* This is for our process */
PointerPde = MiAddressToPde(Address);
Pt = (PULONG)MiAddressToPte(Address);
if ((PointerPde->u.Hard.Valid == 0) && (Create == FALSE))
{
/* Do not fault PDE in if not needed */
return NULL;
}
return Pt;
}
/* This is for kernel land address */
ASSERT(Process == NULL);
PointerPde = MiAddressToPde(Address);
Pt = (PULONG)MiAddressToPte(Address);
if (PointerPde->u.Hard.Valid == 0)
{
/* Let ARM3 synchronize the PDE */
if(!MiSynchronizeSystemPde(PointerPde))
{
/* PDE (still) not valid, let ARM3 allocate one if asked */
if(Create == FALSE)
return NULL;
MiFillSystemPageDirectory(Address, PAGE_SIZE);
}
}
return Pt;
}
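/* Release a page table pointer obtained from MmGetPageTableForProcess.
* Returns TRUE if Pt points into the recursive page table mapping of the
* current address space (nothing to unmap), FALSE if it was a hyperspace
* mapping of another process's page table, which is unmapped here. */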
static BOOLEAN MmUnmapPageTable(PULONG Pt, KIRQL OldIrql)
{
if (!IS_HYPERSPACE(Pt))
{
return TRUE;
}
MiUnmapPageInHyperSpace(PsGetCurrentProcess(), Pt, OldIrql);
return FALSE;
}
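/* Read the raw PTE value for Address in the given process.
* Returns 0 if the page table itself is not present. */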
static ULONG MmGetPageEntryForProcess(PEPROCESS Process, PVOID Address)
{
ULONG Pte;
PULONG Pt;
KIRQL OldIrql;
Pt = MmGetPageTableForProcess(Process, Address, FALSE, &OldIrql);
if (Pt)
{
Pte = *Pt;
MmUnmapPageTable(Pt, OldIrql);
return Pte;
}
return 0;
}
PFN_NUMBER
NTAPI
MmGetPfnForProcess(PEPROCESS Process,
PVOID Address)
{
ULONG Entry;
Entry = MmGetPageEntryForProcess(Process, Address);
if (!(Entry & PA_PRESENT))
{
return 0;
}
return(PTE_TO_PFN(Entry));
}
VOID
NTAPI
MmDeleteVirtualMapping(PEPROCESS Process, PVOID Address,
BOOLEAN* WasDirty, PPFN_NUMBER Page)
/*
* FUNCTION: Delete a virtual mapping
*/
{
BOOLEAN WasValid = FALSE;
PFN_NUMBER Pfn;
ULONG Pte;
PULONG Pt;
KIRQL OldIrql;
DPRINT("MmDeleteVirtualMapping(%p, %p, %p, %p)\n",
Process, Address, WasDirty, Page);
Pt = MmGetPageTableForProcess(Process, Address, FALSE, &OldIrql);
if (Pt == NULL)
{
if (WasDirty != NULL)
{
*WasDirty = FALSE;
}
if (Page != NULL)
{
*Page = 0;
}
return;
}
/*
* Atomically set the entry to zero and get the old value.
*/
Pte = InterlockedExchangePte(Pt, 0);
/* We count a mapping as valid if it's a present page, or it's a nonzero pfn with
* the swap bit unset, indicating a valid page protected to PAGE_NOACCESS. */
WasValid = (Pte & PA_PRESENT) || ((Pte >> PAGE_SHIFT) && !(Pte & 0x800));
if (WasValid)
{
/* Flush the TLB since we transitioned this PTE
* from valid to invalid so any stale translations
* are removed from the cache */
MiFlushTlb(Pt, Address, OldIrql);
if (Address < MmSystemRangeStart)
{
/* Remove PDE reference */
Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]--;
ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] < PTE_PER_PAGE);
}
Pfn = PTE_TO_PFN(Pte);
}
else
{
MmUnmapPageTable(Pt, OldIrql);
Pfn = 0;
}
/*
* Return some information to the caller
*/
if (WasDirty != NULL)
{
*WasDirty = ((Pte & PA_DIRTY) && (Pte & PA_PRESENT)) ? TRUE : FALSE;
}
if (Page != NULL)
{
*Page = Pfn;
}
}
VOID
NTAPI
MmGetPageFileMapping(PEPROCESS Process, PVOID Address,
SWAPENTRY* SwapEntry)
/*
* FUNCTION: Get a page file mapping
*/
{
ULONG Entry = MmGetPageEntryForProcess(Process, Address);
*SwapEntry = Entry >> 1;
}
VOID
NTAPI
MmDeletePageFileMapping(PEPROCESS Process, PVOID Address,
SWAPENTRY* SwapEntry)
/*
* FUNCTION: Delete a page file mapping
*/
{
ULONG Pte;
PULONG Pt;
KIRQL OldIrql;
Pt = MmGetPageTableForProcess(Process, Address, FALSE, &OldIrql);
if (Pt == NULL)
{
*SwapEntry = 0;
return;
}
/*
* Atomically set the entry to zero and get the old value.
*/
Pte = InterlockedExchangePte(Pt, 0);
if (Address < MmSystemRangeStart)
{
/* Remove PDE reference */
Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]--;
ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] < PTE_PER_PAGE);
}
/* We don't need to flush here because page file entries
* are invalid translations, so the processor won't cache them */
MmUnmapPageTable(Pt, OldIrql);
if ((Pte & PA_PRESENT) || !(Pte & 0x800))
{
DPRINT1("Pte %x (want not 1 and 0x800)\n", Pte);
KeBugCheck(MEMORY_MANAGEMENT);
}
/*
* Return some information to the caller
*/
*SwapEntry = Pte >> 1;
}
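/* Copy the PDE for a kernel address from the system page directory into
* the current page directory if it is not yet valid there. Returns TRUE
* only if the PDE was synchronized here and the PTE is now valid. */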
BOOLEAN
Mmi386MakeKernelPageTableGlobal(PVOID Address)
{
PMMPDE PointerPde = MiAddressToPde(Address);
PMMPTE PointerPte = MiAddressToPte(Address);
if (PointerPde->u.Hard.Valid == 0)
{
if(!MiSynchronizeSystemPde(PointerPde))
return FALSE;
return PointerPte->u.Hard.Valid != 0;
}
return FALSE;
}
BOOLEAN
NTAPI
MmIsDirtyPage(PEPROCESS Process, PVOID Address)
{
return MmGetPageEntryForProcess(Process, Address) & PA_DIRTY ? TRUE : FALSE;
}
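/* Clear the dirty bit of the PTE mapping Address. A compare-exchange loop
* is used so that accessed/dirty updates made concurrently by the CPU are
* not lost. The TLB is flushed only if the bit was actually set, because
* the TLB may still cache the old dirty state. */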
VOID
NTAPI
MmSetCleanPage(PEPROCESS Process, PVOID Address)
{
PULONG Pt;
ULONG Pte;
KIRQL OldIrql;
if (Address < MmSystemRangeStart && Process == NULL)
{
DPRINT1("MmSetCleanPage is called for user space without a process.\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
Pt = MmGetPageTableForProcess(Process, Address, FALSE, &OldIrql);
if (Pt == NULL)
{
KeBugCheck(MEMORY_MANAGEMENT);
}
do
{
Pte = *Pt;
} while (Pte != InterlockedCompareExchangePte(Pt, Pte & ~PA_DIRTY, Pte));
if (!(Pte & PA_PRESENT))
{
KeBugCheck(MEMORY_MANAGEMENT);
}
else if (Pte & PA_DIRTY)
{
MiFlushTlb(Pt, Address, OldIrql);
}
else
{
MmUnmapPageTable(Pt, OldIrql);
}
}
VOID
NTAPI
MmSetDirtyPage(PEPROCESS Process, PVOID Address)
{
PULONG Pt;
ULONG Pte;
KIRQL OldIrql;
if (Address < MmSystemRangeStart && Process == NULL)
{
DPRINT1("MmSetDirtyPage is called for user space without a process.\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
Pt = MmGetPageTableForProcess(Process, Address, FALSE, &OldIrql);
if (Pt == NULL)
{
KeBugCheck(MEMORY_MANAGEMENT);
}
do
{
Pte = *Pt;
} while (Pte != InterlockedCompareExchangePte(Pt, Pte | PA_DIRTY, Pte));
if (!(Pte & PA_PRESENT))
{
KeBugCheck(MEMORY_MANAGEMENT);
}
else
{
/* The processor will never clear this bit itself, therefore
* we do not need to flush the TLB here when setting it */
MmUnmapPageTable(Pt, OldIrql);
}
}
VOID
NTAPI
MmClearPageAccessedBit(PEPROCESS Process, PVOID Address)
{
PULONG Pt;
LONG Pte;
KIRQL OldIrql;
if (Address < MmSystemRangeStart && Process == NULL)
{
DPRINT1("MmClearPageAccessedBit is called for user space without a process.\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
Pt = MmGetPageTableForProcess(Process, Address, FALSE, &OldIrql);
if (Pt == NULL)
{
KeBugCheck(MEMORY_MANAGEMENT);
}
do
{
Pte = *Pt;
} while (Pte != InterlockedCompareExchangePte(Pt, Pte & ~PA_ACCESSED, Pte));
if (!(Pte & PA_PRESENT))
{
KeBugCheck(MEMORY_MANAGEMENT);
}
MiFlushTlb(Pt, Address, OldIrql);
}
BOOLEAN
NTAPI
MmIsPageAccessed(PEPROCESS Process, PVOID Address)
{
return BooleanFlagOn(MmGetPageEntryForProcess(Process, Address), PA_ACCESSED);
}
BOOLEAN
NTAPI
MmIsPagePresent(PEPROCESS Process, PVOID Address)
{
return MmGetPageEntryForProcess(Process, Address) & PA_PRESENT;
}
BOOLEAN
NTAPI
MmIsDisabledPage(PEPROCESS Process, PVOID Address)
{
ULONG_PTR Entry = MmGetPageEntryForProcess(Process, Address);
return !(Entry & PA_PRESENT) && !(Entry & 0x800) && (Entry >> PAGE_SHIFT);
}
BOOLEAN
NTAPI
MmIsPageSwapEntry(PEPROCESS Process, PVOID Address)
{
ULONG Entry;
Entry = MmGetPageEntryForProcess(Process, Address);
return !(Entry & PA_PRESENT) && (Entry & 0x800);
}
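/* Store a swap entry for Address in its PTE, marking the page as paged out
* (or as a wait entry, see MM_WAIT_ENTRY). The entry is stored shifted left
* by one with the present bit clear; the PTE must currently be zero. */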
NTSTATUS
NTAPI
MmCreatePageFileMapping(PEPROCESS Process,
PVOID Address,
SWAPENTRY SwapEntry)
{
PULONG Pt;
ULONG Pte;
KIRQL OldIrql;
if (Process == NULL && Address < MmSystemRangeStart)
{
DPRINT1("No process\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
if (Process != NULL && Address >= MmSystemRangeStart)
{
DPRINT1("Setting kernel address with process context\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
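/* The swap entry is stored shifted left by one bit, so its top bit must be free */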
if (SwapEntry & (1 << 31))
{
KeBugCheck(MEMORY_MANAGEMENT);
}
Pt = MmGetPageTableForProcess(Process, Address, FALSE, &OldIrql);
if (Pt == NULL)
{
/* Nobody should page out an address that hasn't even been mapped */
/* But we might place a wait entry first, requiring the page table */
if (SwapEntry != MM_WAIT_ENTRY)
{
KeBugCheck(MEMORY_MANAGEMENT);
}
Pt = MmGetPageTableForProcess(Process, Address, TRUE, &OldIrql);
}
Pte = InterlockedExchangePte(Pt, SwapEntry << 1);
if (Pte != 0)
{
KeBugCheckEx(MEMORY_MANAGEMENT, SwapEntry, (ULONG_PTR)Process, (ULONG_PTR)Address, 0);
}
if (Address < MmSystemRangeStart)
{
/* Add PDE reference */
Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]++;
ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] <= PTE_PER_PAGE);
}
/* We don't need to flush the TLB here because it
* only caches valid translations and a zero PTE
* is not a valid translation */
MmUnmapPageTable(Pt, OldIrql);
return(STATUS_SUCCESS);
}
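/* Map PageCount physical pages starting at Address with the protection
* given by flProtect. "Unsafe" means the physical pages are not checked
* for being in use; MmCreateVirtualMapping performs that check first. */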
NTSTATUS
NTAPI
MmCreateVirtualMappingUnsafe(PEPROCESS Process,
PVOID Address,
ULONG flProtect,
PPFN_NUMBER Pages,
ULONG PageCount)
{
ULONG Attributes;
PVOID Addr;
ULONG i;
ULONG oldPdeOffset, PdeOffset;
PULONG Pt = NULL;
ULONG Pte;
KIRQL OldIrql;
DPRINT("MmCreateVirtualMappingUnsafe(%p, %p, %lu, %p (%x), %lu)\n",
Process, Address, flProtect, Pages, *Pages, PageCount);
ASSERT(((ULONG_PTR)Address % PAGE_SIZE) == 0);
if (Process == NULL)
{
if (Address < MmSystemRangeStart)
{
DPRINT1("NULL process given for user-mode mapping at %p -- %lu pages starting at %Ix\n", Address, PageCount, *Pages);
KeBugCheck(MEMORY_MANAGEMENT);
}
if (PageCount > 0x10000 ||
(ULONG_PTR) Address / PAGE_SIZE + PageCount > 0x100000)
{
DPRINT1("Page count too large for kernel-mode mapping at %p -- %lu pages starting at %Ix\n", Address, PageCount, *Pages);
KeBugCheck(MEMORY_MANAGEMENT);
}
}
else
{
if (Address >= MmSystemRangeStart)
{
DPRINT1("Process %p given for kernel-mode mapping at %p -- %lu pages starting at %Ix\n", Process, Address, PageCount, *Pages);
KeBugCheck(MEMORY_MANAGEMENT);
}
if (PageCount > (ULONG_PTR)MmSystemRangeStart / PAGE_SIZE ||
(ULONG_PTR) Address / PAGE_SIZE + PageCount >
(ULONG_PTR)MmSystemRangeStart / PAGE_SIZE)
{
DPRINT1("Page count too large for process %p user-mode mapping at %p -- %lu pages starting at %Ix\n", Process, Address, PageCount, *Pages);
KeBugCheck(MEMORY_MANAGEMENT);
}
}
Attributes = ProtectToPTE(flProtect);
Attributes &= 0xfff;
if (Address >= MmSystemRangeStart)
{
Attributes &= ~PA_USER;
}
else
{
Attributes |= PA_USER;
}
Addr = Address;
/* Start with a PDE offset that cannot match, so that
* MmGetPageTableForProcess is called on the first iteration */
oldPdeOffset = ADDR_TO_PDE_OFFSET(Addr) + 1;
for (i = 0; i < PageCount; i++, Addr = (PVOID)((ULONG_PTR)Addr + PAGE_SIZE))
{
if (!(Attributes & PA_PRESENT) && Pages[i] != 0)
{
DPRINT1("Setting physical address but not allowing access at address "
"0x%p with attributes %x/%x.\n",
Addr, Attributes, flProtect);
KeBugCheck(MEMORY_MANAGEMENT);
}
PdeOffset = ADDR_TO_PDE_OFFSET(Addr);
if (oldPdeOffset != PdeOffset)
{
if(Pt) MmUnmapPageTable(Pt, OldIrql);
Pt = MmGetPageTableForProcess(Process, Addr, TRUE, &OldIrql);
if (Pt == NULL)
{
KeBugCheck(MEMORY_MANAGEMENT);
}
}
else
{
Pt++;
}
oldPdeOffset = PdeOffset;
Pte = InterlockedExchangePte(Pt, PFN_TO_PTE(Pages[i]) | Attributes);
/* There should not be anything valid here */
if (Pte != 0)
{
DPRINT1("Bad PTE %lx at %p for %p + %lu\n", Pte, Pt, Address, i);
KeBugCheck(MEMORY_MANAGEMENT);
}
/* We don't need to flush the TLB here because it only caches valid translations
* and we're moving this PTE from invalid to valid so it can't be cached right now */
if (Addr < MmSystemRangeStart)
{
/* Add PDE reference */
Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Addr)]++;
ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Addr)] <= PTE_PER_PAGE);
}
}
ASSERT(Addr > Address);
MmUnmapPageTable(Pt, OldIrql);
return(STATUS_SUCCESS);
}
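/* Checked variant of MmCreateVirtualMappingUnsafe: verifies that every
* physical page is marked as in use before creating the mappings. */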
NTSTATUS
NTAPI
MmCreateVirtualMapping(PEPROCESS Process,
PVOID Address,
ULONG flProtect,
PPFN_NUMBER Pages,
ULONG PageCount)
{
ULONG i;
ASSERT((ULONG_PTR)Address % PAGE_SIZE == 0);
for (i = 0; i < PageCount; i++)
{
if (!MmIsPageInUse(Pages[i]))
{
DPRINT1("Page at address %x not in use\n", PFN_TO_PTE(Pages[i]));
KeBugCheck(MEMORY_MANAGEMENT);
}
}
return(MmCreateVirtualMappingUnsafe(Process,
Address,
flProtect,
Pages,
PageCount));
}
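/* Derive a PAGE_* protection value from the hardware PTE bits. x86 PTEs
* cannot encode every distinction (e.g. execute vs. read-only), so a
* present, non-writable page is reported as PAGE_EXECUTE_READ. */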
ULONG
NTAPI
MmGetPageProtect(PEPROCESS Process, PVOID Address)
{
ULONG Entry;
ULONG Protect;
Entry = MmGetPageEntryForProcess(Process, Address);
if (!(Entry & PA_PRESENT))
{
Protect = PAGE_NOACCESS;
}
else
{
if (Entry & PA_READWRITE)
{
Protect = PAGE_READWRITE;
}
else
{
Protect = PAGE_EXECUTE_READ;
}
if (Entry & PA_CD)
{
Protect |= PAGE_NOCACHE;
}
if (Entry & PA_WT)
{
Protect |= PAGE_WRITETHROUGH;
}
if (!(Entry & PA_USER))
{
Protect |= PAGE_SYSTEM;
}
}
return(Protect);
}
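/* Change the protection of an existing mapping. The PTE's frame number and
* accessed/dirty bits are preserved; only the attribute bits derived from
* flProtect are replaced. The TLB is flushed only when the old PTE did not
* already contain all of the new attribute bits. */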
VOID
NTAPI
MmSetPageProtect(PEPROCESS Process, PVOID Address, ULONG flProtect)
{
ULONG Attributes = 0;
PULONG Pt;
ULONG Pte;
KIRQL OldIrql;
DPRINT("MmSetPageProtect(Process %p Address %p flProtect %x)\n",
Process, Address, flProtect);
Attributes = ProtectToPTE(flProtect);
Attributes &= 0xfff;
if (Address >= MmSystemRangeStart)
{
Attributes &= ~PA_USER;
}
else
{
Attributes |= PA_USER;
}
Pt = MmGetPageTableForProcess(Process, Address, FALSE, &OldIrql);
if (Pt == NULL)
{
KeBugCheck(MEMORY_MANAGEMENT);
}
Pte = InterlockedExchangePte(Pt, PAGE_MASK(*Pt) | Attributes | (*Pt & (PA_ACCESSED|PA_DIRTY)));
// We should be able to bring a page back from PAGE_NOACCESS
if ((Pte & 0x800) || !(Pte >> PAGE_SHIFT))
{
DPRINT1("Invalid Pte %lx\n", Pte);
KeBugCheck(MEMORY_MANAGEMENT);
}
if((Pte & Attributes) != Attributes)
MiFlushTlb(Pt, Address, OldIrql);
else
MmUnmapPageTable(Pt, OldIrql);
}
CODE_SEG("INIT")
VOID
NTAPI
MmInitGlobalKernelPageDirectory(VOID)
{
/* Nothing to do here */
}
/* EOF */