/*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/i386/page.c
* PURPOSE: Low level memory management manipulation
*
* PROGRAMMERS: David Welch (welch@cwcom.net)
*/
/* INCLUDES ***************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
#include <mm/ARM3/miarm.h>
#ifndef _MI_PAGING_LEVELS
#error "Dude, fix your stuff before using this file"
#endif
/* GLOBALS *****************************************************************/
/*
 * Translation table from the 5-bit MM_* protection index to the hardware
 * PTE protection/caching bits. The low 3 bits select the base protection
 * (none/RO/X/XR/RW/WC/XRW/XWC); the upper 2 bits select one of four groups
 * of eight entries: base, MM_NOCACHE, MM_DECOMMIT, and MM_NOACCESS
 * (the last group maps to write-combined caching, per the comments below).
 */
const
ULONG_PTR
MmProtectToPteMask[32] =
{
//
// These are the base MM_ protection flags
//
0,
PTE_READONLY | PTE_ENABLE_CACHE,
PTE_EXECUTE | PTE_ENABLE_CACHE,
PTE_EXECUTE_READ | PTE_ENABLE_CACHE,
PTE_READWRITE | PTE_ENABLE_CACHE,
PTE_WRITECOPY | PTE_ENABLE_CACHE,
PTE_EXECUTE_READWRITE | PTE_ENABLE_CACHE,
PTE_EXECUTE_WRITECOPY | PTE_ENABLE_CACHE,
//
// These OR in the MM_NOCACHE flag
//
0,
PTE_READONLY | PTE_DISABLE_CACHE,
PTE_EXECUTE | PTE_DISABLE_CACHE,
PTE_EXECUTE_READ | PTE_DISABLE_CACHE,
PTE_READWRITE | PTE_DISABLE_CACHE,
PTE_WRITECOPY | PTE_DISABLE_CACHE,
PTE_EXECUTE_READWRITE | PTE_DISABLE_CACHE,
PTE_EXECUTE_WRITECOPY | PTE_DISABLE_CACHE,
//
// These OR in the MM_DECOMMIT flag, which doesn't seem supported on x86/64/ARM
//
0,
PTE_READONLY | PTE_ENABLE_CACHE,
PTE_EXECUTE | PTE_ENABLE_CACHE,
PTE_EXECUTE_READ | PTE_ENABLE_CACHE,
PTE_READWRITE | PTE_ENABLE_CACHE,
PTE_WRITECOPY | PTE_ENABLE_CACHE,
PTE_EXECUTE_READWRITE | PTE_ENABLE_CACHE,
PTE_EXECUTE_WRITECOPY | PTE_ENABLE_CACHE,
//
// These OR in the MM_NOACCESS flag, which seems to enable WriteCombining?
//
0,
PTE_READONLY | PTE_WRITECOMBINED_CACHE,
PTE_EXECUTE | PTE_WRITECOMBINED_CACHE,
PTE_EXECUTE_READ | PTE_WRITECOMBINED_CACHE,
PTE_READWRITE | PTE_WRITECOMBINED_CACHE,
PTE_WRITECOPY | PTE_WRITECOMBINED_CACHE,
PTE_EXECUTE_READWRITE | PTE_WRITECOMBINED_CACHE,
PTE_EXECUTE_WRITECOPY | PTE_WRITECOMBINED_CACHE,
};
/*
 * Reverse mapping: translates the 5-bit MM_* protection index back to the
 * Win32 PAGE_* constants. The three upper groups apply the PAGE_NOCACHE,
 * PAGE_GUARD and PAGE_WRITECOMBINE modifiers respectively.
 */
const
ULONG MmProtectToValue[32] =
{
PAGE_NOACCESS,
PAGE_READONLY,
PAGE_EXECUTE,
PAGE_EXECUTE_READ,
PAGE_READWRITE,
PAGE_WRITECOPY,
PAGE_EXECUTE_READWRITE,
PAGE_EXECUTE_WRITECOPY,
PAGE_NOACCESS,
PAGE_NOCACHE | PAGE_READONLY,
PAGE_NOCACHE | PAGE_EXECUTE,
PAGE_NOCACHE | PAGE_EXECUTE_READ,
PAGE_NOCACHE | PAGE_READWRITE,
PAGE_NOCACHE | PAGE_WRITECOPY,
PAGE_NOCACHE | PAGE_EXECUTE_READWRITE,
PAGE_NOCACHE | PAGE_EXECUTE_WRITECOPY,
PAGE_NOACCESS,
PAGE_GUARD | PAGE_READONLY,
PAGE_GUARD | PAGE_EXECUTE,
PAGE_GUARD | PAGE_EXECUTE_READ,
PAGE_GUARD | PAGE_READWRITE,
PAGE_GUARD | PAGE_WRITECOPY,
PAGE_GUARD | PAGE_EXECUTE_READWRITE,
PAGE_GUARD | PAGE_EXECUTE_WRITECOPY,
PAGE_NOACCESS,
PAGE_WRITECOMBINE | PAGE_READONLY,
PAGE_WRITECOMBINE | PAGE_EXECUTE,
PAGE_WRITECOMBINE | PAGE_EXECUTE_READ,
PAGE_WRITECOMBINE | PAGE_READWRITE,
PAGE_WRITECOMBINE | PAGE_WRITECOPY,
PAGE_WRITECOMBINE | PAGE_EXECUTE_READWRITE,
PAGE_WRITECOMBINE | PAGE_EXECUTE_WRITECOPY
};
/* FUNCTIONS ***************************************************************/
/* Forward declaration; implemented elsewhere in Mm. Used below on 2-level
 * builds to populate system PDEs covering a kernel-mode range. */
NTSTATUS
NTAPI
MiFillSystemPageDirectory(IN PVOID Base,
IN SIZE_T NumberOfBytes);
/*
 * MiIsPageTablePresent
 *
 * Returns TRUE when the paging hierarchy covering Address is populated,
 * i.e. a PTE for the address can exist at all.
 *
 * On 2-level builds this is answered from the working set list's
 * UsedPageTableEntries counters. On 3/4-level builds the usage counter for
 * a paging-structure page lives in the PFN entry's OriginalPte while the
 * page is valid/transition, or in the Soft.UsedPageTableEntries field of
 * the non-resident PXE/PPE/PDE itself.
 *
 * Caller must hold the process working set lock (shared or exclusive) and
 * run below DISPATCH_LEVEL (asserted below).
 */
static
BOOLEAN
MiIsPageTablePresent(PVOID Address)
{
#if _MI_PAGING_LEVELS == 2
BOOLEAN Ret = MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] != 0;
/* Some sanity check while we're here */
ASSERT(Ret == (MiAddressToPde(Address)->u.Hard.Valid != 0));
return Ret;
#else
PMMPDE PointerPde;
PMMPPE PointerPpe;
#if _MI_PAGING_LEVELS == 4
PMMPXE PointerPxe;
#endif
PMMPFN Pfn;
/* Make sure we're locked */
ASSERT((PsGetCurrentThread()->OwnsProcessWorkingSetExclusive) || (PsGetCurrentThread()->OwnsProcessWorkingSetShared));
/* Must not hold the PFN lock! */
ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
/* Check if PXE or PPE have references first. */
#if _MI_PAGING_LEVELS == 4
PointerPxe = MiAddressToPxe(Address);
if ((PointerPxe->u.Hard.Valid == 1) || (PointerPxe->u.Soft.Transition == 1))
{
/* Resident or transition: the usage counter is kept in the PFN entry */
Pfn = MiGetPfnEntry(PFN_FROM_PXE(PointerPxe));
if (Pfn->OriginalPte.u.Soft.UsedPageTableEntries == 0)
return FALSE;
}
else if (PointerPxe->u.Soft.UsedPageTableEntries == 0)
{
return FALSE;
}
/* Make the PPE table accessible before we dereference into it */
if (PointerPxe->u.Hard.Valid == 0)
{
MiMakeSystemAddressValid(MiPteToAddress(PointerPxe), PsGetCurrentProcess());
}
#endif
PointerPpe = MiAddressToPpe(Address);
if ((PointerPpe->u.Hard.Valid == 1) || (PointerPpe->u.Soft.Transition == 1))
{
Pfn = MiGetPfnEntry(PFN_FROM_PPE(PointerPpe));
if (Pfn->OriginalPte.u.Soft.UsedPageTableEntries == 0)
return FALSE;
}
else if (PointerPpe->u.Soft.UsedPageTableEntries == 0)
{
return FALSE;
}
/* Make the PDE table accessible before we dereference into it */
if (PointerPpe->u.Hard.Valid == 0)
{
MiMakeSystemAddressValid(MiPteToAddress(PointerPpe), PsGetCurrentProcess());
}
/* Finally, check the PDE level itself */
PointerPde = MiAddressToPde(Address);
if ((PointerPde->u.Hard.Valid == 0) && (PointerPde->u.Soft.Transition == 0))
{
return PointerPde->u.Soft.UsedPageTableEntries != 0;
}
/* This lies on the PFN */
Pfn = MiGetPfnEntry(PFN_FROM_PDE(PointerPde));
return Pfn->OriginalPte.u.Soft.UsedPageTableEntries != 0;
#endif
}
/*
 * Returns the page frame number currently mapped at a user-mode address of
 * the current process, or 0 when no valid mapping exists.
 */
PFN_NUMBER
NTAPI
MmGetPfnForProcess(PEPROCESS Process,
                   PVOID Address)
{
    PETHREAD CurrentThread = PsGetCurrentThread();
    PFN_NUMBER PageFrame = 0;

    /* Only user-mode addresses of the current process are supported */
    ASSERT(Process != NULL);
    ASSERT(Address < MmSystemRangeStart);
    ASSERT(Process == PsGetCurrentProcess());

    /* Take the working set lock shared while we inspect the PTE */
    MiLockProcessWorkingSetShared(Process, CurrentThread);

    if (MiIsPageTablePresent(Address))
    {
        PMMPTE Pte;

        /* Ensure the PDE is resident so the PTE itself can be read */
        MiMakePdeExistAndMakeValid(MiAddressToPde(Address), Process, MM_NOIRQL);

        Pte = MiAddressToPte(Address);
        if (Pte->u.Hard.Valid)
        {
            PageFrame = PFN_FROM_PTE(Pte);
        }
    }

    MiUnlockProcessWorkingSetShared(Process, CurrentThread);
    return PageFrame;
}
/**
* @brief Deletes the virtual mapping and optionally gives back the page & dirty bit.
*
* @param Process - The process this address belongs to, or NULL if system address.
* @param Address - The address to unmap.
* @param WasDirty - Optional param receiving the dirty bit of the PTE.
* @param Page - Optional param receiving the page number previously mapped to this address.
*
* @return Whether there was actually a page mapped at the given address.
*/
_Success_(return)
BOOLEAN
MmDeleteVirtualMappingEx(
    _Inout_opt_ PEPROCESS Process,
    _In_ PVOID Address,
    _Out_opt_ BOOLEAN* WasDirty,
    _Out_opt_ PPFN_NUMBER Page,
    _In_ BOOLEAN IsPhysical)
{
    PMMPTE PointerPte;
    MMPTE OldPte;
    BOOLEAN ValidPde;

    OldPte.u.Long = 0;

    DPRINT("MmDeleteVirtualMapping(%p, %p, %p, %p)\n", Process, Address, WasDirty, Page);

    /* Only page-aligned addresses may be unmapped */
    ASSERT(((ULONG_PTR)Address % PAGE_SIZE) == 0);

    /* And we should be at low IRQL */
    ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);

    /* Make sure our PDE is valid, and that everything is going fine */
    if (Process == NULL)
    {
        /* A NULL process is only legal for system-space mappings */
        if (Address < MmSystemRangeStart)
        {
            DPRINT1("NULL process given for user-mode mapping at %p\n", Address);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
#if (_MI_PAGING_LEVELS == 2)
        ValidPde = MiSynchronizeSystemPde(MiAddressToPde(Address));
#else
        ValidPde = MiIsPdeForAddressValid(Address);
#endif
    }
    else
    {
        if ((Address >= MmSystemRangeStart) || Add2Ptr(Address, PAGE_SIZE) >= MmSystemRangeStart)
        {
            DPRINT1("Process %p given for kernel-mode mapping at %p\n", Process, Address);
            KeBugCheck(MEMORY_MANAGEMENT);
        }

        /* Only for current process !!! */
        ASSERT(Process == PsGetCurrentProcess());

        MiLockProcessWorkingSetUnsafe(Process, PsGetCurrentThread());

        ValidPde = MiIsPageTablePresent(Address);
        if (ValidPde)
        {
            MiMakePdeExistAndMakeValid(MiAddressToPde(Address), Process, MM_NOIRQL);
        }
    }

    /* Get the PTE if we're having anything */
    if (ValidPde)
    {
        /* Atomically clear the PTE and flush the stale translation */
        PointerPte = MiAddressToPte(Address);
        OldPte.u.Long = InterlockedExchangePte(PointerPte, 0);

        KeInvalidateTlbEntry(Address);

        if (OldPte.u.Long != 0)
        {
            /* It must have been present, or not a swap entry */
            ASSERT(OldPte.u.Hard.Valid || !FlagOn(OldPte.u.Long, 0x800));

            if (WasDirty != NULL)
            {
                *WasDirty = !!OldPte.u.Hard.Dirty;
            }

            if (Page != NULL)
            {
                *Page = OldPte.u.Hard.PageFrameNumber;
            }
        }
    }

    if (Process != NULL)
    {
        /* Remove PDE reference, if needed */
        if (OldPte.u.Long != 0)
        {
            if (MiDecrementPageTableReferences(Address) == 0)
            {
                KIRQL OldIrql = MiAcquirePfnLock();
                MiDeletePde(MiAddressToPde(Address), Process);
                MiReleasePfnLock(OldIrql);
            }
        }

        /* For non-physical mappings, drop the share count taken at map time */
        if (!IsPhysical && OldPte.u.Hard.Valid)
        {
            PMMPFN Pfn1;
            KIRQL OldIrql;

            OldIrql = MiAcquirePfnLock();
            Pfn1 = &MmPfnDatabase[OldPte.u.Hard.PageFrameNumber];
            ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
            ASSERT(Pfn1->u2.ShareCount > 0);
            if (--Pfn1->u2.ShareCount == 0)
            {
                Pfn1->u3.e1.PageLocation = TransitionPage;
            }
            MiReleasePfnLock(OldIrql);
        }

        MiUnlockProcessWorkingSetUnsafe(Process, PsGetCurrentThread());
    }

    return OldPte.u.Long != 0;
}
/* Convenience wrapper: deletes a regular (PFN-tracked) virtual mapping */
_Success_(return)
BOOLEAN
MmDeleteVirtualMapping(
_Inout_opt_ PEPROCESS Process,
_In_ PVOID Address,
_Out_opt_ BOOLEAN * WasDirty,
_Out_opt_ PPFN_NUMBER Page)
{
return MmDeleteVirtualMappingEx(Process, Address, WasDirty, Page, FALSE);
}
/* Convenience wrapper: deletes a physical mapping (no PFN database bookkeeping) */
_Success_(return)
BOOLEAN
MmDeletePhysicalMapping(
_Inout_opt_ PEPROCESS Process,
_In_ PVOID Address,
_Out_opt_ BOOLEAN * WasDirty,
_Out_opt_ PPFN_NUMBER Page)
{
return MmDeleteVirtualMappingEx(Process, Address, WasDirty, Page, TRUE);
}
/*
 * MmDeletePageFileMapping
 *
 * Clears the swap-entry PTE at a user-mode address of the current process
 * and returns the swap entry that was stored there. Bugchecks when the PTE
 * did not actually hold a swap entry.
 */
VOID
NTAPI
MmDeletePageFileMapping(
PEPROCESS Process,
PVOID Address,
SWAPENTRY* SwapEntry)
{
PMMPTE PointerPte;
MMPTE OldPte;
/* This should not be called for kernel space anymore */
ASSERT(Process != NULL);
ASSERT(Address < MmSystemRangeStart);
/* And we don't support deleting for other process */
ASSERT(Process == PsGetCurrentProcess());
/* And we should be at low IRQL */
ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
/* We are tinkering with the PDE here. Ensure it will be there */
MiLockProcessWorkingSetUnsafe(Process, PsGetCurrentThread());
/* Callers must ensure there is actually something there */
ASSERT(MiAddressToPde(Address)->u.Long != 0);
MiMakePdeExistAndMakeValid(MiAddressToPde(Address), Process, MM_NOIRQL);
/* Atomically take the old PTE contents out */
PointerPte = MiAddressToPte(Address);
OldPte.u.Long = InterlockedExchangePte(PointerPte, 0);
/* This must be a swap entry ! */
if (!FlagOn(OldPte.u.Long, 0x800) || OldPte.u.Hard.Valid)
{
KeBugCheckEx(MEMORY_MANAGEMENT, OldPte.u.Long, (ULONG_PTR)Process, (ULONG_PTR)Address, 0);
}
/* This used to be a non-zero PTE, now we can let the PDE go. */
if (MiDecrementPageTableReferences(Address) == 0)
{
/* We can let it go */
KIRQL OldIrql = MiAcquirePfnLock();
MiDeletePde(MiPteToPde(PointerPte), Process);
MiReleasePfnLock(OldIrql);
}
MiUnlockProcessWorkingSetUnsafe(Process, PsGetCurrentThread());
/* Swap entries are stored shifted left by one bit (see MmCreatePageFileMapping) */
*SwapEntry = OldPte.u.Long >> 1;
}
/*
 * MmIsPagePresent
 *
 * Returns TRUE when a valid hardware PTE exists for Address. Kernel-space
 * queries take Process == NULL; user-space queries require the current
 * process and are performed under the shared working set lock.
 */
BOOLEAN
NTAPI
MmIsPagePresent(PEPROCESS Process, PVOID Address)
{
BOOLEAN Ret;
if (Address >= MmSystemRangeStart)
{
ASSERT(Process == NULL);
#if _MI_PAGING_LEVELS == 2
if (!MiSynchronizeSystemPde(MiAddressToPde(Address)))
#else
if (!MiIsPdeForAddressValid(Address))
#endif
{
/* It can't be present if there is no PDE */
return FALSE;
}
return MiAddressToPte(Address)->u.Hard.Valid;
}
/* User-space query: must be the current process, locked shared */
ASSERT(Process != NULL);
ASSERT(Process == PsGetCurrentProcess());
MiLockProcessWorkingSetShared(Process, PsGetCurrentThread());
if (!MiIsPageTablePresent(Address))
{
/* It can't be present if there is no PDE */
MiUnlockProcessWorkingSetShared(Process, PsGetCurrentThread());
return FALSE;
}
MiMakePdeExistAndMakeValid(MiAddressToPde(Address), Process, MM_NOIRQL);
Ret = MiAddressToPte(Address)->u.Hard.Valid;
MiUnlockProcessWorkingSetShared(Process, PsGetCurrentThread());
return Ret;
}
/*
 * MmIsDisabledPage
 *
 * Returns TRUE for a "disabled" mapping: a PTE that is not valid, is not a
 * swap entry (bit 0x800 clear), yet still carries a non-zero page frame
 * number. MmSetPageProtect produces exactly this state for MM_NOACCESS /
 * guard-page protections (it keeps the PFN but leaves Valid clear).
 */
BOOLEAN
NTAPI
MmIsDisabledPage(PEPROCESS Process, PVOID Address)
{
BOOLEAN Ret;
PMMPTE PointerPte;
if (Address >= MmSystemRangeStart)
{
ASSERT(Process == NULL);
#if _MI_PAGING_LEVELS == 2
if (!MiSynchronizeSystemPde(MiAddressToPde(Address)))
#else
if (!MiIsPdeForAddressValid(Address))
#endif
{
/* It's not disabled if it's not present */
return FALSE;
}
}
else
{
ASSERT(Address < MmSystemRangeStart);
ASSERT(Process != NULL);
ASSERT(Process == PsGetCurrentProcess());
MiLockProcessWorkingSetShared(Process, PsGetCurrentThread());
if (!MiIsPageTablePresent(Address))
{
/* It can't be disabled if there is no PDE */
MiUnlockProcessWorkingSetShared(Process, PsGetCurrentThread());
return FALSE;
}
MiMakePdeExistAndMakeValid(MiAddressToPde(Address), Process, MM_NOIRQL);
}
/* Invalid + not a swap entry + has a PFN == disabled mapping */
PointerPte = MiAddressToPte(Address);
Ret = !PointerPte->u.Hard.Valid
&& !FlagOn(PointerPte->u.Long, 0x800)
&& (PointerPte->u.Hard.PageFrameNumber != 0);
if (Address < MmSystemRangeStart)
MiUnlockProcessWorkingSetShared(Process, PsGetCurrentThread());
return Ret;
}
/*
 * MmIsPageSwapEntry
 *
 * Returns TRUE when the PTE at a user-mode address holds a swap entry
 * (invalid PTE with the 0x800 marker bit set). Kernel addresses never
 * carry swap entries, so those queries short-circuit to FALSE.
 */
BOOLEAN
NTAPI
MmIsPageSwapEntry(PEPROCESS Process, PVOID Address)
{
BOOLEAN Ret;
PMMPTE PointerPte;
/* We never set swap entries for kernel addresses */
if (Address >= MmSystemRangeStart)
{
ASSERT(Process == NULL);
return FALSE;
}
ASSERT(Process != NULL);
ASSERT(Process == PsGetCurrentProcess());
MiLockProcessWorkingSetShared(Process, PsGetCurrentThread());
if (!MiIsPageTablePresent(Address))
{
/* There can't be a swap entry if there is no PDE */
MiUnlockProcessWorkingSetShared(Process, PsGetCurrentThread());
return FALSE;
}
MiMakePdeExistAndMakeValid(MiAddressToPde(Address), Process, MM_NOIRQL);
PointerPte = MiAddressToPte(Address);
Ret = !PointerPte->u.Hard.Valid && FlagOn(PointerPte->u.Long, 0x800);
MiUnlockProcessWorkingSetShared(Process, PsGetCurrentThread());
return Ret;
}
/*
 * MmGetPageFileMapping
 *
 * Reads (without clearing) the swap entry stored in the PTE for a
 * user-mode address; writes 0 to *SwapEntry when the PTE is valid,
 * missing, or not a swap entry.
 */
VOID
NTAPI
MmGetPageFileMapping(PEPROCESS Process, PVOID Address, SWAPENTRY* SwapEntry)
{
PMMPTE PointerPte;
/* We never set swap entries for kernel addresses */
if (Address >= MmSystemRangeStart)
{
ASSERT(Process == NULL);
*SwapEntry = 0;
return;
}
ASSERT(Process != NULL);
ASSERT(Process == PsGetCurrentProcess());
MiLockProcessWorkingSetShared(Process, PsGetCurrentThread());
if (!MiIsPageTablePresent(Address))
{
/* There can't be a swap entry if there is no PDE */
MiUnlockProcessWorkingSetShared(Process, PsGetCurrentThread());
*SwapEntry = 0;
return;
}
MiMakePdeExistAndMakeValid(MiAddressToPde(Address), Process, MM_NOIRQL);
PointerPte = MiAddressToPte(Address);
/* Swap entries are stored shifted left by one bit in the PTE */
if (!PointerPte->u.Hard.Valid && FlagOn(PointerPte->u.Long, 0x800))
*SwapEntry = PointerPte->u.Long >> 1;
else
*SwapEntry = 0;
MiUnlockProcessWorkingSetShared(Process, PsGetCurrentThread());
}
/*
 * MmCreatePageFileMapping
 *
 * Stores a swap entry in the PTE for a user-mode address of the current
 * process. The entry is shifted left by one bit so the PTE's Valid bit
 * stays clear; the top bit of the entry must therefore be free (checked
 * below). Bugchecks if the PTE was not previously zero.
 *
 * Returns STATUS_SUCCESS (failures bugcheck).
 */
NTSTATUS
NTAPI
MmCreatePageFileMapping(PEPROCESS Process,
                        PVOID Address,
                        SWAPENTRY SwapEntry)
{
    PMMPTE PointerPte;
    ULONG_PTR Pte;

    /* This should not be called for kernel space anymore */
    ASSERT(Process != NULL);
    ASSERT(Address < MmSystemRangeStart);

    /* And we don't support creating for other process */
    ASSERT(Process == PsGetCurrentProcess());

    /* The entry is stored shifted by one; its top bit must not be in use */
    if (SwapEntry & ((ULONG_PTR)1 << (RTL_BITS_OF(SWAPENTRY) - 1)))
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    /* We are tinkering with the PDE here. Ensure it will be there */
    MiLockProcessWorkingSetUnsafe(Process, PsGetCurrentThread());
    MiMakePdeExistAndMakeValid(MiAddressToPde(Address), Process, MM_NOIRQL);

    PointerPte = MiAddressToPte(Address);
    Pte = InterlockedExchangePte(PointerPte, SwapEntry << 1);
    if (Pte != 0)
    {
        KeBugCheckEx(MEMORY_MANAGEMENT, SwapEntry, (ULONG_PTR)Process, (ULONG_PTR)Address, 0);
    }

    /* This used to be a 0 PTE, now we need a valid PDE to keep it around */
    MiIncrementPageTableReferences(Address);

    MiUnlockProcessWorkingSetUnsafe(Process, PsGetCurrentThread());
    return STATUS_SUCCESS;
}
/*
 * MmCreateVirtualMappingUnsafeEx
 *
 * Writes a hardware PTE mapping Page at Address with the given PAGE_*
 * protection. "Unsafe" because the page itself is not validated; callers
 * must guarantee the PFN is legitimate. For kernel mappings pass
 * Process == NULL; for user mappings, Process must be the current process.
 * When IsPhysical is FALSE, the PFN database share count is incremented.
 *
 * Returns STATUS_SUCCESS (invalid usage bugchecks).
 */
NTSTATUS
NTAPI
MmCreateVirtualMappingUnsafeEx(
    _Inout_opt_ PEPROCESS Process,
    _In_ PVOID Address,
    _In_ ULONG flProtect,
    _In_ PFN_NUMBER Page,
    _In_ BOOLEAN IsPhysical)
{
    ULONG ProtectionMask;
    PMMPTE PointerPte;
    MMPTE TempPte;
    ULONG_PTR Pte;

    DPRINT("MmCreateVirtualMappingUnsafe(%p, %p, %lu, %x)\n",
           Process, Address, flProtect, Page);

    ASSERT(((ULONG_PTR)Address % PAGE_SIZE) == 0);

    ProtectionMask = MiMakeProtectionMask(flProtect);
    /* Caller must have checked ! */
    ASSERT(ProtectionMask != MM_INVALID_PROTECTION);
    ASSERT(ProtectionMask != MM_NOACCESS);
    ASSERT(ProtectionMask != MM_ZERO_ACCESS);

    /* Make sure our PDE is valid, and that everything is going fine */
    if (Process == NULL)
    {
        /* We don't support this in legacy Mm for kernel mappings */
        ASSERT(ProtectionMask != MM_WRITECOPY);
        ASSERT(ProtectionMask != MM_EXECUTE_WRITECOPY);

        if (Address < MmSystemRangeStart)
        {
            DPRINT1("NULL process given for user-mode mapping at %p\n", Address);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
#if _MI_PAGING_LEVELS == 2
        /* On 2-level builds, populate the system PDE if it isn't there yet */
        if (!MiSynchronizeSystemPde(MiAddressToPde(Address)))
            MiFillSystemPageDirectory(Address, PAGE_SIZE);
#endif
    }
    else
    {
        if ((Address >= MmSystemRangeStart) || Add2Ptr(Address, PAGE_SIZE) >= MmSystemRangeStart)
        {
            DPRINT1("Process %p given for kernel-mode mapping at %p -- 1 page starting at %Ix\n",
                    Process, Address, Page);
            KeBugCheck(MEMORY_MANAGEMENT);
        }

        /* Only for current process !!! */
        ASSERT(Process == PsGetCurrentProcess());

        MiLockProcessWorkingSetUnsafe(Process, PsGetCurrentThread());
        MiMakePdeExistAndMakeValid(MiAddressToPde(Address), Process, MM_NOIRQL);
    }

    PointerPte = MiAddressToPte(Address);

    MI_MAKE_HARDWARE_PTE(&TempPte, PointerPte, ProtectionMask, Page);

    Pte = InterlockedExchangePte(PointerPte, TempPte.u.Long);
    /* There should not have been anything valid here */
    if (Pte != 0)
    {
        DPRINT1("Bad PTE %lx at %p for %p\n", Pte, PointerPte, Address);
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    /* Track the new mapping in the PFN database, unless it's a physical one */
    if (!IsPhysical)
    {
        PMMPFN Pfn1;
        KIRQL OldIrql;

        OldIrql = MiAcquirePfnLock();
        Pfn1 = &MmPfnDatabase[TempPte.u.Hard.PageFrameNumber];
        Pfn1->u2.ShareCount++;
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
        MiReleasePfnLock(OldIrql);
    }

    /* We don't need to flush the TLB here because it only caches valid translations
     * and we're moving this PTE from invalid to valid so it can't be cached right now */

    if (Address < MmSystemRangeStart)
    {
        /* Add PDE reference */
        MiIncrementPageTableReferences(Address);
        MiUnlockProcessWorkingSetUnsafe(Process, PsGetCurrentThread());
    }

    return STATUS_SUCCESS;
}
/* Convenience wrapper: creates a regular (PFN-tracked) unsafe mapping */
NTSTATUS
NTAPI
MmCreateVirtualMappingUnsafe(
_Inout_opt_ PEPROCESS Process,
_In_ PVOID Address,
_In_ ULONG flProtect,
_In_ PFN_NUMBER Page)
{
return MmCreateVirtualMappingUnsafeEx(Process, Address, flProtect, Page, FALSE);
}
/* Convenience wrapper: creates a physical mapping (no PFN database bookkeeping) */
NTSTATUS
NTAPI
MmCreatePhysicalMapping(
_Inout_opt_ PEPROCESS Process,
_In_ PVOID Address,
_In_ ULONG flProtect,
_In_ PFN_NUMBER Page)
{
return MmCreateVirtualMappingUnsafeEx(Process, Address, flProtect, Page, TRUE);
}
/*
 * MmCreateVirtualMapping
 *
 * Validated front-end for MmCreateVirtualMappingUnsafe: bugchecks when the
 * page frame is not marked in use, then delegates the actual PTE write.
 */
NTSTATUS
NTAPI
MmCreateVirtualMapping(PEPROCESS Process,
                       PVOID Address,
                       ULONG flProtect,
                       PFN_NUMBER Page)
{
    ASSERT((ULONG_PTR)Address % PAGE_SIZE == 0);

    /* Refuse to map a page the caller does not own */
    if (!MmIsPageInUse(Page))
    {
        DPRINT1("Page %lx is not in use\n", Page);
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    return MmCreateVirtualMappingUnsafe(Process, Address, flProtect, Page);
}
/*
 * MmGetPageProtect
 *
 * Reconstructs a PAGE_* protection value from the hardware PTE bits at
 * Address. Returns PAGE_NOACCESS when the PTE (or its PDE) is not valid.
 */
ULONG
NTAPI
MmGetPageProtect(PEPROCESS Process, PVOID Address)
{
PMMPTE PointerPte;
ULONG Protect;
if (Address >= MmSystemRangeStart)
{
ASSERT(Process == NULL);
#if _MI_PAGING_LEVELS == 2
if (!MiSynchronizeSystemPde(MiAddressToPde(Address)))
#else
if (!MiIsPdeForAddressValid(Address))
#endif
{
return PAGE_NOACCESS;
}
}
else
{
ASSERT(Address < MmSystemRangeStart);
ASSERT(Process != NULL);
ASSERT(Process == PsGetCurrentProcess());
MiLockProcessWorkingSetShared(Process, PsGetCurrentThread());
if (!MiIsPageTablePresent(Address))
{
/* It can't be present if there is no PDE */
MiUnlockProcessWorkingSetShared(Process, PsGetCurrentThread());
return PAGE_NOACCESS;
}
MiMakePdeExistAndMakeValid(MiAddressToPde(Address), Process, MM_NOIRQL);
}
PointerPte = MiAddressToPte(Address);
if (!PointerPte->u.Flush.Valid)
{
Protect = PAGE_NOACCESS;
}
else
{
if (PointerPte->u.Flush.CopyOnWrite)
Protect = PAGE_WRITECOPY;
else if (PointerPte->u.Flush.Write)
Protect = PAGE_READWRITE;
else
Protect = PAGE_READONLY;
#if _MI_PAGING_LEVELS >= 3
/* PAE & AMD64 long mode support NoExecute bit */
/* The PAGE_EXECUTE_* constants are the base constants shifted left by 4
 * (e.g. PAGE_READONLY 0x02 -> PAGE_EXECUTE_READ 0x20), hence the shift */
if (!PointerPte->u.Flush.NoExecute)
Protect <<= 4;
#endif
if (PointerPte->u.Flush.CacheDisable)
Protect |= PAGE_NOCACHE;
if (PointerPte->u.Flush.WriteThrough)
Protect |= PAGE_WRITETHROUGH;
}
if (Address < MmSystemRangeStart)
MiUnlockProcessWorkingSetShared(Process, PsGetCurrentThread());
return(Protect);
}
/*
 * MmSetPageProtect
 *
 * Rewrites the PTE for a user-mode address of the current process with new
 * PAGE_* protection, preserving the page frame number and the accessed and
 * dirty bits. For MM_NOACCESS and guard-page protections the Valid bit is
 * left clear, producing the "disabled page" state that MmIsDisabledPage
 * detects. Bugchecks when the old PTE was a swap entry or had no PFN.
 */
VOID
NTAPI
MmSetPageProtect(PEPROCESS Process, PVOID Address, ULONG flProtect)
{
    ULONG ProtectionMask;
    PMMPTE PointerPte;
    MMPTE TempPte, OldPte;

    DPRINT("MmSetPageProtect(Process %p Address %p flProtect %x)\n",
           Process, Address, flProtect);

    ASSERT(Process != NULL);
    ASSERT(Address < MmSystemRangeStart);
    ASSERT(Process == PsGetCurrentProcess());

    ProtectionMask = MiMakeProtectionMask(flProtect);
    /* Caller must have checked ! */
    ASSERT(ProtectionMask != MM_INVALID_PROTECTION);

    MiLockProcessWorkingSetUnsafe(Process, PsGetCurrentThread());

    MiMakePdeExistAndMakeValid(MiAddressToPde(Address), Process, MM_NOIRQL);
    PointerPte = MiAddressToPte(Address);

    /* Sanity check: must currently be a user-owned PTE */
    ASSERT(PointerPte->u.Hard.Owner == 1);

    /* Build the new PTE: keep the PFN, apply the new protection bits */
    TempPte.u.Long = 0;
    TempPte.u.Hard.PageFrameNumber = PointerPte->u.Hard.PageFrameNumber;
    TempPte.u.Long |= MmProtectToPteMask[ProtectionMask];
    TempPte.u.Hard.Owner = 1;

    /* Only set valid bit if we have to */
    if ((ProtectionMask != MM_NOACCESS) && !FlagOn(ProtectionMask, MM_GUARDPAGE))
        TempPte.u.Hard.Valid = 1;

    /* Keep dirty & accessed bits */
    TempPte.u.Hard.Accessed = PointerPte->u.Hard.Accessed;
    TempPte.u.Hard.Dirty = PointerPte->u.Hard.Dirty;

    OldPte.u.Long = InterlockedExchangePte(PointerPte, TempPte.u.Long);

    // We should be able to bring a page back from PAGE_NOACCESS
    if (!OldPte.u.Hard.Valid && (FlagOn(OldPte.u.Long, 0x800) || (OldPte.u.Hard.PageFrameNumber == 0)))
    {
        DPRINT1("Invalid Pte %lx\n", OldPte.u.Long);
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    /* Flush only when the translation actually changed */
    if (OldPte.u.Long != TempPte.u.Long)
        KeInvalidateTlbEntry(Address);

    MiUnlockProcessWorkingSetUnsafe(Process, PsGetCurrentThread());
}
/*
 * MmSetDirtyBit
 *
 * Sets or clears the hardware dirty bit of the PTE at a user-mode address
 * of the current process. Bugchecks on swap entries or unmapped PTEs.
 */
VOID
NTAPI
MmSetDirtyBit(PEPROCESS Process, PVOID Address, BOOLEAN Bit)
{
PMMPTE PointerPte;
DPRINT("MmSetDirtyBit(Process %p Address %p Bit %x)\n",
Process, Address, Bit);
ASSERT(Process != NULL);
ASSERT(Address < MmSystemRangeStart);
ASSERT(Process == PsGetCurrentProcess());
MiLockProcessWorkingSetUnsafe(Process, PsGetCurrentThread());
MiMakePdeExistAndMakeValid(MiAddressToPde(Address), Process, MM_NOIRQL);
PointerPte = MiAddressToPte(Address);
// We shouldnl't set dirty bit on non-mapped addresses
if (!PointerPte->u.Hard.Valid && (FlagOn(PointerPte->u.Long, 0x800) || (PointerPte->u.Hard.PageFrameNumber == 0)))
{
DPRINT1("Invalid Pte %lx\n", PointerPte->u.Long);
KeBugCheck(MEMORY_MANAGEMENT);
}
PointerPte->u.Hard.Dirty = !!Bit;
/* When clearing the bit, flush so a stale dirty translation is not kept
 * cached in the TLB (setting it needs no flush) */
if (!Bit)
KeInvalidateTlbEntry(Address);
MiUnlockProcessWorkingSetUnsafe(Process, PsGetCurrentThread());
}
/* Legacy init hook kept for the boot path; the ARM3 paths used in this file
 * require no extra global page directory setup */
CODE_SEG("INIT")
VOID
NTAPI
MmInitGlobalKernelPageDirectory(VOID)
{
/* Nothing to do here */
}
#ifdef _M_IX86
/*
 * Pulls the system PDE for a kernel address into the current page directory
 * when it is not yet present (x86 2-level only). Returns TRUE only when the
 * PDE had to be synchronized and the PTE behind it is valid; FALSE otherwise
 * (including when the PDE was already valid, i.e. the fault is genuine).
 */
BOOLEAN
Mmi386MakeKernelPageTableGlobal(PVOID Address)
{
    PMMPDE Pde = MiAddressToPde(Address);

    /* Already valid: nothing was fixed up here */
    if (Pde->u.Hard.Valid != 0)
        return FALSE;

    /* Try to copy the PDE in from the system page directory */
    if (!MiSynchronizeSystemPde(Pde))
        return FALSE;

    /* Report whether the faulting translation is now resolvable */
    return MiAddressToPte(Address)->u.Hard.Valid != 0;
}
#endif
/* EOF */