/*
* PROJECT: ReactOS Kernel
* LICENSE: BSD - See COPYING.ARM in the top level directory
* FILE: ntoskrnl/mm/ARM3/pagfault.c
* PURPOSE: ARM Memory Manager Page Fault Handling
* PROGRAMMERS: ReactOS Portable Systems Group
*/
/* INCLUDES *******************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
/* GLOBALS ********************************************************************/
#define HYDRA_PROCESS (PEPROCESS)1
#if MI_TRACE_PFNS
BOOLEAN UserPdeFault = FALSE;
#endif
/* PRIVATE FUNCTIONS **********************************************************/
/**
 * Resolves a faulting virtual address to the protection code and (optional)
 * prototype PTE describing it, consulting the shared-user-data page, the
 * process VAD tree, or the system page-table/session ranges.
 *
 * @param VirtualAddress  Faulting address being looked up.
 * @param ProtectCode     Receives an MM_* protection code (MM_NOACCESS when
 *                        the address is invalid or unsupported).
 * @param ProtoVad        Receives the owning VAD when a prototype PTE is
 *                        returned, otherwise NULL.
 * @return The prototype PTE backing the page, or NULL when the protection
 *         code alone describes the page (or the lookup failed).
 */
PMMPTE
NTAPI
MiCheckVirtualAddress(IN PVOID VirtualAddress,
                      OUT PULONG ProtectCode,
                      OUT PMMVAD *ProtoVad)
{
    PMMVAD Vad;
    PMMPTE PointerPte;

    /* No prototype/section support for now */
    *ProtoVad = NULL;

    /* User or kernel fault? */
    if (VirtualAddress <= MM_HIGHEST_USER_ADDRESS)
    {
        /* Special case for shared data */
        if (PAGE_ALIGN(VirtualAddress) == (PVOID)MM_SHARED_USER_DATA_VA)
        {
            /* It's a read-only page */
            *ProtectCode = MM_READONLY;
            return MmSharedUserDataPte;
        }

        /* Find the VAD, it might not exist if the address is bogus */
        Vad = MiLocateAddress(VirtualAddress);
        if (!Vad)
        {
            /* Bogus virtual address */
            *ProtectCode = MM_NOACCESS;
            return NULL;
        }

        /* ReactOS does not handle physical memory VADs yet */
        ASSERT(Vad->u.VadFlags.VadType != VadDevicePhysicalMemory);

        /* Check if it's a section, or just an allocation */
        if (Vad->u.VadFlags.PrivateMemory)
        {
            /* ReactOS does not handle AWE VADs yet */
            ASSERT(Vad->u.VadFlags.VadType != VadAwe);

            /* This must be a TEB/PEB VAD */
            if (Vad->u.VadFlags.MemCommit)
            {
                /* It's committed, so return the VAD protection */
                *ProtectCode = (ULONG)Vad->u.VadFlags.Protection;
            }
            else
            {
                /* It has not yet been committed, so return no access */
                *ProtectCode = MM_NOACCESS;
            }

            /* In both cases, return no PTE */
            return NULL;
        }
        else
        {
            /* ReactOS does not support these VADs yet */
            ASSERT(Vad->u.VadFlags.VadType != VadImageMap);
            ASSERT(Vad->u2.VadFlags2.ExtendableFile == 0);

            /* Return the proto VAD */
            *ProtoVad = Vad;

            /* Get the prototype PTE for this page: index into the VAD's
               contiguous prototype PTE array by this page's offset (in VPNs)
               from the VAD's starting VPN */
            PointerPte = (((ULONG_PTR)VirtualAddress >> PAGE_SHIFT) - Vad->StartingVpn) + Vad->FirstPrototypePte;
            ASSERT(PointerPte != NULL);
            ASSERT(PointerPte <= Vad->LastContiguousPte);

            /* Return the Prototype PTE and the protection for the page mapping */
            *ProtectCode = (ULONG)Vad->u.VadFlags.Protection;
            return PointerPte;
        }
    }
    else if (MI_IS_PAGE_TABLE_ADDRESS(VirtualAddress))
    {
        /* This should never happen, as these addresses are handled by the
           double-mapping */
        if (((PMMPTE)VirtualAddress >= MiAddressToPte(MmPagedPoolStart)) &&
            ((PMMPTE)VirtualAddress <= MmPagedPoolInfo.LastPteForPagedPool))
        {
            /* Fail such access */
            *ProtectCode = MM_NOACCESS;
            return NULL;
        }

        /* Return full access rights */
        *ProtectCode = MM_READWRITE;
        return NULL;
    }
    else if (MI_IS_SESSION_ADDRESS(VirtualAddress))
    {
        /* ReactOS does not have an image list yet, so bail out to failure case */
        ASSERT(IsListEmpty(&MmSessionSpace->ImageList));
    }

    /* Default case -- failure */
    *ProtectCode = MM_NOACCESS;
    return NULL;
}
#if (_MI_PAGING_LEVELS == 2)
/* Fixes up a stale kernel PDE by copying the authoritative entry from the
   double-mapped system page directory (2-level x86 only). Returns TRUE when
   the copied entry is valid, i.e. the fault has been resolved. */
BOOLEAN
FORCEINLINE
MiSynchronizeSystemPde(PMMPDE PointerPde)
{
    MMPDE MasterPde;
    ULONG PdeOffset;

    /* Compute this PDE's slot within the system page directory */
    PdeOffset = ((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE);

    /* Install the master copy from the double-mapped system page directory */
    MasterPde = MmSystemPagePtes[PdeOffset];
    *PointerPde = MasterPde;

    /* Force a re-read of the PDE and PTE after the update */
    KeMemoryBarrierWithoutFence();

    /* TRUE when the master entry was valid (fixup succeeded) */
    return (BOOLEAN)MasterPde.u.Hard.Valid;
}
/**
 * Validates/fixes up the session-space PDE for a faulting address on 2-level
 * x86, copying the per-session PDE from MmSessionSpace->PageTables when a
 * valid copy exists.
 *
 * @param Address  Faulting address: either a session PTE (self-map range)
 *                 or a session-space virtual address.
 * @return STATUS_WAIT_1 when a PDE was (or already is) valid and the fault
 *         should be retried; STATUS_SUCCESS when no fixup was needed;
 *         STATUS_ACCESS_VIOLATION when no session or page table exists.
 */
NTSTATUS
FASTCALL
MiCheckPdeForSessionSpace(IN PVOID Address)
{
    MMPTE TempPde;
    PMMPTE PointerPde;
    PVOID SessionPageTable;
    ULONG Index;

    /* Is this a session PTE? */
    if (MI_IS_SESSION_PTE(Address))
    {
        /* Make sure the PDE for session space is valid */
        PointerPde = MiAddressToPde(MmSessionSpace);
        if (!PointerPde->u.Hard.Valid)
        {
            /* This means there's no valid session, bail out */
            DbgPrint("MiCheckPdeForSessionSpace: No current session for PTE %p\n",
                     Address);
            DbgBreakPoint();
            return STATUS_ACCESS_VIOLATION;
        }

        /* Now get the session-specific page table for this address */
        SessionPageTable = MiPteToAddress(Address);
        PointerPde = MiPteToAddress(Address);
        if (PointerPde->u.Hard.Valid) return STATUS_WAIT_1;

        /* It's not valid, so find it in the page table array.
           NOTE(review): the >> 22 index math assumes 4MB PDE coverage,
           consistent with the enclosing (_MI_PAGING_LEVELS == 2) block. */
        Index = ((ULONG_PTR)SessionPageTable - (ULONG_PTR)MmSessionBase) >> 22;
        TempPde.u.Long = MmSessionSpace->PageTables[Index].u.Long;
        if (TempPde.u.Hard.Valid)
        {
            /* The copy is valid, so swap it in atomically */
            InterlockedExchange((PLONG)PointerPde, TempPde.u.Long);
            return STATUS_WAIT_1;
        }

        /* We don't seem to have allocated a page table for this address yet? */
        /* NOTE(review): the first %p is fed PointerPde->u.Long rather than a
           pointer -- same width on x86, but confirm the intended argument. */
        DbgPrint("MiCheckPdeForSessionSpace: No Session PDE for PTE %p, %p\n",
                 PointerPde->u.Long, SessionPageTable);
        DbgBreakPoint();
        return STATUS_ACCESS_VIOLATION;
    }

    /* Is the address also a session address? If not, we're done */
    if (!MI_IS_SESSION_ADDRESS(Address)) return STATUS_SUCCESS;

    /* It is, so again get the PDE for session space */
    PointerPde = MiAddressToPde(MmSessionSpace);
    if (!PointerPde->u.Hard.Valid)
    {
        /* This means there's no valid session, bail out */
        DbgPrint("MiCheckPdeForSessionSpace: No current session for VA %p\n",
                 Address);
        DbgBreakPoint();
        return STATUS_ACCESS_VIOLATION;
    }

    /* Now get the PDE for the address itself */
    PointerPde = MiAddressToPde(Address);
    if (!PointerPde->u.Hard.Valid)
    {
        /* Do the swap, we should be good to go */
        Index = ((ULONG_PTR)Address - (ULONG_PTR)MmSessionBase) >> 22;
        PointerPde->u.Long = MmSessionSpace->PageTables[Index].u.Long;
        if (PointerPde->u.Hard.Valid) return STATUS_WAIT_1;

        /* We had not allocated a page table for this session address yet, fail! */
        DbgPrint("MiCheckPdeForSessionSpace: No Session PDE for VA %p, %p\n",
                 PointerPde->u.Long, Address);
        DbgBreakPoint();
        return STATUS_ACCESS_VIOLATION;
    }

    /* It's valid, so there's nothing to do */
    return STATUS_SUCCESS;
}
/**
 * Attempts to resolve a kernel fault by copying a valid PDE from the
 * double-mapped system page directory (2-level x86 only). Session-space
 * addresses and session PTEs are delegated to MiCheckPdeForSessionSpace.
 *
 * @param Address  Faulting virtual address.
 * @return STATUS_WAIT_1 when a fixup may have occurred and the fault should
 *         be retried; STATUS_SUCCESS otherwise; STATUS_ACCESS_VIOLATION for
 *         user-mode addresses (no system PDE applies).
 */
NTSTATUS
FASTCALL
MiCheckPdeForPagedPool(IN PVOID Address)
{
    PMMPDE PointerPde;
    NTSTATUS Status = STATUS_SUCCESS;

    /* Check session PDE */
    if (MI_IS_SESSION_ADDRESS(Address)) return MiCheckPdeForSessionSpace(Address);
    if (MI_IS_SESSION_PTE(Address)) return MiCheckPdeForSessionSpace(Address);

    //
    // Check if this is a fault while trying to access the page table itself
    //
    if (MI_IS_SYSTEM_PAGE_TABLE_ADDRESS(Address))
    {
        //
        // Send a hint to the page fault handler that this is only a valid fault
        // if we already detected this was access within the page table range
        //
        // (The faulting address is itself a PTE address, so its "PTE" is
        // really a PDE in the self-map -- hence the cast.)
        //
        PointerPde = (PMMPDE)MiAddressToPte(Address);
        Status = STATUS_WAIT_1;
    }
    else if (Address < MmSystemRangeStart)
    {
        //
        // This is totally illegal -- user addresses have no system PDE
        //
        return STATUS_ACCESS_VIOLATION;
    }
    else
    {
        //
        // Get the PDE for the address
        //
        PointerPde = MiAddressToPde(Address);
    }

    //
    // Check if it's not valid
    //
    if (PointerPde->u.Hard.Valid == 0)
    {
        //
        // Copy it from our double-mapped system page directory
        //
        InterlockedExchangePte(PointerPde,
                               MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)].u.Long);
    }

    //
    // Return status
    //
    return Status;
}
#else
/* On 3+ paging levels there is no double-mapped system page directory to
   synchronize from, so a PDE fixup is never possible -- always fail. */
NTSTATUS
FASTCALL
MiCheckPdeForPagedPool(IN PVOID Address)
{
    UNREFERENCED_PARAMETER(Address);
    return STATUS_ACCESS_VIOLATION;
}
#endif
/* Zeroes the physical page identified by PageFrameNumber through a
   temporary system-PTE mapping, honoring the page's caching attribute. */
VOID
NTAPI
MiZeroPfn(IN PFN_NUMBER PageFrameNumber)
{
    PMMPFN PfnEntry;
    PMMPTE MappingPte;
    MMPTE NewContents;
    PVOID MappedVa;

    /* Look up the PFN entry for the page we are about to zero */
    PfnEntry = MiGetPfnEntry(PageFrameNumber);
    ASSERT(PfnEntry);

    /* Reserve one system PTE as a temporary mapping window */
    MappingPte = MiReserveSystemPtes(1, SystemPteSpace);
    ASSERT(MappingPte);

    /* Build a valid kernel PTE pointing at the target page */
    NewContents = ValidKernelPte;
    NewContents.u.Hard.PageFrameNumber = PageFrameNumber;

    /* Mirror the page's caching attribute in the temporary mapping */
    switch (PfnEntry->u3.e1.CacheAttribute)
    {
        case MiWriteCombined:
            /* Write combining, no caching */
            MI_PAGE_DISABLE_CACHE(&NewContents);
            MI_PAGE_WRITE_COMBINED(&NewContents);
            break;

        case MiNonCached:
            /* Write through, no caching */
            MI_PAGE_DISABLE_CACHE(&NewContents);
            MI_PAGE_WRITE_THROUGH(&NewContents);
            break;

        default:
            /* Fully cached -- nothing to adjust */
            break;
    }

    /* Publish the mapping */
    MI_WRITE_VALID_PTE(MappingPte, NewContents);

    /* Zero the page through the window, then tear the mapping down */
    MappedVa = MiPteToAddress(MappingPte);
    KeZeroPages(MappedVa, PAGE_SIZE);
    MiReleaseSystemPtes(MappingPte, 1, SystemPteSpace);
}
/**
 * Resolves a demand-zero fault: allocates a physical page (zeroing it when
 * required), initializes its PFN entry, and writes a valid hardware PTE.
 *
 * The faulting source lines had version-control commit-log text interleaved
 * into the function (mid-signature and mid-body) by the extraction tool;
 * that non-code text has been removed -- the code itself is unchanged.
 *
 * @param Address     Faulting virtual address.
 * @param PointerPte  PTE to fill; may be a prototype PTE distinct from the
 *                    address' own PTE (NT section semantics).
 * @param Process     Owning process, HYDRA_PROCESS for session space, or
 *                    NULL for a plain kernel-mode fault.
 * @param OldIrql     MM_NOIRQL when the PFN lock is not yet held, otherwise
 *                    the IRQL at which the caller acquired the PFN lock.
 * @return STATUS_PAGE_FAULT_DEMAND_ZERO on success.
 */
NTSTATUS
NTAPI
MiResolveDemandZeroFault(IN PVOID Address,
                         IN PMMPTE PointerPte,
                         IN PEPROCESS Process,
                         IN KIRQL OldIrql)
{
    PFN_NUMBER PageFrameNumber = 0;
    MMPTE TempPte;
    BOOLEAN NeedZero = FALSE, HaveLock = FALSE;
    ULONG Color;
    PMMPFN Pfn1;

    DPRINT("ARM3 Demand Zero Page Fault Handler for address: %p in process: %p\n",
           Address,
           Process);

    /* Must currently only be called by paging path */
    if ((Process > HYDRA_PROCESS) && (OldIrql == MM_NOIRQL))
    {
        /* Sanity check */
        ASSERT(MI_IS_PAGE_TABLE_ADDRESS(PointerPte));

        /* No forking yet */
        ASSERT(Process->ForkInProgress == NULL);

        /* Get process color */
        Color = MI_GET_NEXT_PROCESS_COLOR(Process);
        ASSERT(Color != 0xFFFFFFFF);

        /* We'll need a zero page */
        NeedZero = TRUE;
    }
    else
    {
        /* Check if we need a zero page */
        NeedZero = (OldIrql != MM_NOIRQL);

        /* Session-backed image views must be zeroed */
        if ((Process == HYDRA_PROCESS) &&
            ((MI_IS_SESSION_IMAGE_ADDRESS(Address)) ||
             ((Address >= MiSessionViewStart) && (Address < MiSessionSpaceWs))))
        {
            NeedZero = TRUE;
        }

        /* Hardcode unknown color */
        Color = 0xFFFFFFFF;
    }

    /* Check if the PFN database should be acquired */
    if (OldIrql == MM_NOIRQL)
    {
        /* Acquire it and remember we should release it after */
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        HaveLock = TRUE;
    }

    /* We either manually locked the PFN DB, or already came with it locked */
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
    ASSERT(PointerPte->u.Hard.Valid == 0);

    /* Assert we have enough pages */
    ASSERT(MmAvailablePages >= 32);

#if MI_TRACE_PFNS
    if (UserPdeFault) MI_SET_USAGE(MI_USAGE_PAGE_TABLE);
    if (!UserPdeFault) MI_SET_USAGE(MI_USAGE_DEMAND_ZERO);
#endif
    if (Process) MI_SET_PROCESS2(Process->ImageFileName);
    if (!Process) MI_SET_PROCESS2("Kernel Demand 0");

    /* Do we need a zero page? */
    if (Color != 0xFFFFFFFF)
    {
        /* Try to get one, if we couldn't grab a free page and zero it */
        PageFrameNumber = MiRemoveZeroPageSafe(Color);
        if (!PageFrameNumber)
        {
            /* We'll need a free page and zero it manually */
            PageFrameNumber = MiRemoveAnyPage(Color);
            NeedZero = TRUE;
        }
    }
    else
    {
        /* Get a color, and see if we should grab a zero or non-zero page */
        Color = MI_GET_NEXT_COLOR();
        if (!NeedZero)
        {
            /* Process or system doesn't want a zero page, grab anything */
            PageFrameNumber = MiRemoveAnyPage(Color);
        }
        else
        {
            /* System wants a zero page, obtain one */
            PageFrameNumber = MiRemoveZeroPage(Color);
        }
    }

    /* Initialize it */
    MiInitializePfn(PageFrameNumber, PointerPte, TRUE);

    /* Do we have the lock? */
    if (HaveLock)
    {
        /* Release it */
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        /* Update performance counters */
        if (Process > HYDRA_PROCESS) Process->NumberOfPrivatePages++;
    }

    /* Increment demand zero faults */
    InterlockedIncrement(&KeGetCurrentPrcb()->MmDemandZeroCount);

    /* Zero the page if need be */
    if (NeedZero) MiZeroPfn(PageFrameNumber);

    /* Fault on user PDE, or fault on user PTE? */
    if (PointerPte <= MiHighestUserPte)
    {
        /* User fault, build a user PTE */
        MI_MAKE_HARDWARE_PTE_USER(&TempPte,
                                  PointerPte,
                                  PointerPte->u.Soft.Protection,
                                  PageFrameNumber);
    }
    else
    {
        /* This is a user-mode PDE, create a kernel PTE for it */
        MI_MAKE_HARDWARE_PTE(&TempPte,
                             PointerPte,
                             PointerPte->u.Soft.Protection,
                             PageFrameNumber);
    }

    /* Set it dirty if it's a writable page */
    if (MI_IS_PAGE_WRITEABLE(&TempPte)) MI_MAKE_DIRTY_PAGE(&TempPte);

    /* Write it */
    MI_WRITE_VALID_PTE(PointerPte, TempPte);

    /* Did we manually acquire the lock */
    if (HaveLock)
    {
        /* Get the PFN entry */
        Pfn1 = MI_PFN_ELEMENT(PageFrameNumber);

        /* Windows does these sanity checks */
        ASSERT(Pfn1->u1.Event == 0);
        ASSERT(Pfn1->u3.e1.PrototypePte == 0);
    }

    //
    // It's all good now
    //
    DPRINT("Demand zero page has now been paged in\n");
    return STATUS_PAGE_FAULT_DEMAND_ZERO;
}
/**
 * Final step of a prototype PTE fault: with the prototype PTE already valid,
 * builds and writes the hardware PTE for the faulting address. Releases the
 * PFN lock (and dereferences any locked incoming proto PFN) before writing.
 *
 * @param StoreInstruction  TRUE when the fault was caused by a write.
 * @param Address           Faulting virtual address.
 * @param PointerPte        The address' own PTE (must map Address).
 * @param PointerProtoPte   The valid prototype PTE backing the page.
 * @param OldIrql           IRQL to restore when releasing the PFN lock.
 * @param LockedProtoPfn    Optional locked PFN from the caller; dereferenced
 *                          and cleared here when non-NULL.
 * @return STATUS_SUCCESS.
 */
NTSTATUS
NTAPI
MiCompleteProtoPteFault(IN BOOLEAN StoreInstruction,
                        IN PVOID Address,
                        IN PMMPTE PointerPte,
                        IN PMMPTE PointerProtoPte,
                        IN KIRQL OldIrql,
                        IN PMMPFN* LockedProtoPfn)
{
    MMPTE TempPte;
    PMMPTE OriginalPte, PageTablePte;
    ULONG_PTR Protection;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1, Pfn2;
    BOOLEAN OriginalProtection, DirtyPage;

    /* Must be called with a valid prototype PTE, with the PFN lock held */
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
    ASSERT(PointerProtoPte->u.Hard.Valid == 1);

    /* Get the page */
    PageFrameIndex = PFN_FROM_PTE(PointerProtoPte);

    /* Get the PFN entry and set it as a prototype PTE */
    Pfn1 = MiGetPfnEntry(PageFrameIndex);
    Pfn1->u3.e1.PrototypePte = 1;

    /* Increment the share count for the page table */
    // FIXME: This doesn't work because we seem to bump the sharecount to two,
    // and MiDeletePte gets annoyed and ASSERTs. This could be because
    // MiDeletePte is now being called from strange code in Rosmm
    PageTablePte = MiAddressToPte(PointerPte);
    Pfn2 = MiGetPfnEntry(PageTablePte->u.Hard.PageFrameNumber);
    //Pfn2->u2.ShareCount++;

    /* Check where we should be getting the protection information from */
    if (PointerPte->u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)
    {
        /* Get the protection from the PTE, there's no real Proto PTE data */
        Protection = PointerPte->u.Soft.Protection;

        /* Remember that we did not use the proto protection */
        OriginalProtection = FALSE;
    }
    else
    {
        /* Get the protection from the original PTE link */
        OriginalPte = &Pfn1->OriginalPte;
        Protection = OriginalPte->u.Soft.Protection;

        /* Remember that we used the original protection */
        OriginalProtection = TRUE;

        /* Check if this was a write on a read only proto */
        if ((StoreInstruction) && !(Protection & MM_READWRITE))
        {
            /* Clear the flag */
            StoreInstruction = 0;
        }
    }

    /* Check if this was a write on a non-COW page */
    DirtyPage = FALSE;
    if ((StoreInstruction) && ((Protection & MM_WRITECOPY) != MM_WRITECOPY))
    {
        /* Then the page should be marked dirty */
        DirtyPage = TRUE;

        /* ReactOS check */
        ASSERT(Pfn1->OriginalPte.u.Soft.Prototype != 0);
    }

    /* Did we get a locked incoming PFN? */
    if (*LockedProtoPfn)
    {
        /* Drop a reference */
        ASSERT((*LockedProtoPfn)->u3.e2.ReferenceCount >= 1);
        MiDereferencePfnAndDropLockCount(*LockedProtoPfn);
        *LockedProtoPfn = NULL;
    }

    /* Release the PFN lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    /* Remove caching bits */
    Protection &= ~(MM_NOCACHE | MM_NOACCESS);

    /* Setup caching */
    /* NOTE(review): TempPte is still uninitialized here; the
       MI_MAKE_HARDWARE_PTE* call below builds the PTE afterwards and may
       overwrite these cache bits -- confirm the intended ordering. */
    if (Pfn1->u3.e1.CacheAttribute == MiWriteCombined)
    {
        /* Write combining, no caching */
        MI_PAGE_DISABLE_CACHE(&TempPte);
        MI_PAGE_WRITE_COMBINED(&TempPte);
    }
    else if (Pfn1->u3.e1.CacheAttribute == MiNonCached)
    {
        /* Write through, no caching */
        MI_PAGE_DISABLE_CACHE(&TempPte);
        MI_PAGE_WRITE_THROUGH(&TempPte);
    }

    /* Check if this is a kernel or user address */
    if (Address < MmSystemRangeStart)
    {
        /* Build the user PTE */
        MI_MAKE_HARDWARE_PTE_USER(&TempPte, PointerPte, Protection, PageFrameIndex);
    }
    else
    {
        /* Build the kernel PTE */
        MI_MAKE_HARDWARE_PTE(&TempPte, PointerPte, Protection, PageFrameIndex);
    }

    /* Set the dirty flag if needed */
    if (DirtyPage) TempPte.u.Hard.Dirty = TRUE;

    /* Write the PTE */
    MI_WRITE_VALID_PTE(PointerPte, TempPte);

    /* Reset the protection if needed */
    if (OriginalProtection) Protection = MM_ZERO_ACCESS;

    /* Return success */
    ASSERT(PointerPte == MiAddressToPte(Address));
    return STATUS_SUCCESS;
}
/**
 * Resolves a fault on a PTE in transition format: the page is still in
 * memory (active, standby, or modified), so it is reclaimed and the PTE is
 * rebuilt as valid without any disk I/O.
 *
 * @param FaultingAddress  The faulting virtual address.
 * @param PointerPte       The transition-format PTE to make valid.
 * @param CurrentProcess   Process context (used for debug output here).
 * @param OldIrql          IRQL from PFN lock acquisition (must be held).
 * @param InPageBlock      In-page support block; must come in as NULL
 *                         (ARM3 does not support in-page operations here).
 * @return STATUS_PAGE_FAULT_TRANSITION on success.
 */
NTSTATUS
NTAPI
MiResolveTransitionFault(IN PVOID FaultingAddress,
                         IN PMMPTE PointerPte,
                         IN PEPROCESS CurrentProcess,
                         IN KIRQL OldIrql,
                         OUT PVOID *InPageBlock)
{
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1;
    MMPTE TempPte;
    PMMPTE PointerToPteForProtoPage;

    /* NOTE(review): PointerPte is printed with %lx rather than %p --
       harmless on x86 but a format-specifier mismatch; confirm. */
    DPRINT1("Transition fault on 0x%p with PTE 0x%lx in process %s\n", FaultingAddress, PointerPte, CurrentProcess->ImageFileName);

    /* Windows does this check */
    ASSERT(*InPageBlock == NULL);

    /* ARM3 doesn't support this path */
    ASSERT(OldIrql != MM_NOIRQL);

    /* Capture the PTE and make sure it's in transition format */
    TempPte = *PointerPte;
    ASSERT((TempPte.u.Soft.Valid == 0) &&
           (TempPte.u.Soft.Prototype == 0) &&
           (TempPte.u.Soft.Transition == 1));

    /* Get the PFN and the PFN entry */
    PageFrameIndex = TempPte.u.Trans.PageFrameNumber;
    DPRINT1("Transition PFN: %lx\n", PageFrameIndex);
    Pfn1 = MiGetPfnEntry(PageFrameIndex);

    /* One more transition fault! */
    InterlockedIncrement(&KeGetCurrentPrcb()->MmTransitionCount);

    /* This is from ARM3 -- Windows normally handles this here */
    ASSERT(Pfn1->u4.InPageError == 0);

    /* Not supported in ARM3 */
    ASSERT(Pfn1->u3.e1.ReadInProgress == 0);

    /* Windows checks there's some free pages and this isn't an in-page error */
    ASSERT(MmAvailablePages >= 0);
    ASSERT(Pfn1->u4.InPageError == 0);

    /* ReactOS checks for this */
    ASSERT(MmAvailablePages > 32);

    /* Was this a transition page in the valid list, or free/zero list? */
    if (Pfn1->u3.e1.PageLocation == ActiveAndValid)
    {
        /* All Windows does here is a bunch of sanity checks */
        DPRINT1("Transition in active list\n");
        ASSERT((Pfn1->PteAddress >= MiAddressToPte(MmPagedPoolStart)) &&
               (Pfn1->PteAddress <= MiAddressToPte(MmPagedPoolEnd)));
        ASSERT(Pfn1->u2.ShareCount != 0);
        ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
    }
    else
    {
        /* Otherwise, the page is removed from its list */
        DPRINT1("Transition page in free/zero list\n");
        MiUnlinkPageFromList(Pfn1);
        MiReferenceUnusedPageAndBumpLockCount(Pfn1);
    }

    /* At this point, there should no longer be any in-page errors */
    ASSERT(Pfn1->u4.InPageError == 0);

    /* Check if this was a PFN with no more share references */
    if (Pfn1->u2.ShareCount == 0) MiDropLockCount(Pfn1);

    /* Bump the share count and make the page valid */
    Pfn1->u2.ShareCount++;
    Pfn1->u3.e1.PageLocation = ActiveAndValid;

    /* Prototype PTEs are in paged pool, which itself might be in transition */
    if (FaultingAddress >= MmSystemRangeStart)
    {
        /* Check if this is a paged pool PTE in transition state */
        PointerToPteForProtoPage = MiAddressToPte(PointerPte);
        TempPte = *PointerToPteForProtoPage;
        if ((TempPte.u.Hard.Valid == 0) && (TempPte.u.Soft.Transition == 1))
        {
            /* This isn't yet supported */
            DPRINT1("Double transition fault not yet supported\n");
            ASSERT(FALSE);
        }
    }

    /* Build the transition PTE -- maybe a macro? */
    ASSERT(PointerPte->u.Hard.Valid == 0);
    ASSERT(PointerPte->u.Trans.Prototype == 0);
    ASSERT(PointerPte->u.Trans.Transition == 1);
    TempPte.u.Long = (PointerPte->u.Long & ~0xFFF) |
                     (MmProtectToPteMask[PointerPte->u.Trans.Protection]) |
                     MiDetermineUserGlobalPteMask(PointerPte);

    /* Is the PTE writeable? */
    if (((Pfn1->u3.e1.Modified) && (TempPte.u.Hard.Write)) &&
        (TempPte.u.Hard.CopyOnWrite == 0))
    {
        /* Make it dirty */
        TempPte.u.Hard.Dirty = TRUE;
    }
    else
    {
        /* Make it clean */
        TempPte.u.Hard.Dirty = FALSE;
    }

    /* Write the valid PTE */
    MI_WRITE_VALID_PTE(PointerPte, TempPte);

    /* Return success */
    return STATUS_PAGE_FAULT_TRANSITION;
}
/**
 * Resolves a fault on a PTE that points at a prototype PTE: either the
 * proto PTE is already valid (fast path -- just share the page), or the
 * fault is resolved through the transition or demand-zero paths against the
 * prototype PTE itself, then completed via MiCompleteProtoPteFault.
 *
 * Called with the PFN lock held; the lock is released by the completion
 * routine on every successful path (and explicitly on failure).
 *
 * @param StoreInstruction  TRUE when the fault was caused by a write.
 * @param Address           Faulting virtual address.
 * @param PointerPte        The address' own (invalid, prototype-format) PTE.
 * @param PointerProtoPte   The prototype PTE to resolve against.
 * @param OutPfn            In/out locked proto PFN, passed to completion.
 * @param PageFileData      Unused here (no paging file support).
 * @param PteValue          Unused here.
 * @param Process           Owning process context.
 * @param OldIrql           IRQL from the PFN lock acquisition.
 * @param TrapInformation   Trap frame pointer (unused here).
 * @return STATUS_SUCCESS, or STATUS_ACCESS_VIOLATION for a reserved
 *         (zero) prototype PTE.
 */
NTSTATUS
NTAPI
MiResolveProtoPteFault(IN BOOLEAN StoreInstruction,
                       IN PVOID Address,
                       IN PMMPTE PointerPte,
                       IN PMMPTE PointerProtoPte,
                       IN OUT PMMPFN *OutPfn,
                       OUT PVOID *PageFileData,
                       OUT PMMPTE PteValue,
                       IN PEPROCESS Process,
                       IN KIRQL OldIrql,
                       IN PVOID TrapInformation)
{
    MMPTE TempPte, PteContents;
    PMMPFN Pfn1;
    PFN_NUMBER PageFrameIndex;
    NTSTATUS Status;
    PVOID InPageBlock = NULL;

    /* Must be called with an invalid, prototype PTE, with the PFN lock held */
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
    ASSERT(PointerPte->u.Hard.Valid == 0);
    ASSERT(PointerPte->u.Soft.Prototype == 1);

    /* Read the prototype PTE and check if it's valid */
    TempPte = *PointerProtoPte;
    if (TempPte.u.Hard.Valid == 1)
    {
        /* One more user of this mapped page */
        PageFrameIndex = PFN_FROM_PTE(&TempPte);
        Pfn1 = MiGetPfnEntry(PageFrameIndex);
        Pfn1->u2.ShareCount++;

        /* Call it a transition */
        InterlockedIncrement(&KeGetCurrentPrcb()->MmTransitionCount);

        /* Complete the prototype PTE fault -- this will release the PFN lock */
        return MiCompleteProtoPteFault(StoreInstruction,
                                       Address,
                                       PointerPte,
                                       PointerProtoPte,
                                       OldIrql,
                                       OutPfn);
    }

    /* Make sure there's some protection mask */
    if (TempPte.u.Long == 0)
    {
        /* Release the lock */
        DPRINT1("Access on reserved section?\n");
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
        return STATUS_ACCESS_VIOLATION;
    }

    /* Check for access rights on the PTE proper */
    PteContents = *PointerPte;
    if (PteContents.u.Soft.PageFileHigh != MI_PTE_LOOKUP_NEEDED)
    {
        if (!PteContents.u.Proto.ReadOnly)
        {
            /* FIXME: CHECK FOR ACCESS */

            /* Check for copy on write page */
            if ((TempPte.u.Soft.Protection & MM_WRITECOPY) == MM_WRITECOPY)
            {
                /* Not yet supported */
                ASSERT(FALSE);
            }
        }
    }
    else
    {
        /* Check for copy on write page */
        if ((PteContents.u.Soft.Protection & MM_WRITECOPY) == MM_WRITECOPY)
        {
            /* Not yet supported */
            ASSERT(FALSE);
        }
    }

    /* Check for clone PTEs (only meaningful for user-space PTEs) */
    if (PointerPte <= MiHighestUserPte) ASSERT(Process->CloneRoot == NULL);

    /* We don't support mapped files yet */
    ASSERT(TempPte.u.Soft.Prototype == 0);

    /* We might however have transition PTEs */
    if (TempPte.u.Soft.Transition == 1)
    {
        /* Resolve the transition fault */
        ASSERT(OldIrql != MM_NOIRQL);
        Status = MiResolveTransitionFault(Address,
                                          PointerProtoPte,
                                          Process,
                                          OldIrql,
                                          &InPageBlock);
        ASSERT(NT_SUCCESS(Status));
    }
    else
    {
        /* We also don't support paged out pages */
        ASSERT(TempPte.u.Soft.PageFileHigh == 0);

        /* Resolve the demand zero fault against the PROTOTYPE PTE, not the
           address' own PTE -- NT section semantics */
        Status = MiResolveDemandZeroFault(Address,
                                          PointerProtoPte,
                                          Process,
                                          OldIrql);
        ASSERT(NT_SUCCESS(Status));
    }

    /* Complete the prototype PTE fault -- this will release the PFN lock */
    ASSERT(PointerPte->u.Hard.Valid == 0);
    return MiCompleteProtoPteFault(StoreInstruction,
                                   Address,
                                   PointerPte,
                                   PointerProtoPte,
                                   OldIrql,
                                   OutPfn);
}
NTSTATUS
NTAPI
MiDispatchFault(IN BOOLEAN StoreInstruction,
IN PVOID Address,
IN PMMPTE PointerPte,
IN PMMPTE PointerProtoPte,
IN BOOLEAN Recursive,
IN PEPROCESS Process,
IN PVOID TrapInformation,
IN PMMVAD Vad)
{
MMPTE TempPte;
KIRQL OldIrql, LockIrql;
NTSTATUS Status;
PMMPTE SuperProtoPte;
PMMPFN Pfn1, OutPfn = NULL;
PFN_NUMBER PageFrameIndex, PteCount, ProcessedPtes;
DPRINT("ARM3 Page Fault Dispatcher for address: %p in process: %p\n",
Address,
Process);
/* Make sure the addresses are ok */
ASSERT(PointerPte == MiAddressToPte(Address));
//
// Make sure APCs are off and we're not at dispatch
//
OldIrql = KeGetCurrentIrql();
ASSERT(OldIrql <= APC_LEVEL);
ASSERT(KeAreAllApcsDisabled() == TRUE);
//
// Grab a copy of the PTE
//
TempPte = *PointerPte;
/* Do we have a prototype PTE? */
if (PointerProtoPte)
{
/* This should never happen */
ASSERT(!MI_IS_PHYSICAL_ADDRESS(PointerProtoPte));
/* Check if this is a kernel-mode address */
SuperProtoPte = MiAddressToPte(PointerProtoPte);
if (Address >= MmSystemRangeStart)
{
/* Lock the PFN database */
LockIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
/* Has the PTE been made valid yet? */
if (!SuperProtoPte->u.Hard.Valid)
{
ASSERT(FALSE);
}
else if (PointerPte->u.Hard.Valid == 1)
{
ASSERT(FALSE);
}
/* Resolve the fault -- this will release the PFN lock */
Status = MiResolveProtoPteFault(StoreInstruction,
Address,
PointerPte,
PointerProtoPte,
&OutPfn,
NULL,
NULL,
Process,
LockIrql,
TrapInformation);
ASSERT(Status == STATUS_SUCCESS);
/* Complete this as a transition fault */
ASSERT(OldIrql == KeGetCurrentIrql());
ASSERT(OldIrql <= APC_LEVEL);
ASSERT(KeAreAllApcsDisabled() == TRUE);
return Status;
}
else
{
/* We only handle the lookup path */
ASSERT(PointerPte->u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED);
/* Is there a non-image VAD? */
if ((Vad) &&
(Vad->u.VadFlags.VadType != VadImageMap) &&
!(Vad->u2.VadFlags2.ExtendableFile))
{
/* One day, ReactOS will cluster faults */
ASSERT(Address <= MM_HIGHEST_USER_ADDRESS);
DPRINT("Should cluster fault, but won't\n");
}
/* Only one PTE to handle for now */
PteCount = 1;
ProcessedPtes = 0;
/* Lock the PFN database */
LockIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
/* We only handle the valid path */
ASSERT(SuperProtoPte->u.Hard.Valid == 1);
/* Capture the PTE */
TempPte = *PointerProtoPte;
/* Loop to handle future case of clustered faults */
while (TRUE)
{
/* For our current usage, this should be true */
if (TempPte.u.Hard.Valid == 1)
{
/* Bump the share count on the PTE */
PageFrameIndex = PFN_FROM_PTE(&TempPte);
Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
Pfn1->u2.ShareCount++;
}
else if ((TempPte.u.Soft.Prototype == 0) &&
(TempPte.u.Soft.Transition == 1))
{
/* No standby support yet */
ASSERT(FALSE);
}
else
{
/* Page is invalid, get out of the loop */
break;
}
/* One more done, was it the last? */
if (++ProcessedPtes == PteCount)
{
/* Complete the fault */
MiCompleteProtoPteFault(StoreInstruction,
Address,
PointerPte,
PointerProtoPte,
LockIrql,
&OutPfn);
/* THIS RELEASES THE PFN LOCK! */
break;
}
/* No clustered faults yet */
ASSERT(FALSE);
}
/* Did we resolve the fault? */
if (ProcessedPtes)
{
/* Bump the transition count */
InterlockedExchangeAdd(&KeGetCurrentPrcb()->MmTransitionCount, ProcessedPtes);
ProcessedPtes--;
/* Loop all the processing we did */
ASSERT(ProcessedPtes == 0);
/* Complete this as a transition fault */
ASSERT(OldIrql == KeGetCurrentIrql());
ASSERT(OldIrql <= APC_LEVEL);
ASSERT(KeAreAllApcsDisabled() == TRUE);
return STATUS_PAGE_FAULT_TRANSITION;
}
/* We did not -- PFN lock is still held, prepare to resolve prototype PTE fault */
OutPfn = MI_PFN_ELEMENT(SuperProtoPte->u.Hard.PageFrameNumber);
MiReferenceUsedPageAndBumpLockCount(OutPfn);
ASSERT(OutPfn->u3.e2.ReferenceCount > 1);
ASSERT(PointerPte->u.Hard.Valid == 0);
/* Resolve the fault -- this will release the PFN lock */
Status = MiResolveProtoPteFault(StoreInstruction,
Address,
PointerPte,
PointerProtoPte,
&OutPfn,
NULL,
NULL,
Process,
LockIrql,
TrapInformation);
ASSERT(Status == STATUS_SUCCESS);
/* Did the routine clean out the PFN or should we? */
if (OutPfn)
{
/* We had a locked PFN, so acquire the PFN lock to dereference it */
ASSERT(PointerProtoPte != NULL);
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
/* Dereference the locked PFN */
MiDereferencePfnAndDropLockCount(OutPfn);
ASSERT(OutPfn->u3.e2.ReferenceCount >= 1);
/* And now release the lock */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
/* Complete this as a transition fault */
ASSERT(OldIrql == KeGetCurrentIrql());
ASSERT(OldIrql <= APC_LEVEL);
ASSERT(KeAreAllApcsDisabled() == TRUE);
return Status;
}
}
//
// The PTE must be invalid but not completely empty. It must also not be a
// prototype PTE as that scenario should've been handled above. These are
// all Windows checks
//
ASSERT(TempPte.u.Hard.Valid == 0);
ASSERT(TempPte.u.Soft.Prototype == 0);
ASSERT(TempPte.u.Long != 0);
//
// No transition or page file software PTEs in ARM3 yet, so this must be a
// demand zero page. These are all ReactOS checks
//
ASSERT(TempPte.u.Soft.Transition == 0);
ASSERT(TempPte.u.Soft.PageFileHigh == 0);
//
// If we got this far, the PTE can only be a demand zero PTE, which is what
// we want. Go handle it!
//
Status = MiResolveDemandZeroFault(Address,
PointerPte,
Process,
MM_NOIRQL);
ASSERT(KeAreAllApcsDisabled() == TRUE);
if (NT_SUCCESS(Status))
{
//
// Make sure we're returning in a sane state and pass the status down
//
ASSERT(OldIrql == KeGetCurrentIrql());
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
return Status;
}
//
// Generate an access fault
//
return STATUS_ACCESS_VIOLATION;
}
NTSTATUS
NTAPI
MmArmAccessFault(IN BOOLEAN StoreInstruction,
IN PVOID Address,
IN KPROCESSOR_MODE Mode,
IN PVOID TrapInformation)
{
KIRQL OldIrql = KeGetCurrentIrql(), LockIrql;
PMMPTE ProtoPte = NULL;
PMMPTE PointerPte = MiAddressToPte(Address);
PMMPDE PointerPde = MiAddressToPde(Address);
#if (_MI_PAGING_LEVELS >= 3)
PMMPDE PointerPpe = MiAddressToPpe(Address);
#if (_MI_PAGING_LEVELS == 4)
PMMPDE PointerPxe = MiAddressToPxe(Address);
#endif
#endif
MMPTE TempPte;
PETHREAD CurrentThread;
PEPROCESS CurrentProcess;
NTSTATUS Status;
PMMSUPPORT WorkingSet;
ULONG ProtectionCode;
PMMVAD Vad;
PFN_NUMBER PageFrameIndex;
ULONG Color;
BOOLEAN IsSessionAddress;
PMMPFN Pfn1;
DPRINT("ARM3 FAULT AT: %p\n", Address);
/* Check for page fault on high IRQL */
if (OldIrql > APC_LEVEL)
{
#if (_MI_PAGING_LEVELS < 3)
/* Could be a page table for paged pool, which we'll allow */
if (MI_IS_SYSTEM_PAGE_TABLE_ADDRESS(Address)) MiSynchronizeSystemPde((PMMPDE)PointerPte);
MiCheckPdeForPagedPool(Address);
#endif
/* Check if any of the top-level pages are invalid */
if (
#if (_MI_PAGING_LEVELS == 4)
(PointerPxe->u.Hard.Valid == 0) ||
#endif
#if (_MI_PAGING_LEVELS >= 3)
(PointerPpe->u.Hard.Valid == 0) ||
#endif
(PointerPde->u.Hard.Valid == 0))
{
/* This fault is not valid, printf out some debugging help */
DbgPrint("MM:***PAGE FAULT AT IRQL > 1 Va %p, IRQL %lx\n",
Address,
OldIrql);
if (TrapInformation)
{
PKTRAP_FRAME TrapFrame = TrapInformation;
DbgPrint("MM:***EIP %p, EFL %p\n", TrapFrame->Eip, TrapFrame->EFlags);
DbgPrint("MM:***EAX %p, ECX %p EDX %p\n", TrapFrame->Eax, TrapFrame->Ecx, TrapFrame->Edx);
DbgPrint("MM:***EBX %p, ESI %p EDI %p\n", TrapFrame->Ebx, TrapFrame->Esi, TrapFrame->Edi);
}
/* Tell the trap handler to fail */
return STATUS_IN_PAGE_ERROR | 0x10000000;
}
/* Not yet implemented in ReactOS */
ASSERT(MI_IS_PAGE_LARGE(PointerPde) == FALSE);
ASSERT(((StoreInstruction) && (PointerPte->u.Hard.CopyOnWrite)) == FALSE);
/* Check if this was a write */
if (StoreInstruction)
{
/* Was it to a read-only page? */
Pfn1 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
if (!(PointerPte->u.Long & PTE_READWRITE) &&
!(Pfn1->OriginalPte.u.Soft.Protection & MM_READWRITE))
{
/* Crash with distinguished bugcheck code */
KeBugCheckEx(ATTEMPTED_WRITE_TO_READONLY_MEMORY,
(ULONG_PTR)Address,
PointerPte->u.Long,
(ULONG_PTR)TrapInformation,
10);
}
}
/* Nothing is actually wrong */
DPRINT1("Fault at IRQL1 is ok\n");
return STATUS_SUCCESS;
}
/* Check for kernel fault address */
if (Address >= MmSystemRangeStart)
{
/* Bail out, if the fault came from user mode */
if (Mode == UserMode) return STATUS_ACCESS_VIOLATION;
#if (_MI_PAGING_LEVELS == 4)
/* AMD64 system, check if PXE is invalid */
if (PointerPxe->u.Hard.Valid == 0)
{
KeBugCheckEx(PAGE_FAULT_IN_NONPAGED_AREA,
(ULONG_PTR)Address,
StoreInstruction,
(ULONG_PTR)TrapInformation,
7);
}
#endif
#if (_MI_PAGING_LEVELS == 4)
/* PAE/AMD64 system, check if PPE is invalid */
if (PointerPpe->u.Hard.Valid == 0)
{
KeBugCheckEx(PAGE_FAULT_IN_NONPAGED_AREA,
(ULONG_PTR)Address,
StoreInstruction,
(ULONG_PTR)TrapInformation,
5);
}
#endif
#if (_MI_PAGING_LEVELS == 2)
if (MI_IS_SYSTEM_PAGE_TABLE_ADDRESS(Address)) MiSynchronizeSystemPde((PMMPDE)PointerPte);
MiCheckPdeForPagedPool(Address);
#endif
/* Check if the PDE is invalid */
if (PointerPde->u.Hard.Valid == 0)
{
/* PDE (still) not valid, kill the system */
KeBugCheckEx(PAGE_FAULT_IN_NONPAGED_AREA,
(ULONG_PTR)Address,
StoreInstruction,
(ULONG_PTR)TrapInformation,
2);
}
/* Not handling session faults yet */
IsSessionAddress = MI_IS_SESSION_ADDRESS(Address);
/* The PDE is valid, so read the PTE */
TempPte = *PointerPte;
if (TempPte.u.Hard.Valid == 1)
{
/* Check if this was system space or session space */
if (!IsSessionAddress)
{
/* Check if the PTE is still valid under PFN lock */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
TempPte = *PointerPte;
if (TempPte.u.Hard.Valid)
{
/* Check if this was a write */
if (StoreInstruction)
{
/* Was it to a read-only page? */
Pfn1 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
if (!(PointerPte->u.Long & PTE_READWRITE) &&
!(Pfn1->OriginalPte.u.Soft.Protection & MM_READWRITE))
{
/* Crash with distinguished bugcheck code */
KeBugCheckEx(ATTEMPTED_WRITE_TO_READONLY_MEMORY,
(ULONG_PTR)Address,
PointerPte->u.Long,
(ULONG_PTR)TrapInformation,
11);
}
}
}
/* Release PFN lock and return all good */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
return STATUS_SUCCESS;
}
}
/* Check if this was a session PTE that needs to remap the session PDE */
if (MI_IS_SESSION_PTE(Address))
{
/* Do the remapping */
Status = MiCheckPdeForSessionSpace(Address);
if (!NT_SUCCESS(Status))
{
/* It failed, this address is invalid */
KeBugCheckEx(PAGE_FAULT_IN_NONPAGED_AREA,
(ULONG_PTR)Address,
StoreInstruction,
(ULONG_PTR)TrapInformation,
6);
}
}
/* Check for a fault on the page table or hyperspace */
if (MI_IS_PAGE_TABLE_OR_HYPER_ADDRESS(Address))
{
#if (_MI_PAGING_LEVELS < 3)
/* Windows does this check but I don't understand why -- it's done above! */
ASSERT(MiCheckPdeForPagedPool(Address) != STATUS_WAIT_1);
#endif
/* Handle this as a user mode fault */
goto UserFault;
}
/* Get the current thread */
CurrentThread = PsGetCurrentThread();
/* What kind of address is this */
if (!IsSessionAddress)
{
/* Use the system working set */
WorkingSet = &MmSystemCacheWs;
CurrentProcess = NULL;
/* Make sure we don't have a recursive working set lock */
if ((CurrentThread->OwnsProcessWorkingSetExclusive) ||
(CurrentThread->OwnsProcessWorkingSetShared) ||
(CurrentThread->OwnsSystemWorkingSetExclusive) ||
(CurrentThread->OwnsSystemWorkingSetShared) ||
(CurrentThread->OwnsSessionWorkingSetExclusive) ||
(CurrentThread->OwnsSessionWorkingSetShared))
{
/* Fail */
return STATUS_IN_PAGE_ERROR | 0x10000000;
}
}
else
{
/* Use the session process and working set */
CurrentProcess = HYDRA_PROCESS;
WorkingSet = &MmSessionSpace->GlobalVirtualAddress->Vm;
/* Make sure we don't have a recursive working set lock */
if ((CurrentThread->OwnsSessionWorkingSetExclusive) ||
(CurrentThread->OwnsSessionWorkingSetShared))
{
/* Fail */
return STATUS_IN_PAGE_ERROR | 0x10000000;
}
}
/* Acquire the working set lock */
KeRaiseIrql(APC_LEVEL, &LockIrql);
MiLockWorkingSet(CurrentThread, WorkingSet);
/* Re-read PTE now that we own the lock */
TempPte = *PointerPte;
if (TempPte.u.Hard.Valid == 1)
{
/* Check if this was a write */
if (StoreInstruction)
{
/* Was it to a read-only page that is not copy on write? */
Pfn1 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
if (!(TempPte.u.Long & PTE_READWRITE) &&
!(Pfn1->OriginalPte.u.Soft.Protection & MM_READWRITE) &&
!(TempPte.u.Hard.CopyOnWrite))
{
/* Case not yet handled */
ASSERT(!IsSessionAddress);
/* Crash with distinguished bugcheck code */
KeBugCheckEx(ATTEMPTED_WRITE_TO_READONLY_MEMORY,
(ULONG_PTR)Address,
TempPte.u.Long,
(ULONG_PTR)TrapInformation,
12);
}
}
/* Check for read-only write in session space */
if ((IsSessionAddress) &&
(StoreInstruction) &&
!(TempPte.u.Hard.Write))
{
/* Sanity check */
ASSERT(MI_IS_SESSION_IMAGE_ADDRESS(Address));
/* Was this COW? */
if (TempPte.u.Hard.CopyOnWrite == 0)
{
/* Then this is not allowed */
KeBugCheckEx(ATTEMPTED_WRITE_TO_READONLY_MEMORY,
(ULONG_PTR)Address,
(ULONG_PTR)TempPte.u.Long,
(ULONG_PTR)TrapInformation,
13);
}
/* Otherwise, handle COW */
ASSERT(FALSE);
}
/* Release the working set */
MiUnlockWorkingSet(CurrentThread, WorkingSet);
KeLowerIrql(LockIrql);
/* Otherwise, the PDE was probably invalid, and all is good now */
return STATUS_SUCCESS;
}
/* Check one kind of prototype PTE */
if (TempPte.u.Soft.Prototype)
{
/* Make sure protected pool is on, and that this is a pool address */
if ((MmProtectFreedNonPagedPool) &&
(((Address >= MmNonPagedPoolStart) &&
(Address < (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
MmSizeOfNonPagedPoolInBytes))) ||
((Address >= MmNonPagedPoolExpansionStart) &&
(Address < MmNonPagedPoolEnd))))
{
/* Bad boy, bad boy, whatcha gonna do, whatcha gonna do when ARM3 comes for you! */
KeBugCheckEx(DRIVER_CAUGHT_MODIFYING_FREED_POOL,
(ULONG_PTR)Address,
StoreInstruction,
Mode,
4);
}
/* Get the prototype PTE! */
ProtoPte = MiProtoPteToPte(&TempPte);
/* Do we need to locate the prototype PTE in session space? */
if ((IsSessionAddress) &&
(TempPte.u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED))
{
/* Yep, go find it as well as the VAD for it */
ProtoPte = MiCheckVirtualAddress(Address,
&ProtectionCode,
&Vad);
ASSERT(ProtoPte != NULL);
}
}
else
{
/* We don't implement transition PTEs */
ASSERT(TempPte.u.Soft.Transition == 0);
/* Check for no-access PTE */
if (TempPte.u.Soft.Protection == MM_NOACCESS)
{
/* Bugcheck the system! */
KeBugCheckEx(PAGE_FAULT_IN_NONPAGED_AREA,
(ULONG_PTR)Address,
StoreInstruction,
(ULONG_PTR)TrapInformation,
1);
}
}
/* Check for demand page */
if ((StoreInstruction) &&
!(ProtoPte) &&
!(IsSessionAddress) &&
!(TempPte.u.Hard.Valid))
{
/* Get the protection code */
ASSERT(TempPte.u.Soft.Transition == 0);
if (!(TempPte.u.Soft.Protection & MM_READWRITE))
{
/* Bugcheck the system! */
KeBugCheckEx(ATTEMPTED_WRITE_TO_READONLY_MEMORY,
(ULONG_PTR)Address,
TempPte.u.Long,
(ULONG_PTR)TrapInformation,
14);
}
}
/* Now do the real fault handling */
Status = MiDispatchFault(StoreInstruction,
Address,
PointerPte,
ProtoPte,
FALSE,
CurrentProcess,
TrapInformation,
NULL);
/* Release the working set */
ASSERT(KeAreAllApcsDisabled() == TRUE);
MiUnlockWorkingSet(CurrentThread, WorkingSet);
KeLowerIrql(LockIrql);
/* We are done! */
DPRINT("Fault resolved with status: %lx\n", Status);
return Status;
}
/* This is a user fault */
UserFault:
CurrentThread = PsGetCurrentThread();
CurrentProcess = (PEPROCESS)CurrentThread->Tcb.ApcState.Process;
/* Lock the working set */
MiLockProcessWorkingSet(CurrentProcess, CurrentThread);
#if (_MI_PAGING_LEVELS == 4)
// Note to Timo: You should call MiCheckVirtualAddress and also check if it's zero pte
// also this is missing the page count increment
/* Check if the PXE is valid */
if (PointerPxe->u.Hard.Valid == 0)
{
/* Right now, we only handle scenarios where the PXE is totally empty */
ASSERT(PointerPxe->u.Long == 0);
#if 0
/* Resolve a demand zero fault */
Status = MiResolveDemandZeroFault(PointerPpe,
MM_READWRITE,
CurrentProcess,
MM_NOIRQL);
#endif
/* We should come back with a valid PXE */
ASSERT(PointerPxe->u.Hard.Valid == 1);
}
#endif
#if (_MI_PAGING_LEVELS >= 3)
// Note to Timo: You should call MiCheckVirtualAddress and also check if it's zero pte
// also this is missing the page count increment
/* Check if the PPE is valid */
if (PointerPpe->u.Hard.Valid == 0)
{
/* Right now, we only handle scenarios where the PPE is totally empty */
ASSERT(PointerPpe->u.Long == 0);
#if 0
/* Resolve a demand zero fault */
Status = MiResolveDemandZeroFault(PointerPde,
MM_READWRITE,
CurrentProcess,
MM_NOIRQL);
Two Part Patch which fixes ARM3 Section Support (not yet enabled). This had been enabled in the past for testing and resulted in bizare crashes during testing. The amount of fixing required should reveal why: Part 1: Page Fault Path Fixes [NTOS]: As an optimization, someone seems to have had changed the MiResolveDemandZeroFault prototype not to require a PTE, and to instead take a protection mask directly. While clever, this broke support for ARM3 sections, because the code was now assuming that the protection of the PTE for the input address should be used -- while in NT Sections we instead use what are called ProtoType PTEs. This was very annoying to debug, but since the cause has been fixed, I've reverted back to the old convention in which the PTE is passed-in, and this can be a different PTE than the PTE for the address, as it should be. [NTOS]: Due to the reverting of the original path, another optimization, in which MiResolveDemandZeroFault was being called directly instead of going through MiDispatchFault and writing an invalid demand-zero PDE has also been removed. PDE faults are now going through the correct, expected path. [NTOS]: MiResolveDemandZeroFault was always creating Kernel PTEs. It should create User PTEs when necessary. [NTOS]: MiDeletePte was assuming any prototype PTE is a forked PTE. Forked PTEs only happen when the addresses in the PTE don't match, so check for that too. Part 2: ARM3 Section Object Fixes [NTOS]: Fix issue when trying to make both ROS_SECTION_OBJECTs and NT's SECTION co-exist. We relied on the *caller* knowing what kind of section this is, and that can't be a good idea. Now, when the caller requests an ARM3 section vs a ROS section, we use a marker to detect what kind of section this is for later APIs. [NTOS]: For section VADs, we were storing the ReactOS MEMORY_AREA in the ControlArea... 
however, the mappings of one individual section object share a single control area, even though they have multiple MEMORY_AREAs (one for each mapping). As such, we overwrote the MEMORY_AREA continously, and at free-time, double or triple-freed the same memory area. [NTOS]: Moved the MEMORY_AREA to the "Banked" field of the long VAD, instead of the ControlArea. Allocate MMVAD_LONGs for ARM3 sections for now, to support this. Also, after deleting the MEMORY_AREA while parsing VADs, we now use a special marker to detect double-frees, and we also use a special marker to make sure we have a Long VAD as expected. svn path=/trunk/; revision=56035
2012-03-05 16:41:46 +00:00
#endif
/* We should come back with a valid PPE */
ASSERT(PointerPpe->u.Hard.Valid == 1);
}
#endif
/* Check if the PDE is valid */
if (PointerPde->u.Hard.Valid == 0)
{
/* Right now, we only handle scenarios where the PDE is totally empty */
ASSERT(PointerPde->u.Long == 0);
/* And go dispatch the fault on the PDE. This should handle the demand-zero */
#if MI_TRACE_PFNS
UserPdeFault = TRUE;
#endif
Two Part Patch which fixes ARM3 Section Support (not yet enabled). This had been enabled in the past for testing and resulted in bizare crashes during testing. The amount of fixing required should reveal why: Part 1: Page Fault Path Fixes [NTOS]: As an optimization, someone seems to have had changed the MiResolveDemandZeroFault prototype not to require a PTE, and to instead take a protection mask directly. While clever, this broke support for ARM3 sections, because the code was now assuming that the protection of the PTE for the input address should be used -- while in NT Sections we instead use what are called ProtoType PTEs. This was very annoying to debug, but since the cause has been fixed, I've reverted back to the old convention in which the PTE is passed-in, and this can be a different PTE than the PTE for the address, as it should be. [NTOS]: Due to the reverting of the original path, another optimization, in which MiResolveDemandZeroFault was being called directly instead of going through MiDispatchFault and writing an invalid demand-zero PDE has also been removed. PDE faults are now going through the correct, expected path. [NTOS]: MiResolveDemandZeroFault was always creating Kernel PTEs. It should create User PTEs when necessary. [NTOS]: MiDeletePte was assuming any prototype PTE is a forked PTE. Forked PTEs only happen when the addresses in the PTE don't match, so check for that too. Part 2: ARM3 Section Object Fixes [NTOS]: Fix issue when trying to make both ROS_SECTION_OBJECTs and NT's SECTION co-exist. We relied on the *caller* knowing what kind of section this is, and that can't be a good idea. Now, when the caller requests an ARM3 section vs a ROS section, we use a marker to detect what kind of section this is for later APIs. [NTOS]: For section VADs, we were storing the ReactOS MEMORY_AREA in the ControlArea... 
however, the mappings of one individual section object share a single control area, even though they have multiple MEMORY_AREAs (one for each mapping). As such, we overwrote the MEMORY_AREA continously, and at free-time, double or triple-freed the same memory area. [NTOS]: Moved the MEMORY_AREA to the "Banked" field of the long VAD, instead of the ControlArea. Allocate MMVAD_LONGs for ARM3 sections for now, to support this. Also, after deleting the MEMORY_AREA while parsing VADs, we now use a special marker to detect double-frees, and we also use a special marker to make sure we have a Long VAD as expected. svn path=/trunk/; revision=56035
2012-03-05 16:41:46 +00:00
MiCheckVirtualAddress(Address, &ProtectionCode, &Vad);
if (ProtectionCode == MM_NOACCESS)
{
#if (_MI_PAGING_LEVELS == 2)
/* Could be a page table for paged pool */
MiCheckPdeForPagedPool(Address);
#endif
/* Has the code above changed anything -- is this now a valid PTE? */
Status = (PointerPde->u.Hard.Valid == 1) ? STATUS_SUCCESS : STATUS_ACCESS_VIOLATION;
Two Part Patch which fixes ARM3 Section Support (not yet enabled). This had been enabled in the past for testing and resulted in bizare crashes during testing. The amount of fixing required should reveal why: Part 1: Page Fault Path Fixes [NTOS]: As an optimization, someone seems to have had changed the MiResolveDemandZeroFault prototype not to require a PTE, and to instead take a protection mask directly. While clever, this broke support for ARM3 sections, because the code was now assuming that the protection of the PTE for the input address should be used -- while in NT Sections we instead use what are called ProtoType PTEs. This was very annoying to debug, but since the cause has been fixed, I've reverted back to the old convention in which the PTE is passed-in, and this can be a different PTE than the PTE for the address, as it should be. [NTOS]: Due to the reverting of the original path, another optimization, in which MiResolveDemandZeroFault was being called directly instead of going through MiDispatchFault and writing an invalid demand-zero PDE has also been removed. PDE faults are now going through the correct, expected path. [NTOS]: MiResolveDemandZeroFault was always creating Kernel PTEs. It should create User PTEs when necessary. [NTOS]: MiDeletePte was assuming any prototype PTE is a forked PTE. Forked PTEs only happen when the addresses in the PTE don't match, so check for that too. Part 2: ARM3 Section Object Fixes [NTOS]: Fix issue when trying to make both ROS_SECTION_OBJECTs and NT's SECTION co-exist. We relied on the *caller* knowing what kind of section this is, and that can't be a good idea. Now, when the caller requests an ARM3 section vs a ROS section, we use a marker to detect what kind of section this is for later APIs. [NTOS]: For section VADs, we were storing the ReactOS MEMORY_AREA in the ControlArea... 
however, the mappings of one individual section object share a single control area, even though they have multiple MEMORY_AREAs (one for each mapping). As such, we overwrote the MEMORY_AREA continously, and at free-time, double or triple-freed the same memory area. [NTOS]: Moved the MEMORY_AREA to the "Banked" field of the long VAD, instead of the ControlArea. Allocate MMVAD_LONGs for ARM3 sections for now, to support this. Also, after deleting the MEMORY_AREA while parsing VADs, we now use a special marker to detect double-frees, and we also use a special marker to make sure we have a Long VAD as expected. svn path=/trunk/; revision=56035
2012-03-05 16:41:46 +00:00
/* Either this was a bogus VA or we've fixed up a paged pool PDE */
MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
return Status;
}
/* Write a demand-zero PDE */
MI_WRITE_INVALID_PTE(PointerPde, DemandZeroPde);
/* Dispatch the fault */
Status = MiDispatchFault(TRUE,
PointerPte,
PointerPde,
NULL,
FALSE,
PsGetCurrentProcess(),
TrapInformation,
NULL);
#if MI_TRACE_PFNS
UserPdeFault = FALSE;
#endif
/* We should come back with APCs enabled, and with a valid PDE */
ASSERT(KeAreAllApcsDisabled() == TRUE);
ASSERT(PointerPde->u.Hard.Valid == 1);
}
else
{
/* Not yet implemented in ReactOS */
ASSERT(MI_IS_PAGE_LARGE(PointerPde) == FALSE);
}
/* Now capture the PTE. Ignore virtual faults for now */
TempPte = *PointerPte;
ASSERT(TempPte.u.Hard.Valid == 0);
/* Quick check for demand-zero */
if (TempPte.u.Long == (MM_READWRITE << MM_PTE_SOFTWARE_PROTECTION_BITS))
{
/* Resolve the fault */
MiResolveDemandZeroFault(Address,
Two Part Patch which fixes ARM3 Section Support (not yet enabled). This had been enabled in the past for testing and resulted in bizare crashes during testing. The amount of fixing required should reveal why: Part 1: Page Fault Path Fixes [NTOS]: As an optimization, someone seems to have had changed the MiResolveDemandZeroFault prototype not to require a PTE, and to instead take a protection mask directly. While clever, this broke support for ARM3 sections, because the code was now assuming that the protection of the PTE for the input address should be used -- while in NT Sections we instead use what are called ProtoType PTEs. This was very annoying to debug, but since the cause has been fixed, I've reverted back to the old convention in which the PTE is passed-in, and this can be a different PTE than the PTE for the address, as it should be. [NTOS]: Due to the reverting of the original path, another optimization, in which MiResolveDemandZeroFault was being called directly instead of going through MiDispatchFault and writing an invalid demand-zero PDE has also been removed. PDE faults are now going through the correct, expected path. [NTOS]: MiResolveDemandZeroFault was always creating Kernel PTEs. It should create User PTEs when necessary. [NTOS]: MiDeletePte was assuming any prototype PTE is a forked PTE. Forked PTEs only happen when the addresses in the PTE don't match, so check for that too. Part 2: ARM3 Section Object Fixes [NTOS]: Fix issue when trying to make both ROS_SECTION_OBJECTs and NT's SECTION co-exist. We relied on the *caller* knowing what kind of section this is, and that can't be a good idea. Now, when the caller requests an ARM3 section vs a ROS section, we use a marker to detect what kind of section this is for later APIs. [NTOS]: For section VADs, we were storing the ReactOS MEMORY_AREA in the ControlArea... 
however, the mappings of one individual section object share a single control area, even though they have multiple MEMORY_AREAs (one for each mapping). As such, we overwrote the MEMORY_AREA continously, and at free-time, double or triple-freed the same memory area. [NTOS]: Moved the MEMORY_AREA to the "Banked" field of the long VAD, instead of the ControlArea. Allocate MMVAD_LONGs for ARM3 sections for now, to support this. Also, after deleting the MEMORY_AREA while parsing VADs, we now use a special marker to detect double-frees, and we also use a special marker to make sure we have a Long VAD as expected. svn path=/trunk/; revision=56035
2012-03-05 16:41:46 +00:00
PointerPte,
CurrentProcess,
MM_NOIRQL);
/* Return the status */
MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
return STATUS_PAGE_FAULT_DEMAND_ZERO;
}
/* Check for zero PTE */
if (TempPte.u.Long == 0)
{
/* Check if this address range belongs to a valid allocation (VAD) */
ProtoPte = MiCheckVirtualAddress(Address, &ProtectionCode, &Vad);
if (ProtectionCode == MM_NOACCESS)
{
[NTOS]: A few other fixups to the page fault path: 1) Assert on empty kernel PTE instead of handling it as a bugcheck. Windows ASSERTs too. Also clarify some ASSERTs which Windows also does versus ASSERTs we are only doing due to lack of support for said feature. 2) User page fault path can now distinguish between a user-mode PTE fault, and a kernel-mode fault on a user PDE, both by creating a correct kernel PDE when needed instead of always creating user PTEs, as well as by only touching the UsedPageTableEntry reference counting mechanism when a user-address is in play. 3) Related to #2, also recognize when the faulting PTE is actually a PDE in the self-mapping region -- another scenario when the "user fault" is actually a kernel fault for a user PDE. 4) Add one more path where a Paged Pool PDE fixup can save the day instead of always faulting. 5) Finally, related to #2 and #3, handle the MI_IS_PAGE_TABLE_OR_HYPER_ADDRESS scenario for a User PDE by treating it as a user fault. The code looks deceptively similar but there are slight differences which require the separate codepaths with some duplicated code. The magic is in the ordering. In trunk, these changes should not cause any regressions (let's hope so). On the internal VAD-based Virtual Memory branch, they now allow booting to 3rd stage and a fully usable ReactOS environment. MEMORY_AREA_VIRTUAL_MEMORY is gone on that branch. It's coming. [NTOS]: Use PAGE_READWRITE as hardcoded protection instead of PAGE_EXECUTE_READWRITE -- the difference is meaningless on ReactOS Mm but actually causes issues on ARM3 with VADs. svn path=/trunk/; revision=55938
2012-02-29 23:11:21 +00:00
#if (_MI_PAGING_LEVELS == 2)
/* Could be a page table for paged pool */
MiCheckPdeForPagedPool(Address);
[NTOS]: A few other fixups to the page fault path: 1) Assert on empty kernel PTE instead of handling it as a bugcheck. Windows ASSERTs too. Also clarify some ASSERTs which Windows also does versus ASSERTs we are only doing due to lack of support for said feature. 2) User page fault path can now distinguish between a user-mode PTE fault, and a kernel-mode fault on a user PDE, both by creating a correct kernel PDE when needed instead of always creating user PTEs, as well as by only touching the UsedPageTableEntry reference counting mechanism when a user-address is in play. 3) Related to #2, also recognize when the faulting PTE is actually a PDE in the self-mapping region -- another scenario when the "user fault" is actually a kernel fault for a user PDE. 4) Add one more path where a Paged Pool PDE fixup can save the day instead of always faulting. 5) Finally, related to #2 and #3, handle the MI_IS_PAGE_TABLE_OR_HYPER_ADDRESS scenario for a User PDE by treating it as a user fault. The code looks deceptively similar but there are slight differences which require the separate codepaths with some duplicated code. The magic is in the ordering. In trunk, these changes should not cause any regressions (let's hope so). On the internal VAD-based Virtual Memory branch, they now allow booting to 3rd stage and a fully usable ReactOS environment. MEMORY_AREA_VIRTUAL_MEMORY is gone on that branch. It's coming. [NTOS]: Use PAGE_READWRITE as hardcoded protection instead of PAGE_EXECUTE_READWRITE -- the difference is meaningless on ReactOS Mm but actually causes issues on ARM3 with VADs. svn path=/trunk/; revision=55938
2012-02-29 23:11:21 +00:00
#endif
/* Has the code above changed anything -- is this now a valid PTE? */
Status = (PointerPte->u.Hard.Valid == 1) ? STATUS_SUCCESS : STATUS_ACCESS_VIOLATION;
[NTOS]: A few other fixups to the page fault path: 1) Assert on empty kernel PTE instead of handling it as a bugcheck. Windows ASSERTs too. Also clarify some ASSERTs which Windows also does versus ASSERTs we are only doing due to lack of support for said feature. 2) User page fault path can now distinguish between a user-mode PTE fault, and a kernel-mode fault on a user PDE, both by creating a correct kernel PDE when needed instead of always creating user PTEs, as well as by only touching the UsedPageTableEntry reference counting mechanism when a user-address is in play. 3) Related to #2, also recognize when the faulting PTE is actually a PDE in the self-mapping region -- another scenario when the "user fault" is actually a kernel fault for a user PDE. 4) Add one more path where a Paged Pool PDE fixup can save the day instead of always faulting. 5) Finally, related to #2 and #3, handle the MI_IS_PAGE_TABLE_OR_HYPER_ADDRESS scenario for a User PDE by treating it as a user fault. The code looks deceptively similar but there are slight differences which require the separate codepaths with some duplicated code. The magic is in the ordering. In trunk, these changes should not cause any regressions (let's hope so). On the internal VAD-based Virtual Memory branch, they now allow booting to 3rd stage and a fully usable ReactOS environment. MEMORY_AREA_VIRTUAL_MEMORY is gone on that branch. It's coming. [NTOS]: Use PAGE_READWRITE as hardcoded protection instead of PAGE_EXECUTE_READWRITE -- the difference is meaningless on ReactOS Mm but actually causes issues on ARM3 with VADs. svn path=/trunk/; revision=55938
2012-02-29 23:11:21 +00:00
/* Either this was a bogus VA or we've fixed up a paged pool PDE */
MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
return Status;
}
/* No guard page support yet */
ASSERT((ProtectionCode & MM_DECOMMIT) == 0);
/*
* Check if this is a real user-mode address or actually a kernel-mode
* page table for a user mode address
*/
if (Address <= MM_HIGHEST_USER_ADDRESS)
{
/* Add an additional page table reference */
MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]++;
ASSERT(MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] <= PTE_COUNT);
}
/* Did we get a prototype PTE back? */
if (!ProtoPte)
{
/* Is this PTE actually part of the PDE-PTE self-mapping directory? */
if (PointerPde == MiAddressToPde(PTE_BASE))
{
/* Then it's really a demand-zero PDE (on behalf of user-mode) */
MI_WRITE_INVALID_PTE(PointerPte, DemandZeroPde);
}
else
{
/* No, create a new PTE. First, write the protection */
PointerPte->u.Soft.Protection = ProtectionCode;
}
/* Lock the PFN database since we're going to grab a page */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
/* Make sure we have enough pages */
ASSERT(MmAvailablePages >= 32);
[NTOS]: A few other fixups to the page fault path: 1) Assert on empty kernel PTE instead of handling it as a bugcheck. Windows ASSERTs too. Also clarify some ASSERTs which Windows also does versus ASSERTs we are only doing due to lack of support for said feature. 2) User page fault path can now distinguish between a user-mode PTE fault, and a kernel-mode fault on a user PDE, both by creating a correct kernel PDE when needed instead of always creating user PTEs, as well as by only touching the UsedPageTableEntry reference counting mechanism when a user-address is in play. 3) Related to #2, also recognize when the faulting PTE is actually a PDE in the self-mapping region -- another scenario when the "user fault" is actually a kernel fault for a user PDE. 4) Add one more path where a Paged Pool PDE fixup can save the day instead of always faulting. 5) Finally, related to #2 and #3, handle the MI_IS_PAGE_TABLE_OR_HYPER_ADDRESS scenario for a User PDE by treating it as a user fault. The code looks deceptively similar but there are slight differences which require the separate codepaths with some duplicated code. The magic is in the ordering. In trunk, these changes should not cause any regressions (let's hope so). On the internal VAD-based Virtual Memory branch, they now allow booting to 3rd stage and a fully usable ReactOS environment. MEMORY_AREA_VIRTUAL_MEMORY is gone on that branch. It's coming. [NTOS]: Use PAGE_READWRITE as hardcoded protection instead of PAGE_EXECUTE_READWRITE -- the difference is meaningless on ReactOS Mm but actually causes issues on ARM3 with VADs. svn path=/trunk/; revision=55938
2012-02-29 23:11:21 +00:00
/* Try to get a zero page */
MI_SET_USAGE(MI_USAGE_PEB_TEB);
MI_SET_PROCESS2(CurrentProcess->ImageFileName);
Color = MI_GET_NEXT_PROCESS_COLOR(CurrentProcess);
PageFrameIndex = MiRemoveZeroPageSafe(Color);
if (!PageFrameIndex)
{
/* Grab a page out of there. Later we should grab a colored zero page */
PageFrameIndex = MiRemoveAnyPage(Color);
ASSERT(PageFrameIndex);
/* Release the lock since we need to do some zeroing */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
/* Zero out the page, since it's for user-mode */
MiZeroPfn(PageFrameIndex);
/* Grab the lock again so we can initialize the PFN entry */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
}
/* Initialize the PFN entry now */
MiInitializePfn(PageFrameIndex, PointerPte, 1);
/* And we're done with the lock */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
/* Increment the count of pages in the process */
CurrentProcess->NumberOfPrivatePages++;
/* One more demand-zero fault */
InterlockedIncrement(&KeGetCurrentPrcb()->MmDemandZeroCount);
/* Fault on user PDE, or fault on user PTE? */
if (PointerPte <= MiHighestUserPte)
{
/* User fault, build a user PTE */
MI_MAKE_HARDWARE_PTE_USER(&TempPte,
PointerPte,
PointerPte->u.Soft.Protection,
PageFrameIndex);
}
else
{
/* This is a user-mode PDE, create a kernel PTE for it */
MI_MAKE_HARDWARE_PTE(&TempPte,
PointerPte,
PointerPte->u.Soft.Protection,
PageFrameIndex);
}
/* Write the dirty bit for writeable pages */
if (MI_IS_PAGE_WRITEABLE(&TempPte)) MI_MAKE_DIRTY_PAGE(&TempPte);
/* And now write down the PTE, making the address valid */
MI_WRITE_VALID_PTE(PointerPte, TempPte);
Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
ASSERT(Pfn1->u1.Event == NULL);
/* Demand zero */
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
return STATUS_PAGE_FAULT_DEMAND_ZERO;
[NTOS]: A few other fixups to the page fault path: 1) Assert on empty kernel PTE instead of handling it as a bugcheck. Windows ASSERTs too. Also clarify some ASSERTs which Windows also does versus ASSERTs we are only doing due to lack of support for said feature. 2) User page fault path can now distinguish between a user-mode PTE fault, and a kernel-mode fault on a user PDE, both by creating a correct kernel PDE when needed instead of always creating user PTEs, as well as by only touching the UsedPageTableEntry reference counting mechanism when a user-address is in play. 3) Related to #2, also recognize when the faulting PTE is actually a PDE in the self-mapping region -- another scenario when the "user fault" is actually a kernel fault for a user PDE. 4) Add one more path where a Paged Pool PDE fixup can save the day instead of always faulting. 5) Finally, related to #2 and #3, handle the MI_IS_PAGE_TABLE_OR_HYPER_ADDRESS scenario for a User PDE by treating it as a user fault. The code looks deceptively similar but there are slight differences which require the separate codepaths with some duplicated code. The magic is in the ordering. In trunk, these changes should not cause any regressions (let's hope so). On the internal VAD-based Virtual Memory branch, they now allow booting to 3rd stage and a fully usable ReactOS environment. MEMORY_AREA_VIRTUAL_MEMORY is gone on that branch. It's coming. [NTOS]: Use PAGE_READWRITE as hardcoded protection instead of PAGE_EXECUTE_READWRITE -- the difference is meaningless on ReactOS Mm but actually causes issues on ARM3 with VADs. svn path=/trunk/; revision=55938
2012-02-29 23:11:21 +00:00
}
/* No guard page support yet */
ASSERT((ProtectionCode & MM_DECOMMIT) == 0);
ASSERT(ProtectionCode != 0x100);
/* Write the prototype PTE */
TempPte = PrototypePte;
TempPte.u.Soft.Protection = ProtectionCode;
MI_WRITE_INVALID_PTE(PointerPte, TempPte);
}
else
{
/* Get the protection code and check if this is a proto PTE */
ProtectionCode = TempPte.u.Soft.Protection;
if (TempPte.u.Soft.Prototype)
{
/* Do we need to go find the real PTE? */
if (TempPte.u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)
{
/* Get the prototype pte and VAD for it */
ProtoPte = MiCheckVirtualAddress(Address,
&ProtectionCode,
&Vad);
if (!ProtoPte)
{
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
return STATUS_ACCESS_VIOLATION;
}
}
else
{
/* Get the prototype PTE! */
ProtoPte = MiProtoPteToPte(&TempPte);
/* Is it read-only */
if (TempPte.u.Proto.ReadOnly)
{
/* Set read-only code */
ProtectionCode = MM_READONLY;
}
else
{
/* Set unknown protection */
ProtectionCode = 0x100;
ASSERT(CurrentProcess->CloneRoot != NULL);
}
}
}
}
/* FIXME: Run MiAccessCheck */
/* Dispatch the fault */
Status = MiDispatchFault(StoreInstruction,
Address,
PointerPte,
ProtoPte,
FALSE,
CurrentProcess,
TrapInformation,
Vad);
/* Return the status */
ASSERT(NT_SUCCESS(Status));
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
return Status;
}
NTSTATUS
NTAPI
MmGetExecuteOptions(IN PULONG ExecuteOptions)
{
    PKPROCESS Process = &PsGetCurrentProcess()->Pcb;
    ULONG Options = 0;

    /* This API may only be called at passive level */
    ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);

    /* Translate each NX-related kernel process flag into its user-visible bit */
    if (Process->Flags.ExecuteDisable) Options |= MEM_EXECUTE_OPTION_DISABLE;
    if (Process->Flags.ExecuteEnable) Options |= MEM_EXECUTE_OPTION_ENABLE;
    if (Process->Flags.DisableThunkEmulation) Options |= MEM_EXECUTE_OPTION_DISABLE_THUNK_EMULATION;
    if (Process->Flags.Permanent) Options |= MEM_EXECUTE_OPTION_PERMANENT;
    if (Process->Flags.ExecuteDispatchEnable) Options |= MEM_EXECUTE_OPTION_EXECUTE_DISPATCH_ENABLE;
    if (Process->Flags.ImageDispatchEnable) Options |= MEM_EXECUTE_OPTION_IMAGE_DISPATCH_ENABLE;

    /* Hand the combined mask back to the caller and report success */
    *ExecuteOptions = Options;
    return STATUS_SUCCESS;
}
NTSTATUS
NTAPI
MmSetExecuteOptions(IN ULONG ExecuteOptions)
{
    PKPROCESS Process = &PsGetCurrentProcess()->Pcb;
    KLOCK_QUEUE_HANDLE LockHandle;
    NTSTATUS Status = STATUS_ACCESS_DENIED;

    /* This API may only be called at passive level */
    ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);

    /* Reject any bits outside the documented option mask */
    if (ExecuteOptions & ~MEM_EXECUTE_OPTION_VALID_FLAGS)
    {
        /* Fail */
        DPRINT1("Invalid no-execute options\n");
        return STATUS_INVALID_PARAMETER;
    }

    /* The NX state may only be changed while holding the process lock */
    KiAcquireProcessLock(Process, &LockHandle);

    /* A permanent setting can never be overridden -- leave STATUS_ACCESS_DENIED */
    if (!Process->Flags.Permanent)
    {
        /*
         * ExecuteDisable is recomputed from scratch; every other flag below is
         * sticky -- it is only ever turned on, never cleared, by this routine.
         */
        Process->Flags.ExecuteDisable = FALSE;

        if (ExecuteOptions & MEM_EXECUTE_OPTION_DISABLE) Process->Flags.ExecuteDisable = TRUE;
        if (ExecuteOptions & MEM_EXECUTE_OPTION_ENABLE) Process->Flags.ExecuteEnable = TRUE;
        if (ExecuteOptions & MEM_EXECUTE_OPTION_DISABLE_THUNK_EMULATION) Process->Flags.DisableThunkEmulation = TRUE;
        if (ExecuteOptions & MEM_EXECUTE_OPTION_PERMANENT) Process->Flags.Permanent = TRUE;
        if (ExecuteOptions & MEM_EXECUTE_OPTION_EXECUTE_DISPATCH_ENABLE) Process->Flags.ExecuteDispatchEnable = TRUE;
        if (ExecuteOptions & MEM_EXECUTE_OPTION_IMAGE_DISPATCH_ENABLE) Process->Flags.ImageDispatchEnable = TRUE;

        /* Enabling execution implies both dispatch-enable flags by default */
        if (Process->Flags.ExecuteEnable)
        {
            Process->Flags.ExecuteDispatchEnable = TRUE;
            Process->Flags.ImageDispatchEnable = TRUE;
        }

        /* All good */
        Status = STATUS_SUCCESS;
    }

    /* Release the lock and return status */
    KiReleaseProcessLock(&LockHandle);
    return Status;
}
/* EOF */