/*
 * Changelog (imported from SVN r43507, 2009-10-15):
 * - Implement ARM3 page fault handling.
 * - Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
 * - Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
 * - Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the
 *   kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have
 *   paged in paged pool that another process knows nothing about when he faults.
 * - Similar to the hack ReactOS Mm uses, but done properly.
 * - This is what that shadow system page directory is finally being used for.
 * - Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
 * - Disable APCs while dispatching faults, and pseudo-use the working set lock.
 * - Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
 * - Assert if we have a paged out PTE, this shouldn't happen yet.
 * - Enable test to see if we can touch a paged pool allocation.
 */
|
|
|
/*
|
|
|
|
* PROJECT: ReactOS Kernel
|
|
|
|
* LICENSE: BSD - See COPYING.ARM in the top level directory
|
|
|
|
* FILE: ntoskrnl/mm/ARM3/pagfault.c
|
|
|
|
* PURPOSE: ARM Memory Manager Page Fault Handling
|
|
|
|
* PROGRAMMERS: ReactOS Portable Systems Group
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* INCLUDES *******************************************************************/
|
|
|
|
|
|
|
|
#include <ntoskrnl.h>
|
|
|
|
#define NDEBUG
|
|
|
|
#include <debug.h>
|
|
|
|
|
|
|
|
#define MODULE_INVOLVED_IN_ARM3
|
|
|
|
#include "../ARM3/miarm.h"
|
|
|
|
|
|
|
|
/* GLOBALS ********************************************************************/
|
|
|
|
|
2012-07-31 06:47:47 +00:00
|
|
|
/* Sentinel pseudo-process value (not a valid EPROCESS pointer) used to tag
   session-space ("Hydra") faults when a real process context does not apply */
#define HYDRA_PROCESS (PEPROCESS)1

#if MI_TRACE_PFNS
/* NOTE(review): presumably set while a user-mode PDE fault is being resolved,
   so PFN tracing can attribute the allocation -- confirm against the fault path */
BOOLEAN UserPdeFault = FALSE;
#endif
|
|
|
|
|
/* (Duplicate of the r43507 changelog above -- kept as a comment so the file compiles.) */
|
|
|
/* PRIVATE FUNCTIONS **********************************************************/
|
|
|
|
|
2010-07-22 18:37:27 +00:00
|
|
|
PMMPTE
|
|
|
|
NTAPI
|
|
|
|
MiCheckVirtualAddress(IN PVOID VirtualAddress,
|
|
|
|
OUT PULONG ProtectCode,
|
|
|
|
OUT PMMVAD *ProtoVad)
|
|
|
|
{
|
|
|
|
PMMVAD Vad;
|
2010-10-05 08:14:02 +00:00
|
|
|
PMMPTE PointerPte;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-07-22 18:37:27 +00:00
|
|
|
/* No prototype/section support for now */
|
|
|
|
*ProtoVad = NULL;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-31 06:47:47 +00:00
|
|
|
/* User or kernel fault? */
|
|
|
|
if (VirtualAddress <= MM_HIGHEST_USER_ADDRESS)
|
2010-10-07 17:27:23 +00:00
|
|
|
{
|
2012-07-31 06:47:47 +00:00
|
|
|
/* Special case for shared data */
|
|
|
|
if (PAGE_ALIGN(VirtualAddress) == (PVOID)MM_SHARED_USER_DATA_VA)
|
2010-10-07 17:27:23 +00:00
|
|
|
{
|
2012-07-31 06:47:47 +00:00
|
|
|
/* It's a read-only page */
|
|
|
|
*ProtectCode = MM_READONLY;
|
|
|
|
return MmSharedUserDataPte;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Find the VAD, it might not exist if the address is bogus */
|
|
|
|
Vad = MiLocateAddress(VirtualAddress);
|
|
|
|
if (!Vad)
|
|
|
|
{
|
|
|
|
/* Bogus virtual address */
|
2010-10-07 17:27:23 +00:00
|
|
|
*ProtectCode = MM_NOACCESS;
|
|
|
|
return NULL;
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-31 06:47:47 +00:00
|
|
|
/* ReactOS does not handle physical memory VADs yet */
|
|
|
|
ASSERT(Vad->u.VadFlags.VadType != VadDevicePhysicalMemory);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-31 06:47:47 +00:00
|
|
|
/* Check if it's a section, or just an allocation */
|
|
|
|
if (Vad->u.VadFlags.PrivateMemory)
|
|
|
|
{
|
|
|
|
/* ReactOS does not handle AWE VADs yet */
|
|
|
|
ASSERT(Vad->u.VadFlags.VadType != VadAwe);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-31 06:47:47 +00:00
|
|
|
/* This must be a TEB/PEB VAD */
|
|
|
|
if (Vad->u.VadFlags.MemCommit)
|
|
|
|
{
|
|
|
|
/* It's committed, so return the VAD protection */
|
|
|
|
*ProtectCode = (ULONG)Vad->u.VadFlags.Protection;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* It has not yet been committed, so return no access */
|
|
|
|
*ProtectCode = MM_NOACCESS;
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-31 06:47:47 +00:00
|
|
|
/* In both cases, return no PTE */
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* ReactOS does not supoprt these VADs yet */
|
|
|
|
ASSERT(Vad->u.VadFlags.VadType != VadImageMap);
|
|
|
|
ASSERT(Vad->u2.VadFlags2.ExtendableFile == 0);
|
2010-10-05 08:14:02 +00:00
|
|
|
|
2012-07-31 06:47:47 +00:00
|
|
|
/* Return the proto VAD */
|
|
|
|
*ProtoVad = Vad;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-31 06:47:47 +00:00
|
|
|
/* Get the prototype PTE for this page */
|
|
|
|
PointerPte = (((ULONG_PTR)VirtualAddress >> PAGE_SHIFT) - Vad->StartingVpn) + Vad->FirstPrototypePte;
|
|
|
|
ASSERT(PointerPte != NULL);
|
|
|
|
ASSERT(PointerPte <= Vad->LastContiguousPte);
|
|
|
|
|
|
|
|
/* Return the Prototype PTE and the protection for the page mapping */
|
[NTOS]: A few key changes to the page fault path:
1) MiCheckVirtualAddress should be called *after* determining if the PTE is a Demand Zero PTE. This is because when memory is allocated with MEM_RESERVE, and then MEM_COMMIT is called later, the VAD does not have the MemCommit flag set to TRUE. As such, MiCheckVirtualAddress returns MM_NOACCESS for the VAD (even though one is found) and the demand zero fault results in an access violation. Double-checked with Windows and this is the right behavior.
2) MiCheckVirtualAddress now supports non-commited reserve VADs (ie: trying to access MEM_RESERVE memory). It used to ASSERT, now it returns MM_NOACCESS so an access violation is raised. Before change #1, this would also happen if MEM_COMMIT was later performed on the ranges, but this is now fixed.
3) When calling MiResolveDemandZeroFault, we should not make the PDE a demand zero PDE. This is senseless. The whole point is that the PDE does exist, and MiInitializePfn needs it to keep track of the page table allocation. Removed the nonsensical line of code which performed cleard the PDE during a demand-zero fault.
I am able to boot to 3rd stage with these changes, so I have seen no regressions. Additionally, with these changes, the as-of-yet-uncommitted VAD-based Virtual Memory code completes 1st stage setup successfully, instead of instantly crashing on boot.
svn path=/trunk/; revision=55894
2012-02-27 23:42:22 +00:00
|
|
|
*ProtectCode = (ULONG)Vad->u.VadFlags.Protection;
|
2012-07-31 06:47:47 +00:00
|
|
|
return PointerPte;
|
[NTOS]: A few key changes to the page fault path:
1) MiCheckVirtualAddress should be called *after* determining if the PTE is a Demand Zero PTE. This is because when memory is allocated with MEM_RESERVE, and then MEM_COMMIT is called later, the VAD does not have the MemCommit flag set to TRUE. As such, MiCheckVirtualAddress returns MM_NOACCESS for the VAD (even though one is found) and the demand zero fault results in an access violation. Double-checked with Windows and this is the right behavior.
2) MiCheckVirtualAddress now supports non-commited reserve VADs (ie: trying to access MEM_RESERVE memory). It used to ASSERT, now it returns MM_NOACCESS so an access violation is raised. Before change #1, this would also happen if MEM_COMMIT was later performed on the ranges, but this is now fixed.
3) When calling MiResolveDemandZeroFault, we should not make the PDE a demand zero PDE. This is senseless. The whole point is that the PDE does exist, and MiInitializePfn needs it to keep track of the page table allocation. Removed the nonsensical line of code which performed cleard the PDE during a demand-zero fault.
I am able to boot to 3rd stage with these changes, so I have seen no regressions. Additionally, with these changes, the as-of-yet-uncommitted VAD-based Virtual Memory code completes 1st stage setup successfully, instead of instantly crashing on boot.
svn path=/trunk/; revision=55894
2012-02-27 23:42:22 +00:00
|
|
|
}
|
2012-07-31 06:47:47 +00:00
|
|
|
}
|
|
|
|
else if (MI_IS_PAGE_TABLE_ADDRESS(VirtualAddress))
|
|
|
|
{
|
|
|
|
/* This should never happen, as these addresses are handled by the double-maping */
|
|
|
|
if (((PMMPTE)VirtualAddress >= MiAddressToPte(MmPagedPoolStart)) &&
|
|
|
|
((PMMPTE)VirtualAddress <= MmPagedPoolInfo.LastPteForPagedPool))
|
[NTOS]: A few key changes to the page fault path:
1) MiCheckVirtualAddress should be called *after* determining if the PTE is a Demand Zero PTE. This is because when memory is allocated with MEM_RESERVE, and then MEM_COMMIT is called later, the VAD does not have the MemCommit flag set to TRUE. As such, MiCheckVirtualAddress returns MM_NOACCESS for the VAD (even though one is found) and the demand zero fault results in an access violation. Double-checked with Windows and this is the right behavior.
2) MiCheckVirtualAddress now supports non-commited reserve VADs (ie: trying to access MEM_RESERVE memory). It used to ASSERT, now it returns MM_NOACCESS so an access violation is raised. Before change #1, this would also happen if MEM_COMMIT was later performed on the ranges, but this is now fixed.
3) When calling MiResolveDemandZeroFault, we should not make the PDE a demand zero PDE. This is senseless. The whole point is that the PDE does exist, and MiInitializePfn needs it to keep track of the page table allocation. Removed the nonsensical line of code which performed cleard the PDE during a demand-zero fault.
I am able to boot to 3rd stage with these changes, so I have seen no regressions. Additionally, with these changes, the as-of-yet-uncommitted VAD-based Virtual Memory code completes 1st stage setup successfully, instead of instantly crashing on boot.
svn path=/trunk/; revision=55894
2012-02-27 23:42:22 +00:00
|
|
|
{
|
2012-07-31 06:47:47 +00:00
|
|
|
/* Fail such access */
|
[NTOS]: A few key changes to the page fault path:
1) MiCheckVirtualAddress should be called *after* determining if the PTE is a Demand Zero PTE. This is because when memory is allocated with MEM_RESERVE, and then MEM_COMMIT is called later, the VAD does not have the MemCommit flag set to TRUE. As such, MiCheckVirtualAddress returns MM_NOACCESS for the VAD (even though one is found) and the demand zero fault results in an access violation. Double-checked with Windows and this is the right behavior.
2) MiCheckVirtualAddress now supports non-commited reserve VADs (ie: trying to access MEM_RESERVE memory). It used to ASSERT, now it returns MM_NOACCESS so an access violation is raised. Before change #1, this would also happen if MEM_COMMIT was later performed on the ranges, but this is now fixed.
3) When calling MiResolveDemandZeroFault, we should not make the PDE a demand zero PDE. This is senseless. The whole point is that the PDE does exist, and MiInitializePfn needs it to keep track of the page table allocation. Removed the nonsensical line of code which performed cleard the PDE during a demand-zero fault.
I am able to boot to 3rd stage with these changes, so I have seen no regressions. Additionally, with these changes, the as-of-yet-uncommitted VAD-based Virtual Memory code completes 1st stage setup successfully, instead of instantly crashing on boot.
svn path=/trunk/; revision=55894
2012-02-27 23:42:22 +00:00
|
|
|
*ProtectCode = MM_NOACCESS;
|
2012-07-31 06:47:47 +00:00
|
|
|
return NULL;
|
[NTOS]: A few key changes to the page fault path:
1) MiCheckVirtualAddress should be called *after* determining if the PTE is a Demand Zero PTE. This is because when memory is allocated with MEM_RESERVE, and then MEM_COMMIT is called later, the VAD does not have the MemCommit flag set to TRUE. As such, MiCheckVirtualAddress returns MM_NOACCESS for the VAD (even though one is found) and the demand zero fault results in an access violation. Double-checked with Windows and this is the right behavior.
2) MiCheckVirtualAddress now supports non-commited reserve VADs (ie: trying to access MEM_RESERVE memory). It used to ASSERT, now it returns MM_NOACCESS so an access violation is raised. Before change #1, this would also happen if MEM_COMMIT was later performed on the ranges, but this is now fixed.
3) When calling MiResolveDemandZeroFault, we should not make the PDE a demand zero PDE. This is senseless. The whole point is that the PDE does exist, and MiInitializePfn needs it to keep track of the page table allocation. Removed the nonsensical line of code which performed cleard the PDE during a demand-zero fault.
I am able to boot to 3rd stage with these changes, so I have seen no regressions. Additionally, with these changes, the as-of-yet-uncommitted VAD-based Virtual Memory code completes 1st stage setup successfully, instead of instantly crashing on boot.
svn path=/trunk/; revision=55894
2012-02-27 23:42:22 +00:00
|
|
|
}
|
2012-07-31 06:47:47 +00:00
|
|
|
|
|
|
|
/* Return full access rights */
|
|
|
|
*ProtectCode = MM_READWRITE;
|
2010-10-05 08:14:02 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
2012-07-31 06:47:47 +00:00
|
|
|
else if (MI_IS_SESSION_ADDRESS(VirtualAddress))
|
2010-10-05 08:14:02 +00:00
|
|
|
{
|
2012-07-31 06:47:47 +00:00
|
|
|
/* ReactOS does not have an image list yet, so bail out to failure case */
|
|
|
|
ASSERT(IsListEmpty(&MmSessionSpace->ImageList));
|
2010-10-05 08:14:02 +00:00
|
|
|
}
|
2012-07-31 06:47:47 +00:00
|
|
|
|
|
|
|
/* Default case -- failure */
|
|
|
|
*ProtectCode = MM_NOACCESS;
|
|
|
|
return NULL;
|
2010-07-22 18:37:27 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-02-06 10:46:52 +00:00
|
|
|
#if (_MI_PAGING_LEVELS == 2)
|
|
|
|
BOOLEAN
|
|
|
|
FORCEINLINE
|
|
|
|
MiSynchronizeSystemPde(PMMPDE PointerPde)
|
|
|
|
{
|
|
|
|
MMPDE SystemPde;
|
|
|
|
ULONG Index;
|
|
|
|
|
|
|
|
/* Get the Index from the PDE */
|
|
|
|
Index = ((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE);
|
|
|
|
|
|
|
|
/* Copy the PDE from the double-mapped system page directory */
|
|
|
|
SystemPde = MmSystemPagePtes[Index];
|
|
|
|
*PointerPde = SystemPde;
|
|
|
|
|
|
|
|
/* Make sure we re-read the PDE and PTE */
|
|
|
|
KeMemoryBarrierWithoutFence();
|
|
|
|
|
|
|
|
/* Return, if we had success */
|
|
|
|
return (BOOLEAN)SystemPde.u.Hard.Valid;
|
|
|
|
}
|
|
|
|
|
2012-07-31 06:47:47 +00:00
|
|
|
NTSTATUS
|
|
|
|
FASTCALL
|
|
|
|
MiCheckPdeForSessionSpace(IN PVOID Address)
|
|
|
|
{
|
2012-08-01 07:54:37 +00:00
|
|
|
MMPTE TempPde;
|
|
|
|
PMMPTE PointerPde;
|
2012-09-20 07:44:43 +00:00
|
|
|
PVOID SessionAddress;
|
2012-08-01 07:54:37 +00:00
|
|
|
ULONG Index;
|
|
|
|
|
|
|
|
/* Is this a session PTE? */
|
|
|
|
if (MI_IS_SESSION_PTE(Address))
|
|
|
|
{
|
|
|
|
/* Make sure the PDE for session space is valid */
|
|
|
|
PointerPde = MiAddressToPde(MmSessionSpace);
|
|
|
|
if (!PointerPde->u.Hard.Valid)
|
|
|
|
{
|
|
|
|
/* This means there's no valid session, bail out */
|
|
|
|
DbgPrint("MiCheckPdeForSessionSpace: No current session for PTE %p\n",
|
|
|
|
Address);
|
|
|
|
DbgBreakPoint();
|
|
|
|
return STATUS_ACCESS_VIOLATION;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Now get the session-specific page table for this address */
|
2012-09-20 07:44:43 +00:00
|
|
|
SessionAddress = MiPteToAddress(Address);
|
2012-09-20 08:20:24 +00:00
|
|
|
PointerPde = MiAddressToPte(Address);
|
2012-08-01 07:54:37 +00:00
|
|
|
if (PointerPde->u.Hard.Valid) return STATUS_WAIT_1;
|
|
|
|
|
|
|
|
/* It's not valid, so find it in the page table array */
|
2012-09-20 07:44:43 +00:00
|
|
|
Index = ((ULONG_PTR)SessionAddress - (ULONG_PTR)MmSessionBase) >> 22;
|
2012-08-01 07:54:37 +00:00
|
|
|
TempPde.u.Long = MmSessionSpace->PageTables[Index].u.Long;
|
|
|
|
if (TempPde.u.Hard.Valid)
|
|
|
|
{
|
|
|
|
/* The copy is valid, so swap it in */
|
|
|
|
InterlockedExchange((PLONG)PointerPde, TempPde.u.Long);
|
|
|
|
return STATUS_WAIT_1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We don't seem to have allocated a page table for this address yet? */
|
|
|
|
DbgPrint("MiCheckPdeForSessionSpace: No Session PDE for PTE %p, %p\n",
|
2012-09-20 07:44:43 +00:00
|
|
|
PointerPde->u.Long, SessionAddress);
|
2012-08-01 07:54:37 +00:00
|
|
|
DbgBreakPoint();
|
|
|
|
return STATUS_ACCESS_VIOLATION;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Is the address also a session address? If not, we're done */
|
|
|
|
if (!MI_IS_SESSION_ADDRESS(Address)) return STATUS_SUCCESS;
|
|
|
|
|
|
|
|
/* It is, so again get the PDE for session space */
|
|
|
|
PointerPde = MiAddressToPde(MmSessionSpace);
|
|
|
|
if (!PointerPde->u.Hard.Valid)
|
|
|
|
{
|
|
|
|
/* This means there's no valid session, bail out */
|
|
|
|
DbgPrint("MiCheckPdeForSessionSpace: No current session for VA %p\n",
|
|
|
|
Address);
|
|
|
|
DbgBreakPoint();
|
|
|
|
return STATUS_ACCESS_VIOLATION;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Now get the PDE for the address itself */
|
|
|
|
PointerPde = MiAddressToPde(Address);
|
|
|
|
if (!PointerPde->u.Hard.Valid)
|
|
|
|
{
|
|
|
|
/* Do the swap, we should be good to go */
|
|
|
|
Index = ((ULONG_PTR)Address - (ULONG_PTR)MmSessionBase) >> 22;
|
|
|
|
PointerPde->u.Long = MmSessionSpace->PageTables[Index].u.Long;
|
|
|
|
if (PointerPde->u.Hard.Valid) return STATUS_WAIT_1;
|
|
|
|
|
|
|
|
/* We had not allocated a page table for this session address yet, fail! */
|
|
|
|
DbgPrint("MiCheckPdeForSessionSpace: No Session PDE for VA %p, %p\n",
|
|
|
|
PointerPde->u.Long, Address);
|
|
|
|
DbgBreakPoint();
|
|
|
|
return STATUS_ACCESS_VIOLATION;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* It's valid, so there's nothing to do */
|
|
|
|
return STATUS_SUCCESS;
|
2012-07-31 06:47:47 +00:00
|
|
|
}
|
|
|
|
|
/* (Duplicate of the r43507 changelog at the top of the file -- kept as a comment so the file compiles.) */
|
|
|
NTSTATUS
|
|
|
|
FASTCALL
|
|
|
|
MiCheckPdeForPagedPool(IN PVOID Address)
|
|
|
|
{
|
2010-02-09 22:56:21 +00:00
|
|
|
PMMPDE PointerPde;
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
NTSTATUS Status = STATUS_SUCCESS;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-31 06:47:47 +00:00
|
|
|
/* Check session PDE */
|
|
|
|
if (MI_IS_SESSION_ADDRESS(Address)) return MiCheckPdeForSessionSpace(Address);
|
|
|
|
if (MI_IS_SESSION_PTE(Address)) return MiCheckPdeForSessionSpace(Address);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
//
|
|
|
|
// Check if this is a fault while trying to access the page table itself
|
|
|
|
//
|
2010-07-22 02:20:27 +00:00
|
|
|
if (MI_IS_SYSTEM_PAGE_TABLE_ADDRESS(Address))
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
{
|
|
|
|
//
|
|
|
|
// Send a hint to the page fault handler that this is only a valid fault
|
|
|
|
// if we already detected this was access within the page table range
|
|
|
|
//
|
2010-02-09 22:56:21 +00:00
|
|
|
PointerPde = (PMMPDE)MiAddressToPte(Address);
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
Status = STATUS_WAIT_1;
|
|
|
|
}
|
|
|
|
else if (Address < MmSystemRangeStart)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// This is totally illegal
|
|
|
|
//
|
2010-12-26 15:23:03 +00:00
|
|
|
return STATUS_ACCESS_VIOLATION;
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Get the PDE for the address
|
|
|
|
//
|
|
|
|
PointerPde = MiAddressToPde(Address);
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
//
|
|
|
|
// Check if it's not valid
|
|
|
|
//
|
|
|
|
if (PointerPde->u.Hard.Valid == 0)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Copy it from our double-mapped system page directory
|
|
|
|
//
|
|
|
|
InterlockedExchangePte(PointerPde,
|
2010-09-30 14:48:03 +00:00
|
|
|
MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)].u.Long);
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
//
|
|
|
|
// Return status
|
|
|
|
//
|
|
|
|
return Status;
|
|
|
|
}
|
2012-02-06 15:08:32 +00:00
|
|
|
#else
|
|
|
|
NTSTATUS
|
|
|
|
FASTCALL
|
|
|
|
MiCheckPdeForPagedPool(IN PVOID Address)
|
|
|
|
{
|
|
|
|
return STATUS_ACCESS_VIOLATION;
|
|
|
|
}
|
2012-02-06 10:46:52 +00:00
|
|
|
#endif
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
|
2010-07-22 02:20:27 +00:00
|
|
|
VOID
NTAPI
MiZeroPfn(IN PFN_NUMBER PageFrameNumber)
{
    PMMPTE ZeroPte;
    MMPTE TempPte;
    PMMPFN Pfn1;
    PVOID ZeroAddress;

    /* Zeroes the physical page identified by PageFrameNumber by temporarily
       mapping it through a system PTE. The caller only needs the PFN; no
       existing virtual mapping of the page is required (or touched). */

    /* Get the PFN for this page */
    Pfn1 = MiGetPfnEntry(PageFrameNumber);
    ASSERT(Pfn1);

    /* Grab a system PTE we can use to zero the page */
    ZeroPte = MiReserveSystemPtes(1, SystemPteSpace);
    ASSERT(ZeroPte);

    /* Initialize the PTE for it */
    TempPte = ValidKernelPte;
    TempPte.u.Hard.PageFrameNumber = PageFrameNumber;

    /* Setup caching to match the PFN's recorded cache attribute, so the
       temporary mapping doesn't conflict with any other mapping of the page */
    if (Pfn1->u3.e1.CacheAttribute == MiWriteCombined)
    {
        /* Write combining, no caching */
        MI_PAGE_DISABLE_CACHE(&TempPte);
        MI_PAGE_WRITE_COMBINED(&TempPte);
    }
    else if (Pfn1->u3.e1.CacheAttribute == MiNonCached)
    {
        /* Write through, no caching */
        MI_PAGE_DISABLE_CACHE(&TempPte);
        MI_PAGE_WRITE_THROUGH(&TempPte);
    }

    /* Make the system PTE valid with our PFN */
    MI_WRITE_VALID_PTE(ZeroPte, TempPte);

    /* Get the address it maps to, and zero it out */
    ZeroAddress = MiPteToAddress(ZeroPte);
    KeZeroPages(ZeroAddress, PAGE_SIZE);

    /* Now get rid of it (releasing the system PTE also flushes the mapping) */
    MiReleaseSystemPtes(ZeroPte, 1, SystemPteSpace);
}
|
|
|
|
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
NTSTATUS
NTAPI
MiResolveDemandZeroFault(IN PVOID Address,
                         IN PMMPTE PointerPte,
                         IN PEPROCESS Process,
                         IN KIRQL OldIrql)
{
    PFN_NUMBER PageFrameNumber = 0;
    MMPTE TempPte;
    BOOLEAN NeedZero = FALSE, HaveLock = FALSE;
    ULONG Color;
    PMMPFN Pfn1;

    /* Resolves a demand-zero fault on the given PTE: allocates a (zeroed)
       physical page and writes a valid hardware PTE for it.
       - PointerPte may differ from MiAddressToPte(Address): for NT sections
         this can be a prototype PTE (see r56035 notes in history).
       - OldIrql == MM_NOIRQL means the PFN lock is NOT held and must be
         acquired here; otherwise the caller already holds it at OldIrql.
       - Process may be NULL (kernel), HYDRA_PROCESS (session space), or a
         real process pointer (compared with '>' against the 1-valued
         HYDRA_PROCESS marker). */
    DPRINT("ARM3 Demand Zero Page Fault Handler for address: %p in process: %p\n",
            Address,
            Process);

    /* Must currently only be called by paging path */
    if ((Process > HYDRA_PROCESS) && (OldIrql == MM_NOIRQL))
    {
        /* Sanity check */
        ASSERT(MI_IS_PAGE_TABLE_ADDRESS(PointerPte));

        /* No forking yet */
        ASSERT(Process->ForkInProgress == NULL);

        /* Get process color (page-color hint for the allocator) */
        Color = MI_GET_NEXT_PROCESS_COLOR(Process);
        ASSERT(Color != 0xFFFFFFFF);

        /* We'll need a zero page */
        NeedZero = TRUE;
    }
    else
    {
        /* Check if we need a zero page: if the caller came in with the PFN
           lock held, we must zero manually after the fact */
        NeedZero = (OldIrql != MM_NOIRQL);

        /* Session-backed image views must be zeroed */
        if ((Process == HYDRA_PROCESS) &&
            ((MI_IS_SESSION_IMAGE_ADDRESS(Address)) ||
            ((Address >= MiSessionViewStart) && (Address < MiSessionSpaceWs))))
        {
            NeedZero = TRUE;
        }

        /* Hardcode unknown color -- selects the "no process color" branch below */
        Color = 0xFFFFFFFF;
    }

    /* Check if the PFN database should be acquired */
    if (OldIrql == MM_NOIRQL)
    {
        /* Acquire it and remember we should release it after */
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        HaveLock = TRUE;
    }

    /* We either manually locked the PFN DB, or already came with it locked */
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
    ASSERT(PointerPte->u.Hard.Valid == 0);

    /* Assert we have enough pages */
    ASSERT(MmAvailablePages >= 32);

#if MI_TRACE_PFNS
    if (UserPdeFault) MI_SET_USAGE(MI_USAGE_PAGE_TABLE);
    if (!UserPdeFault) MI_SET_USAGE(MI_USAGE_DEMAND_ZERO);
#endif
    if (Process) MI_SET_PROCESS2(Process->ImageFileName);
    if (!Process) MI_SET_PROCESS2("Kernel Demand 0");

    /* Do we need a zero page? (a known process color implies we do) */
    if (Color != 0xFFFFFFFF)
    {
        /* Try to get one, if we couldn't grab a free page and zero it */
        PageFrameNumber = MiRemoveZeroPageSafe(Color);
        if (!PageFrameNumber)
        {
            /* We'll need a free page and zero it manually */
            PageFrameNumber = MiRemoveAnyPage(Color);
            NeedZero = TRUE;
        }
    }
    else
    {
        /* Get a color, and see if we should grab a zero or non-zero page */
        Color = MI_GET_NEXT_COLOR();
        if (!NeedZero)
        {
            /* Process or system doesn't want a zero page, grab anything */
            PageFrameNumber = MiRemoveAnyPage(Color);
        }
        else
        {
            /* System wants a zero page, obtain one */
            PageFrameNumber = MiRemoveZeroPage(Color);
        }
    }

    /* Initialize it (ties the PFN entry to this PTE) */
    MiInitializePfn(PageFrameNumber, PointerPte, TRUE);

    /* Do we have the lock? */
    if (HaveLock)
    {
        /* Release it */
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        /* Update performance counters (real processes only, not session/kernel) */
        if (Process > HYDRA_PROCESS) Process->NumberOfPrivatePages++;
    }

    /* Increment demand zero faults */
    InterlockedIncrement(&KeGetCurrentPrcb()->MmDemandZeroCount);

    /* Zero the page if need be -- done after dropping the PFN lock since
       MiZeroPfn maps/unmaps a system PTE */
    if (NeedZero) MiZeroPfn(PageFrameNumber);

    /* Fault on user PDE, or fault on user PTE? */
    if (PointerPte <= MiHighestUserPte)
    {
        /* User fault, build a user PTE */
        MI_MAKE_HARDWARE_PTE_USER(&TempPte,
                                  PointerPte,
                                  PointerPte->u.Soft.Protection,
                                  PageFrameNumber);
    }
    else
    {
        /* This is a user-mode PDE, create a kernel PTE for it */
        MI_MAKE_HARDWARE_PTE(&TempPte,
                             PointerPte,
                             PointerPte->u.Soft.Protection,
                             PageFrameNumber);
    }

    /* Set it dirty if it's a writable page */
    if (MI_IS_PAGE_WRITEABLE(&TempPte)) MI_MAKE_DIRTY_PAGE(&TempPte);

    /* Write it */
    MI_WRITE_VALID_PTE(PointerPte, TempPte);

    /* Did we manually acquire the lock */
    if (HaveLock)
    {
        /* Get the PFN entry */
        Pfn1 = MI_PFN_ELEMENT(PageFrameNumber);

        /* Windows does these sanity checks */
        ASSERT(Pfn1->u1.Event == 0);
        ASSERT(Pfn1->u3.e1.PrototypePte == 0);
    }

    //
    // It's all good now
    //
    DPRINT("Demand zero page has now been paged in\n");
    return STATUS_PAGE_FAULT_DEMAND_ZERO;
}
|
|
|
|
|
2010-07-22 20:52:23 +00:00
|
|
|
NTSTATUS
NTAPI
MiCompleteProtoPteFault(IN BOOLEAN StoreInstruction,
                        IN PVOID Address,
                        IN PMMPTE PointerPte,
                        IN PMMPTE PointerProtoPte,
                        IN KIRQL OldIrql,
                        IN PMMPFN* LockedProtoPfn)
{
    MMPTE TempPte;
    PMMPTE OriginalPte, PageTablePte;
    ULONG_PTR Protection;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1, Pfn2;
    BOOLEAN OriginalProtection, DirtyPage;

    /* Completes a prototype-PTE fault: takes an already-valid prototype PTE
       and materializes a matching valid hardware PTE at PointerPte for
       Address. Entered with the PFN lock held at OldIrql; the lock is
       RELEASED inside this function before the final PTE is written.
       *LockedProtoPfn, if non-NULL, is a PFN reference taken by the caller
       and is dropped (and cleared) here. */

    /* Must be called with an valid prototype PTE, with the PFN lock held */
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
    ASSERT(PointerProtoPte->u.Hard.Valid == 1);

    /* Get the page */
    PageFrameIndex = PFN_FROM_PTE(PointerProtoPte);

    /* Get the PFN entry and set it as a prototype PTE */
    Pfn1 = MiGetPfnEntry(PageFrameIndex);
    Pfn1->u3.e1.PrototypePte = 1;

    /* Increment the share count for the page table */
    // FIXME: This doesn't work because we seem to bump the sharecount to two, and MiDeletePte gets annoyed and ASSERTs.
    // This could be beause MiDeletePte is now being called from strange code in Rosmm
    PageTablePte = MiAddressToPte(PointerPte);
    Pfn2 = MiGetPfnEntry(PageTablePte->u.Hard.PageFrameNumber);
    //Pfn2->u2.ShareCount++;
    DBG_UNREFERENCED_LOCAL_VARIABLE(Pfn2);

    /* Check where we should be getting the protection information from */
    if (PointerPte->u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)
    {
        /* Get the protection from the PTE, there's no real Proto PTE data */
        Protection = PointerPte->u.Soft.Protection;

        /* Remember that we did not use the proto protection */
        OriginalProtection = FALSE;
    }
    else
    {
        /* Get the protection from the original PTE link */
        OriginalPte = &Pfn1->OriginalPte;
        Protection = OriginalPte->u.Soft.Protection;

        /* Remember that we used the original protection */
        OriginalProtection = TRUE;

        /* Check if this was a write on a read only proto */
        if ((StoreInstruction) && !(Protection & MM_READWRITE))
        {
            /* Clear the flag so the page won't be marked dirty below */
            StoreInstruction = 0;
        }
    }

    /* Check if this was a write on a non-COW page */
    DirtyPage = FALSE;
    if ((StoreInstruction) && ((Protection & MM_WRITECOPY) != MM_WRITECOPY))
    {
        /* Then the page should be marked dirty */
        DirtyPage = TRUE;

        /* ReactOS check */
        ASSERT(Pfn1->OriginalPte.u.Soft.Prototype != 0);
    }

    /* Did we get a locked incoming PFN? */
    if (*LockedProtoPfn)
    {
        /* Drop a reference */
        ASSERT((*LockedProtoPfn)->u3.e2.ReferenceCount >= 1);
        MiDereferencePfnAndDropLockCount(*LockedProtoPfn);
        *LockedProtoPfn = NULL;
    }

    /* Release the PFN lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    /* Remove caching bits */
    Protection &= ~(MM_NOCACHE | MM_NOACCESS);

    /* Setup caching per the PFN's recorded cache attribute */
    if (Pfn1->u3.e1.CacheAttribute == MiWriteCombined)
    {
        /* Write combining, no caching */
        MI_PAGE_DISABLE_CACHE(&TempPte);
        MI_PAGE_WRITE_COMBINED(&TempPte);
    }
    else if (Pfn1->u3.e1.CacheAttribute == MiNonCached)
    {
        /* Write through, no caching */
        MI_PAGE_DISABLE_CACHE(&TempPte);
        MI_PAGE_WRITE_THROUGH(&TempPte);
    }

    /* Check if this is a kernel or user address */
    if (Address < MmSystemRangeStart)
    {
        /* Build the user PTE */
        MI_MAKE_HARDWARE_PTE_USER(&TempPte, PointerPte, Protection, PageFrameIndex);
    }
    else
    {
        /* Build the kernel PTE */
        MI_MAKE_HARDWARE_PTE(&TempPte, PointerPte, Protection, PageFrameIndex);
    }

    /* Set the dirty flag if needed */
    if (DirtyPage) TempPte.u.Hard.Dirty = TRUE;

    /* Write the PTE */
    MI_WRITE_VALID_PTE(PointerPte, TempPte);

    /* Reset the protection if needed */
    if (OriginalProtection) Protection = MM_ZERO_ACCESS;

    /* Return success */
    ASSERT(PointerPte == MiAddressToPte(Address));
    return STATUS_SUCCESS;
}
|
|
|
|
|
2012-03-26 07:41:47 +00:00
|
|
|
NTSTATUS
NTAPI
MiResolveTransitionFault(IN PVOID FaultingAddress,
                         IN PMMPTE PointerPte,
                         IN PEPROCESS CurrentProcess,
                         IN KIRQL OldIrql,
                         OUT PVOID *InPageBlock)
{
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1;
    MMPTE TempPte;
    PMMPTE PointerToPteForProtoPage;

    /* Resolves a fault on a transition PTE (page still physically resident,
       sitting on the standby/modified/free/zero lists or still active):
       reclaims the page and rebuilds a valid hardware PTE from the
       transition PTE's frame, protection, and user/global bits.
       Must be entered with the PFN lock held (OldIrql != MM_NOIRQL).
       *InPageBlock must come in NULL; ARM3 never sets it here. */
    DPRINT1("Transition fault on 0x%p with PTE 0x%lx in process %s\n", FaultingAddress, PointerPte, CurrentProcess->ImageFileName);

    /* Windows does this check */
    ASSERT(*InPageBlock == NULL);

    /* ARM3 doesn't support this path (caller must hold the PFN lock) */
    ASSERT(OldIrql != MM_NOIRQL);

    /* Capture the PTE and make sure it's in transition format */
    TempPte = *PointerPte;
    ASSERT((TempPte.u.Soft.Valid == 0) &&
           (TempPte.u.Soft.Prototype == 0) &&
           (TempPte.u.Soft.Transition == 1));

    /* Get the PFN and the PFN entry */
    PageFrameIndex = TempPte.u.Trans.PageFrameNumber;
    DPRINT1("Transition PFN: %lx\n", PageFrameIndex);
    Pfn1 = MiGetPfnEntry(PageFrameIndex);

    /* One more transition fault! */
    InterlockedIncrement(&KeGetCurrentPrcb()->MmTransitionCount);

    /* This is from ARM3 -- Windows normally handles this here */
    ASSERT(Pfn1->u4.InPageError == 0);

    /* Not supported in ARM3 */
    ASSERT(Pfn1->u3.e1.ReadInProgress == 0);

    /* Windows checks there's some free pages and this isn't an in-page error */
    ASSERT(MmAvailablePages > 0);
    ASSERT(Pfn1->u4.InPageError == 0);

    /* ReactOS checks for this */
    ASSERT(MmAvailablePages > 32);

    /* Was this a transition page in the valid list, or free/zero list? */
    if (Pfn1->u3.e1.PageLocation == ActiveAndValid)
    {
        /* All Windows does here is a bunch of sanity checks */
        DPRINT1("Transition in active list\n");
        ASSERT((Pfn1->PteAddress >= MiAddressToPte(MmPagedPoolStart)) &&
               (Pfn1->PteAddress <= MiAddressToPte(MmPagedPoolEnd)));
        ASSERT(Pfn1->u2.ShareCount != 0);
        ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
    }
    else
    {
        /* Otherwise, the page is removed from its list and re-referenced */
        DPRINT1("Transition page in free/zero list\n");
        MiUnlinkPageFromList(Pfn1);
        MiReferenceUnusedPageAndBumpLockCount(Pfn1);
    }

    /* At this point, there should no longer be any in-page errors */
    ASSERT(Pfn1->u4.InPageError == 0);

    /* Check if this was a PFN with no more share references */
    if (Pfn1->u2.ShareCount == 0) MiDropLockCount(Pfn1);

    /* Bump the share count and make the page valid */
    Pfn1->u2.ShareCount++;
    Pfn1->u3.e1.PageLocation = ActiveAndValid;

    /* Prototype PTEs are in paged pool, which itself might be in transition */
    if (FaultingAddress >= MmSystemRangeStart)
    {
        /* Check if this is a paged pool PTE in transition state */
        PointerToPteForProtoPage = MiAddressToPte(PointerPte);
        TempPte = *PointerToPteForProtoPage;
        if ((TempPte.u.Hard.Valid == 0) && (TempPte.u.Soft.Transition == 1))
        {
            /* This isn't yet supported */
            DPRINT1("Double transition fault not yet supported\n");
            ASSERT(FALSE);
        }
    }

    /* Build the transition PTE -- maybe a macro?
       Keep the frame bits, apply the hardware protection mask from the
       soft protection, then OR in the user/global bits */
    ASSERT(PointerPte->u.Hard.Valid == 0);
    ASSERT(PointerPte->u.Trans.Prototype == 0);
    ASSERT(PointerPte->u.Trans.Transition == 1);
    TempPte.u.Long = (PointerPte->u.Long & ~0xFFF) |
                     (MmProtectToPteMask[PointerPte->u.Trans.Protection]) |
                     MiDetermineUserGlobalPteMask(PointerPte);

    /* Is the PTE writeable? (modified, writable, and not copy-on-write) */
    if (((Pfn1->u3.e1.Modified) && (TempPte.u.Hard.Write)) &&
        (TempPte.u.Hard.CopyOnWrite == 0))
    {
        /* Make it dirty */
        TempPte.u.Hard.Dirty = TRUE;
    }
    else
    {
        /* Make it clean */
        TempPte.u.Hard.Dirty = FALSE;
    }

    /* Write the valid PTE */
    MI_WRITE_VALID_PTE(PointerPte, TempPte);

    /* Return success */
    return STATUS_PAGE_FAULT_TRANSITION;
}
|
|
|
|
|
2010-07-22 20:52:23 +00:00
|
|
|
NTSTATUS
|
|
|
|
NTAPI
|
|
|
|
MiResolveProtoPteFault(IN BOOLEAN StoreInstruction,
|
|
|
|
IN PVOID Address,
|
|
|
|
IN PMMPTE PointerPte,
|
|
|
|
IN PMMPTE PointerProtoPte,
|
|
|
|
IN OUT PMMPFN *OutPfn,
|
|
|
|
OUT PVOID *PageFileData,
|
|
|
|
OUT PMMPTE PteValue,
|
|
|
|
IN PEPROCESS Process,
|
|
|
|
IN KIRQL OldIrql,
|
|
|
|
IN PVOID TrapInformation)
|
|
|
|
{
|
2012-03-26 07:41:47 +00:00
|
|
|
MMPTE TempPte, PteContents;
|
2010-07-22 20:52:23 +00:00
|
|
|
PMMPFN Pfn1;
|
|
|
|
PFN_NUMBER PageFrameIndex;
|
2010-10-04 18:51:07 +00:00
|
|
|
NTSTATUS Status;
|
2012-03-26 07:41:47 +00:00
|
|
|
PVOID InPageBlock = NULL;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-07-22 20:52:23 +00:00
|
|
|
/* Must be called with an invalid, prototype PTE, with the PFN lock held */
|
|
|
|
ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
|
|
|
|
ASSERT(PointerPte->u.Hard.Valid == 0);
|
|
|
|
ASSERT(PointerPte->u.Soft.Prototype == 1);
|
|
|
|
|
2010-10-04 18:51:07 +00:00
|
|
|
/* Read the prototype PTE and check if it's valid */
|
2010-07-22 20:52:23 +00:00
|
|
|
TempPte = *PointerProtoPte;
|
2010-10-04 18:51:07 +00:00
|
|
|
if (TempPte.u.Hard.Valid == 1)
|
|
|
|
{
|
|
|
|
/* One more user of this mapped page */
|
|
|
|
PageFrameIndex = PFN_FROM_PTE(&TempPte);
|
|
|
|
Pfn1 = MiGetPfnEntry(PageFrameIndex);
|
|
|
|
Pfn1->u2.ShareCount++;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-10-04 18:51:07 +00:00
|
|
|
/* Call it a transition */
|
|
|
|
InterlockedIncrement(&KeGetCurrentPrcb()->MmTransitionCount);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-10-04 18:51:07 +00:00
|
|
|
/* Complete the prototype PTE fault -- this will release the PFN lock */
|
|
|
|
return MiCompleteProtoPteFault(StoreInstruction,
|
|
|
|
Address,
|
|
|
|
PointerPte,
|
|
|
|
PointerProtoPte,
|
|
|
|
OldIrql,
|
2012-08-03 11:34:35 +00:00
|
|
|
OutPfn);
|
2010-10-04 18:51:07 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-10-04 18:51:07 +00:00
|
|
|
/* Make sure there's some protection mask */
|
|
|
|
if (TempPte.u.Long == 0)
|
|
|
|
{
|
|
|
|
/* Release the lock */
|
|
|
|
DPRINT1("Access on reserved section?\n");
|
|
|
|
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
|
|
|
|
return STATUS_ACCESS_VIOLATION;
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-03-26 07:41:47 +00:00
|
|
|
/* Check for access rights on the PTE proper */
|
|
|
|
PteContents = *PointerPte;
|
|
|
|
if (PteContents.u.Soft.PageFileHigh != MI_PTE_LOOKUP_NEEDED)
|
|
|
|
{
|
|
|
|
if (!PteContents.u.Proto.ReadOnly)
|
|
|
|
{
|
2012-07-31 06:47:47 +00:00
|
|
|
/* FIXME: CHECK FOR ACCESS */
|
|
|
|
|
|
|
|
/* Check for copy on write page */
|
|
|
|
if ((TempPte.u.Soft.Protection & MM_WRITECOPY) == MM_WRITECOPY)
|
|
|
|
{
|
|
|
|
/* Not yet supported */
|
|
|
|
ASSERT(FALSE);
|
|
|
|
}
|
2012-03-26 07:41:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2012-07-31 06:47:47 +00:00
|
|
|
/* Check for copy on write page */
|
|
|
|
if ((PteContents.u.Soft.Protection & MM_WRITECOPY) == MM_WRITECOPY)
|
|
|
|
{
|
|
|
|
/* Not yet supported */
|
|
|
|
ASSERT(FALSE);
|
|
|
|
}
|
2012-03-26 07:41:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Check for clone PTEs */
|
|
|
|
if (PointerPte <= MiHighestUserPte) ASSERT(Process->CloneRoot == NULL);
|
|
|
|
|
|
|
|
/* We don't support mapped files yet */
|
2010-10-04 18:51:07 +00:00
|
|
|
ASSERT(TempPte.u.Soft.Prototype == 0);
|
2012-03-26 07:41:47 +00:00
|
|
|
|
|
|
|
/* We might however have transition PTEs */
|
|
|
|
if (TempPte.u.Soft.Transition == 1)
|
|
|
|
{
|
|
|
|
/* Resolve the transition fault */
|
|
|
|
ASSERT(OldIrql != MM_NOIRQL);
|
|
|
|
Status = MiResolveTransitionFault(Address,
|
|
|
|
PointerProtoPte,
|
|
|
|
Process,
|
|
|
|
OldIrql,
|
|
|
|
&InPageBlock);
|
|
|
|
ASSERT(NT_SUCCESS(Status));
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* We also don't support paged out pages */
|
|
|
|
ASSERT(TempPte.u.Soft.PageFileHigh == 0);
|
|
|
|
|
|
|
|
/* Resolve the demand zero fault */
|
|
|
|
Status = MiResolveDemandZeroFault(Address,
|
|
|
|
PointerProtoPte,
|
|
|
|
Process,
|
|
|
|
OldIrql);
|
|
|
|
ASSERT(NT_SUCCESS(Status));
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-07-22 20:52:23 +00:00
|
|
|
/* Complete the prototype PTE fault -- this will release the PFN lock */
|
2010-10-04 18:51:07 +00:00
|
|
|
ASSERT(PointerPte->u.Hard.Valid == 0);
|
2010-07-22 20:52:23 +00:00
|
|
|
return MiCompleteProtoPteFault(StoreInstruction,
|
|
|
|
Address,
|
|
|
|
PointerPte,
|
|
|
|
PointerProtoPte,
|
|
|
|
OldIrql,
|
2012-08-03 11:34:35 +00:00
|
|
|
OutPfn);
|
2010-07-22 20:52:23 +00:00
|
|
|
}
|
|
|
|
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
NTSTATUS
|
|
|
|
NTAPI
|
|
|
|
MiDispatchFault(IN BOOLEAN StoreInstruction,
|
|
|
|
IN PVOID Address,
|
|
|
|
IN PMMPTE PointerPte,
|
2010-07-22 20:52:23 +00:00
|
|
|
IN PMMPTE PointerProtoPte,
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
IN BOOLEAN Recursive,
|
|
|
|
IN PEPROCESS Process,
|
|
|
|
IN PVOID TrapInformation,
|
2012-07-31 07:32:19 +00:00
|
|
|
IN PMMVAD Vad)
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
{
|
|
|
|
MMPTE TempPte;
|
2010-07-22 20:52:23 +00:00
|
|
|
KIRQL OldIrql, LockIrql;
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
NTSTATUS Status;
|
2010-07-22 20:52:23 +00:00
|
|
|
PMMPTE SuperProtoPte;
|
2012-08-03 11:34:35 +00:00
|
|
|
PMMPFN Pfn1, OutPfn = NULL;
|
2012-12-30 11:54:40 +00:00
|
|
|
PFN_NUMBER PageFrameIndex;
|
|
|
|
PFN_COUNT PteCount, ProcessedPtes;
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
DPRINT("ARM3 Page Fault Dispatcher for address: %p in process: %p\n",
|
2010-01-03 05:10:09 +00:00
|
|
|
Address,
|
|
|
|
Process);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-02-06 09:26:23 +00:00
|
|
|
/* Make sure the addresses are ok */
|
|
|
|
ASSERT(PointerPte == MiAddressToPte(Address));
|
|
|
|
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
//
|
|
|
|
// Make sure APCs are off and we're not at dispatch
|
|
|
|
//
|
2010-07-22 20:52:23 +00:00
|
|
|
OldIrql = KeGetCurrentIrql();
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
ASSERT(OldIrql <= APC_LEVEL);
|
2010-07-22 20:52:23 +00:00
|
|
|
ASSERT(KeAreAllApcsDisabled() == TRUE);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
//
|
|
|
|
// Grab a copy of the PTE
|
|
|
|
//
|
|
|
|
TempPte = *PointerPte;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-07-22 20:52:23 +00:00
|
|
|
/* Do we have a prototype PTE? */
|
|
|
|
if (PointerProtoPte)
|
|
|
|
{
|
|
|
|
/* This should never happen */
|
|
|
|
ASSERT(!MI_IS_PHYSICAL_ADDRESS(PointerProtoPte));
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-10-04 18:51:07 +00:00
|
|
|
/* Check if this is a kernel-mode address */
|
|
|
|
SuperProtoPte = MiAddressToPte(PointerProtoPte);
|
|
|
|
if (Address >= MmSystemRangeStart)
|
|
|
|
{
|
|
|
|
/* Lock the PFN database */
|
|
|
|
LockIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-10-04 18:51:07 +00:00
|
|
|
/* Has the PTE been made valid yet? */
|
|
|
|
if (!SuperProtoPte->u.Hard.Valid)
|
|
|
|
{
|
2012-07-31 07:11:52 +00:00
|
|
|
ASSERT(FALSE);
|
2010-10-04 18:51:07 +00:00
|
|
|
}
|
2012-07-31 07:11:52 +00:00
|
|
|
else if (PointerPte->u.Hard.Valid == 1)
|
2010-10-04 18:51:07 +00:00
|
|
|
{
|
2012-07-31 07:11:52 +00:00
|
|
|
ASSERT(FALSE);
|
2010-10-04 18:51:07 +00:00
|
|
|
}
|
2012-07-31 07:11:52 +00:00
|
|
|
|
|
|
|
/* Resolve the fault -- this will release the PFN lock */
|
|
|
|
Status = MiResolveProtoPteFault(StoreInstruction,
|
|
|
|
Address,
|
|
|
|
PointerPte,
|
|
|
|
PointerProtoPte,
|
2012-08-03 11:34:35 +00:00
|
|
|
&OutPfn,
|
2012-07-31 07:11:52 +00:00
|
|
|
NULL,
|
|
|
|
NULL,
|
|
|
|
Process,
|
|
|
|
LockIrql,
|
|
|
|
TrapInformation);
|
|
|
|
ASSERT(Status == STATUS_SUCCESS);
|
|
|
|
|
|
|
|
/* Complete this as a transition fault */
|
|
|
|
ASSERT(OldIrql == KeGetCurrentIrql());
|
|
|
|
ASSERT(OldIrql <= APC_LEVEL);
|
|
|
|
ASSERT(KeAreAllApcsDisabled() == TRUE);
|
|
|
|
return Status;
|
2010-10-04 18:51:07 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2012-07-31 07:32:19 +00:00
|
|
|
/* We only handle the lookup path */
|
2010-10-04 18:51:07 +00:00
|
|
|
ASSERT(PointerPte->u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-31 07:32:19 +00:00
|
|
|
/* Is there a non-image VAD? */
|
|
|
|
if ((Vad) &&
|
|
|
|
(Vad->u.VadFlags.VadType != VadImageMap) &&
|
|
|
|
!(Vad->u2.VadFlags2.ExtendableFile))
|
|
|
|
{
|
|
|
|
/* One day, ReactOS will cluster faults */
|
|
|
|
ASSERT(Address <= MM_HIGHEST_USER_ADDRESS);
|
|
|
|
DPRINT("Should cluster fault, but won't\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Only one PTE to handle for now */
|
|
|
|
PteCount = 1;
|
|
|
|
ProcessedPtes = 0;
|
|
|
|
|
2010-10-04 18:51:07 +00:00
|
|
|
/* Lock the PFN database */
|
|
|
|
LockIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-31 07:32:19 +00:00
|
|
|
/* We only handle the valid path */
|
2010-10-04 18:51:07 +00:00
|
|
|
ASSERT(SuperProtoPte->u.Hard.Valid == 1);
|
2010-07-22 20:52:23 +00:00
|
|
|
|
2012-07-31 07:32:19 +00:00
|
|
|
/* Capture the PTE */
|
|
|
|
TempPte = *PointerProtoPte;
|
|
|
|
|
|
|
|
/* Loop to handle future case of clustered faults */
|
|
|
|
while (TRUE)
|
|
|
|
{
|
|
|
|
/* For our current usage, this should be true */
|
2012-08-03 11:34:35 +00:00
|
|
|
if (TempPte.u.Hard.Valid == 1)
|
|
|
|
{
|
|
|
|
/* Bump the share count on the PTE */
|
|
|
|
PageFrameIndex = PFN_FROM_PTE(&TempPte);
|
|
|
|
Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
|
|
|
|
Pfn1->u2.ShareCount++;
|
|
|
|
}
|
|
|
|
else if ((TempPte.u.Soft.Prototype == 0) &&
|
|
|
|
(TempPte.u.Soft.Transition == 1))
|
|
|
|
{
|
2012-09-01 02:32:25 +00:00
|
|
|
/* This is a standby page, bring it back from the cache */
|
|
|
|
PageFrameIndex = TempPte.u.Trans.PageFrameNumber;
|
2012-09-02 18:54:05 +00:00
|
|
|
DPRINT("oooh, shiny, a soft fault! 0x%lx\n", PageFrameIndex);
|
2012-09-01 02:32:25 +00:00
|
|
|
Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
|
|
|
|
ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
|
2013-06-02 19:04:02 +00:00
|
|
|
|
2012-09-01 02:32:25 +00:00
|
|
|
/* Should not yet happen in ReactOS */
|
|
|
|
ASSERT(Pfn1->u3.e1.ReadInProgress == 0);
|
|
|
|
ASSERT(Pfn1->u4.InPageError == 0);
|
2013-06-02 19:04:02 +00:00
|
|
|
|
2012-09-01 02:32:25 +00:00
|
|
|
/* Get the page */
|
|
|
|
MiUnlinkPageFromList(Pfn1);
|
2013-06-02 19:04:02 +00:00
|
|
|
|
2012-09-01 02:32:25 +00:00
|
|
|
/* Bump its reference count */
|
|
|
|
ASSERT(Pfn1->u2.ShareCount == 0);
|
|
|
|
InterlockedIncrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
|
|
|
|
Pfn1->u2.ShareCount++;
|
2013-06-02 19:04:02 +00:00
|
|
|
|
2012-09-01 02:32:25 +00:00
|
|
|
/* Make it valid again */
|
|
|
|
/* This looks like another macro.... */
|
|
|
|
Pfn1->u3.e1.PageLocation = ActiveAndValid;
|
|
|
|
ASSERT(PointerProtoPte->u.Hard.Valid == 0);
|
|
|
|
ASSERT(PointerProtoPte->u.Trans.Prototype == 0);
|
|
|
|
ASSERT(PointerProtoPte->u.Trans.Transition == 1);
|
|
|
|
TempPte.u.Long = (PointerProtoPte->u.Long & ~0xFFF) |
|
|
|
|
MmProtectToPteMask[PointerProtoPte->u.Trans.Protection];
|
|
|
|
TempPte.u.Hard.Valid = 1;
|
|
|
|
TempPte.u.Hard.Accessed = 1;
|
2013-06-02 19:04:02 +00:00
|
|
|
|
2012-09-01 02:32:25 +00:00
|
|
|
/* Is the PTE writeable? */
|
|
|
|
if (((Pfn1->u3.e1.Modified) && (TempPte.u.Hard.Write)) &&
|
|
|
|
(TempPte.u.Hard.CopyOnWrite == 0))
|
|
|
|
{
|
|
|
|
/* Make it dirty */
|
|
|
|
TempPte.u.Hard.Dirty = TRUE;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Make it clean */
|
|
|
|
TempPte.u.Hard.Dirty = FALSE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Write the valid PTE */
|
|
|
|
MI_WRITE_VALID_PTE(PointerProtoPte, TempPte);
|
|
|
|
ASSERT(PointerPte->u.Hard.Valid == 0);
|
2012-08-03 11:34:35 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Page is invalid, get out of the loop */
|
|
|
|
break;
|
|
|
|
}
|
2012-07-31 07:32:19 +00:00
|
|
|
|
|
|
|
/* One more done, was it the last? */
|
|
|
|
if (++ProcessedPtes == PteCount)
|
|
|
|
{
|
|
|
|
/* Complete the fault */
|
|
|
|
MiCompleteProtoPteFault(StoreInstruction,
|
2010-10-04 18:51:07 +00:00
|
|
|
Address,
|
|
|
|
PointerPte,
|
|
|
|
PointerProtoPte,
|
|
|
|
LockIrql,
|
2012-08-03 11:34:35 +00:00
|
|
|
&OutPfn);
|
2012-07-31 07:32:19 +00:00
|
|
|
|
|
|
|
/* THIS RELEASES THE PFN LOCK! */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* No clustered faults yet */
|
|
|
|
ASSERT(FALSE);
|
|
|
|
}
|
|
|
|
|
2012-08-03 11:34:35 +00:00
|
|
|
/* Did we resolve the fault? */
|
|
|
|
if (ProcessedPtes)
|
|
|
|
{
|
|
|
|
/* Bump the transition count */
|
2012-12-30 11:54:40 +00:00
|
|
|
InterlockedExchangeAddSizeT(&KeGetCurrentPrcb()->MmTransitionCount, ProcessedPtes);
|
2012-08-03 11:34:35 +00:00
|
|
|
ProcessedPtes--;
|
|
|
|
|
|
|
|
/* Loop all the processing we did */
|
|
|
|
ASSERT(ProcessedPtes == 0);
|
|
|
|
|
|
|
|
/* Complete this as a transition fault */
|
|
|
|
ASSERT(OldIrql == KeGetCurrentIrql());
|
|
|
|
ASSERT(OldIrql <= APC_LEVEL);
|
|
|
|
ASSERT(KeAreAllApcsDisabled() == TRUE);
|
|
|
|
return STATUS_PAGE_FAULT_TRANSITION;
|
|
|
|
}
|
2012-07-31 07:32:19 +00:00
|
|
|
|
2012-08-03 11:34:35 +00:00
|
|
|
/* We did not -- PFN lock is still held, prepare to resolve prototype PTE fault */
|
|
|
|
OutPfn = MI_PFN_ELEMENT(SuperProtoPte->u.Hard.PageFrameNumber);
|
|
|
|
MiReferenceUsedPageAndBumpLockCount(OutPfn);
|
|
|
|
ASSERT(OutPfn->u3.e2.ReferenceCount > 1);
|
|
|
|
ASSERT(PointerPte->u.Hard.Valid == 0);
|
2012-07-31 07:32:19 +00:00
|
|
|
|
2012-08-03 11:34:35 +00:00
|
|
|
/* Resolve the fault -- this will release the PFN lock */
|
|
|
|
Status = MiResolveProtoPteFault(StoreInstruction,
|
|
|
|
Address,
|
|
|
|
PointerPte,
|
|
|
|
PointerProtoPte,
|
|
|
|
&OutPfn,
|
|
|
|
NULL,
|
|
|
|
NULL,
|
|
|
|
Process,
|
|
|
|
LockIrql,
|
|
|
|
TrapInformation);
|
2012-09-02 08:13:24 +00:00
|
|
|
//ASSERT(Status != STATUS_ISSUE_PAGING_IO);
|
|
|
|
//ASSERT(Status != STATUS_REFAULT);
|
|
|
|
//ASSERT(Status != STATUS_PTE_CHANGED);
|
2012-08-03 11:34:35 +00:00
|
|
|
|
|
|
|
/* Did the routine clean out the PFN or should we? */
|
|
|
|
if (OutPfn)
|
|
|
|
{
|
|
|
|
/* We had a locked PFN, so acquire the PFN lock to dereference it */
|
|
|
|
ASSERT(PointerProtoPte != NULL);
|
|
|
|
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
|
|
|
|
|
|
|
|
/* Dereference the locked PFN */
|
|
|
|
MiDereferencePfnAndDropLockCount(OutPfn);
|
|
|
|
ASSERT(OutPfn->u3.e2.ReferenceCount >= 1);
|
|
|
|
|
|
|
|
/* And now release the lock */
|
|
|
|
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
|
|
|
|
}
|
2010-07-22 20:52:23 +00:00
|
|
|
|
2010-10-04 18:51:07 +00:00
|
|
|
/* Complete this as a transition fault */
|
|
|
|
ASSERT(OldIrql == KeGetCurrentIrql());
|
|
|
|
ASSERT(OldIrql <= APC_LEVEL);
|
|
|
|
ASSERT(KeAreAllApcsDisabled() == TRUE);
|
2012-08-03 11:34:35 +00:00
|
|
|
return Status;
|
2010-10-04 18:51:07 +00:00
|
|
|
}
|
2010-07-22 20:52:23 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
//
|
2012-02-29 23:11:21 +00:00
|
|
|
// The PTE must be invalid but not completely empty. It must also not be a
|
2012-07-31 06:47:47 +00:00
|
|
|
// prototype PTE as that scenario should've been handled above. These are
|
|
|
|
// all Windows checks
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
//
|
|
|
|
ASSERT(TempPte.u.Hard.Valid == 0);
|
2012-02-29 23:11:21 +00:00
|
|
|
ASSERT(TempPte.u.Soft.Prototype == 0);
|
|
|
|
ASSERT(TempPte.u.Long != 0);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
//
|
2012-02-29 23:11:21 +00:00
|
|
|
// No transition or page file software PTEs in ARM3 yet, so this must be a
|
2012-07-31 06:47:47 +00:00
|
|
|
// demand zero page. These are all ReactOS checks
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
//
|
|
|
|
ASSERT(TempPte.u.Soft.Transition == 0);
|
|
|
|
ASSERT(TempPte.u.Soft.PageFileHigh == 0);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
//
|
|
|
|
// If we got this far, the PTE can only be a demand zero PTE, which is what
|
|
|
|
// we want. Go handle it!
|
|
|
|
//
|
|
|
|
Status = MiResolveDemandZeroFault(Address,
|
Two Part Patch which fixes ARM3 Section Support (not yet enabled). This had been enabled in the past for testing and resulted in bizarre crashes during testing. The amount of fixing required should reveal why:
Part 1: Page Fault Path Fixes
[NTOS]: As an optimization, someone seems to have had changed the MiResolveDemandZeroFault prototype not to require a PTE, and to instead take a protection mask directly. While clever, this broke support for ARM3 sections, because the code was now assuming that the protection of the PTE for the input address should be used -- while in NT Sections we instead use what are called ProtoType PTEs. This was very annoying to debug, but since the cause has been fixed, I've reverted back to the old convention in which the PTE is passed-in, and this can be a different PTE than the PTE for the address, as it should be.
[NTOS]: Due to the reverting of the original path, another optimization, in which MiResolveDemandZeroFault was being called directly instead of going through MiDispatchFault and writing an invalid demand-zero PDE has also been removed. PDE faults are now going through the correct, expected path.
[NTOS]: MiResolveDemandZeroFault was always creating Kernel PTEs. It should create User PTEs when necessary.
[NTOS]: MiDeletePte was assuming any prototype PTE is a forked PTE. Forked PTEs only happen when the addresses in the PTE don't match, so check for that too.
Part 2: ARM3 Section Object Fixes
[NTOS]: Fix issue when trying to make both ROS_SECTION_OBJECTs and NT's SECTION co-exist. We relied on the *caller* knowing what kind of section this is, and that can't be a good idea. Now, when the caller requests an ARM3 section vs a ROS section, we use a marker to detect what kind of section this is for later APIs.
[NTOS]: For section VADs, we were storing the ReactOS MEMORY_AREA in the ControlArea... however, the mappings of one individual section object share a single control area, even though they have multiple MEMORY_AREAs (one for each mapping). As such, we overwrote the MEMORY_AREA continuously, and at free-time, double or triple-freed the same memory area.
[NTOS]: Moved the MEMORY_AREA to the "Banked" field of the long VAD, instead of the ControlArea. Allocate MMVAD_LONGs for ARM3 sections for now, to support this. Also, after deleting the MEMORY_AREA while parsing VADs, we now use a special marker to detect double-frees, and we also use a special marker to make sure we have a Long VAD as expected.
svn path=/trunk/; revision=56035
2012-03-05 16:41:46 +00:00
|
|
|
PointerPte,
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
Process,
|
2010-05-12 20:48:15 +00:00
|
|
|
MM_NOIRQL);
|
2010-11-02 14:58:39 +00:00
|
|
|
ASSERT(KeAreAllApcsDisabled() == TRUE);
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
if (NT_SUCCESS(Status))
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Make sure we're returning in a sane state and pass the status down
|
|
|
|
//
|
2010-11-02 14:58:39 +00:00
|
|
|
ASSERT(OldIrql == KeGetCurrentIrql());
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
|
|
|
|
return Status;
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Generate an access fault
|
|
|
|
//
|
|
|
|
return STATUS_ACCESS_VIOLATION;
|
|
|
|
}
|
|
|
|
|
|
|
|
NTSTATUS
|
|
|
|
NTAPI
|
|
|
|
MmArmAccessFault(IN BOOLEAN StoreInstruction,
|
|
|
|
IN PVOID Address,
|
|
|
|
IN KPROCESSOR_MODE Mode,
|
|
|
|
IN PVOID TrapInformation)
|
|
|
|
{
|
|
|
|
KIRQL OldIrql = KeGetCurrentIrql(), LockIrql;
|
2012-02-05 17:19:58 +00:00
|
|
|
PMMPTE ProtoPte = NULL;
|
|
|
|
PMMPTE PointerPte = MiAddressToPte(Address);
|
|
|
|
PMMPDE PointerPde = MiAddressToPde(Address);
|
|
|
|
#if (_MI_PAGING_LEVELS >= 3)
|
|
|
|
PMMPDE PointerPpe = MiAddressToPpe(Address);
|
|
|
|
#if (_MI_PAGING_LEVELS == 4)
|
|
|
|
PMMPDE PointerPxe = MiAddressToPxe(Address);
|
|
|
|
#endif
|
|
|
|
#endif
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
MMPTE TempPte;
|
|
|
|
PETHREAD CurrentThread;
|
2010-07-22 18:26:04 +00:00
|
|
|
PEPROCESS CurrentProcess;
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
NTSTATUS Status;
|
2010-07-22 18:26:04 +00:00
|
|
|
PMMSUPPORT WorkingSet;
|
2010-07-22 18:37:27 +00:00
|
|
|
ULONG ProtectionCode;
|
|
|
|
PMMVAD Vad;
|
|
|
|
PFN_NUMBER PageFrameIndex;
|
2010-09-29 01:10:28 +00:00
|
|
|
ULONG Color;
|
2012-07-21 19:07:11 +00:00
|
|
|
BOOLEAN IsSessionAddress;
|
|
|
|
PMMPFN Pfn1;
|
2012-02-05 17:19:58 +00:00
|
|
|
DPRINT("ARM3 FAULT AT: %p\n", Address);
|
2010-07-26 21:45:42 +00:00
|
|
|
|
2012-02-06 09:26:23 +00:00
|
|
|
/* Check for page fault on high IRQL */
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
if (OldIrql > APC_LEVEL)
|
|
|
|
{
|
2012-07-21 19:07:11 +00:00
|
|
|
#if (_MI_PAGING_LEVELS < 3)
|
|
|
|
/* Could be a page table for paged pool, which we'll allow */
|
|
|
|
if (MI_IS_SYSTEM_PAGE_TABLE_ADDRESS(Address)) MiSynchronizeSystemPde((PMMPDE)PointerPte);
|
|
|
|
MiCheckPdeForPagedPool(Address);
|
|
|
|
#endif
|
|
|
|
/* Check if any of the top-level pages are invalid */
|
|
|
|
if (
|
|
|
|
#if (_MI_PAGING_LEVELS == 4)
|
|
|
|
(PointerPxe->u.Hard.Valid == 0) ||
|
|
|
|
#endif
|
|
|
|
#if (_MI_PAGING_LEVELS >= 3)
|
|
|
|
(PointerPpe->u.Hard.Valid == 0) ||
|
|
|
|
#endif
|
|
|
|
(PointerPde->u.Hard.Valid == 0))
|
|
|
|
{
|
|
|
|
/* This fault is not valid, printf out some debugging help */
|
|
|
|
DbgPrint("MM:***PAGE FAULT AT IRQL > 1 Va %p, IRQL %lx\n",
|
|
|
|
Address,
|
|
|
|
OldIrql);
|
|
|
|
if (TrapInformation)
|
|
|
|
{
|
|
|
|
PKTRAP_FRAME TrapFrame = TrapInformation;
|
2012-12-30 11:54:40 +00:00
|
|
|
#ifdef _M_IX86
|
2012-07-21 19:07:11 +00:00
|
|
|
DbgPrint("MM:***EIP %p, EFL %p\n", TrapFrame->Eip, TrapFrame->EFlags);
|
|
|
|
DbgPrint("MM:***EAX %p, ECX %p EDX %p\n", TrapFrame->Eax, TrapFrame->Ecx, TrapFrame->Edx);
|
|
|
|
DbgPrint("MM:***EBX %p, ESI %p EDI %p\n", TrapFrame->Ebx, TrapFrame->Esi, TrapFrame->Edi);
|
2012-12-30 11:54:40 +00:00
|
|
|
#elif defined(_M_AMD64)
|
|
|
|
DbgPrint("MM:***RIP %p, EFL %p\n", TrapFrame->Rip, TrapFrame->EFlags);
|
|
|
|
DbgPrint("MM:***RAX %p, RCX %p RDX %p\n", TrapFrame->Rax, TrapFrame->Rcx, TrapFrame->Rdx);
|
|
|
|
DbgPrint("MM:***RBX %p, RSI %p RDI %p\n", TrapFrame->Rbx, TrapFrame->Rsi, TrapFrame->Rdi);
|
|
|
|
#endif
|
2012-07-21 19:07:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Tell the trap handler to fail */
|
|
|
|
return STATUS_IN_PAGE_ERROR | 0x10000000;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Not yet implemented in ReactOS */
|
|
|
|
ASSERT(MI_IS_PAGE_LARGE(PointerPde) == FALSE);
|
|
|
|
ASSERT(((StoreInstruction) && (PointerPte->u.Hard.CopyOnWrite)) == FALSE);
|
|
|
|
|
|
|
|
/* Check if this was a write */
|
|
|
|
if (StoreInstruction)
|
|
|
|
{
|
|
|
|
/* Was it to a read-only page? */
|
|
|
|
Pfn1 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
|
|
|
|
if (!(PointerPte->u.Long & PTE_READWRITE) &&
|
|
|
|
!(Pfn1->OriginalPte.u.Soft.Protection & MM_READWRITE))
|
|
|
|
{
|
|
|
|
/* Crash with distinguished bugcheck code */
|
|
|
|
KeBugCheckEx(ATTEMPTED_WRITE_TO_READONLY_MEMORY,
|
|
|
|
(ULONG_PTR)Address,
|
|
|
|
PointerPte->u.Long,
|
|
|
|
(ULONG_PTR)TrapInformation,
|
|
|
|
10);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Nothing is actually wrong */
|
|
|
|
DPRINT1("Fault at IRQL1 is ok\n");
|
|
|
|
return STATUS_SUCCESS;
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-02-06 09:26:23 +00:00
|
|
|
/* Check for kernel fault address */
|
2012-02-06 14:32:07 +00:00
|
|
|
if (Address >= MmSystemRangeStart)
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
{
|
2012-02-06 09:26:23 +00:00
|
|
|
/* Bail out, if the fault came from user mode */
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
if (Mode == UserMode) return STATUS_ACCESS_VIOLATION;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-02-05 17:19:58 +00:00
|
|
|
#if (_MI_PAGING_LEVELS == 4)
|
2012-07-21 19:07:11 +00:00
|
|
|
/* AMD64 system, check if PXE is invalid */
|
|
|
|
if (PointerPxe->u.Hard.Valid == 0)
|
2012-02-05 17:19:58 +00:00
|
|
|
{
|
|
|
|
KeBugCheckEx(PAGE_FAULT_IN_NONPAGED_AREA,
|
|
|
|
(ULONG_PTR)Address,
|
|
|
|
StoreInstruction,
|
|
|
|
(ULONG_PTR)TrapInformation,
|
2012-07-21 19:07:11 +00:00
|
|
|
7);
|
2012-02-05 17:19:58 +00:00
|
|
|
}
|
2010-07-26 21:45:42 +00:00
|
|
|
#endif
|
2012-07-21 19:07:11 +00:00
|
|
|
#if (_MI_PAGING_LEVELS == 4)
|
|
|
|
/* PAE/AMD64 system, check if PPE is invalid */
|
|
|
|
if (PointerPpe->u.Hard.Valid == 0)
|
2012-02-06 14:32:07 +00:00
|
|
|
{
|
2012-07-21 19:07:11 +00:00
|
|
|
KeBugCheckEx(PAGE_FAULT_IN_NONPAGED_AREA,
|
|
|
|
(ULONG_PTR)Address,
|
|
|
|
StoreInstruction,
|
|
|
|
(ULONG_PTR)TrapInformation,
|
|
|
|
5);
|
2012-02-06 14:32:07 +00:00
|
|
|
}
|
|
|
|
#endif
|
2012-07-21 19:07:11 +00:00
|
|
|
#if (_MI_PAGING_LEVELS == 2)
|
|
|
|
if (MI_IS_SYSTEM_PAGE_TABLE_ADDRESS(Address)) MiSynchronizeSystemPde((PMMPDE)PointerPte);
|
|
|
|
MiCheckPdeForPagedPool(Address);
|
|
|
|
#endif
|
2012-02-06 14:32:07 +00:00
|
|
|
|
2012-02-06 09:26:23 +00:00
|
|
|
/* Check if the PDE is invalid */
|
2011-01-18 22:42:44 +00:00
|
|
|
if (PointerPde->u.Hard.Valid == 0)
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
{
|
2012-07-21 19:07:11 +00:00
|
|
|
/* PDE (still) not valid, kill the system */
|
|
|
|
KeBugCheckEx(PAGE_FAULT_IN_NONPAGED_AREA,
|
|
|
|
(ULONG_PTR)Address,
|
|
|
|
StoreInstruction,
|
|
|
|
(ULONG_PTR)TrapInformation,
|
|
|
|
2);
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Not handling session faults yet */
|
|
|
|
IsSessionAddress = MI_IS_SESSION_ADDRESS(Address);
|
|
|
|
|
2012-02-06 09:26:23 +00:00
|
|
|
/* The PDE is valid, so read the PTE */
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
TempPte = *PointerPte;
|
|
|
|
if (TempPte.u.Hard.Valid == 1)
|
|
|
|
{
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Check if this was system space or session space */
|
|
|
|
if (!IsSessionAddress)
|
|
|
|
{
|
|
|
|
/* Check if the PTE is still valid under PFN lock */
|
|
|
|
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
|
|
|
|
TempPte = *PointerPte;
|
|
|
|
if (TempPte.u.Hard.Valid)
|
|
|
|
{
|
|
|
|
/* Check if this was a write */
|
|
|
|
if (StoreInstruction)
|
|
|
|
{
|
|
|
|
/* Was it to a read-only page? */
|
|
|
|
Pfn1 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
|
|
|
|
if (!(PointerPte->u.Long & PTE_READWRITE) &&
|
|
|
|
!(Pfn1->OriginalPte.u.Soft.Protection & MM_READWRITE))
|
|
|
|
{
|
|
|
|
/* Crash with distinguished bugcheck code */
|
|
|
|
KeBugCheckEx(ATTEMPTED_WRITE_TO_READONLY_MEMORY,
|
|
|
|
(ULONG_PTR)Address,
|
|
|
|
PointerPte->u.Long,
|
|
|
|
(ULONG_PTR)TrapInformation,
|
|
|
|
11);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Release PFN lock and return all good */
|
|
|
|
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
|
|
|
|
return STATUS_SUCCESS;
|
|
|
|
}
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Check if this was a session PTE that needs to remap the session PDE */
|
|
|
|
if (MI_IS_SESSION_PTE(Address))
|
|
|
|
{
|
2012-08-01 07:54:37 +00:00
|
|
|
/* Do the remapping */
|
|
|
|
Status = MiCheckPdeForSessionSpace(Address);
|
|
|
|
if (!NT_SUCCESS(Status))
|
|
|
|
{
|
|
|
|
/* It failed, this address is invalid */
|
|
|
|
KeBugCheckEx(PAGE_FAULT_IN_NONPAGED_AREA,
|
|
|
|
(ULONG_PTR)Address,
|
|
|
|
StoreInstruction,
|
|
|
|
(ULONG_PTR)TrapInformation,
|
|
|
|
6);
|
|
|
|
}
|
2012-07-21 19:07:11 +00:00
|
|
|
}
|
2012-02-06 14:35:09 +00:00
|
|
|
|
2012-02-29 23:11:21 +00:00
|
|
|
/* Check for a fault on the page table or hyperspace */
|
2012-07-21 19:07:11 +00:00
|
|
|
if (MI_IS_PAGE_TABLE_OR_HYPER_ADDRESS(Address))
|
|
|
|
{
|
|
|
|
#if (_MI_PAGING_LEVELS < 3)
|
|
|
|
/* Windows does this check but I don't understand why -- it's done above! */
|
|
|
|
ASSERT(MiCheckPdeForPagedPool(Address) != STATUS_WAIT_1);
|
|
|
|
#endif
|
|
|
|
/* Handle this as a user mode fault */
|
|
|
|
goto UserFault;
|
|
|
|
}
|
2012-02-06 14:32:07 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Get the current thread */
|
|
|
|
CurrentThread = PsGetCurrentThread();
|
|
|
|
|
|
|
|
/* What kind of address is this */
|
|
|
|
if (!IsSessionAddress)
|
|
|
|
{
|
|
|
|
/* Use the system working set */
|
|
|
|
WorkingSet = &MmSystemCacheWs;
|
|
|
|
CurrentProcess = NULL;
|
|
|
|
|
|
|
|
/* Make sure we don't have a recursive working set lock */
|
|
|
|
if ((CurrentThread->OwnsProcessWorkingSetExclusive) ||
|
|
|
|
(CurrentThread->OwnsProcessWorkingSetShared) ||
|
|
|
|
(CurrentThread->OwnsSystemWorkingSetExclusive) ||
|
|
|
|
(CurrentThread->OwnsSystemWorkingSetShared) ||
|
|
|
|
(CurrentThread->OwnsSessionWorkingSetExclusive) ||
|
|
|
|
(CurrentThread->OwnsSessionWorkingSetShared))
|
|
|
|
{
|
|
|
|
/* Fail */
|
|
|
|
return STATUS_IN_PAGE_ERROR | 0x10000000;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2012-08-01 07:54:37 +00:00
|
|
|
/* Use the session process and working set */
|
|
|
|
CurrentProcess = HYDRA_PROCESS;
|
|
|
|
WorkingSet = &MmSessionSpace->GlobalVirtualAddress->Vm;
|
|
|
|
|
|
|
|
/* Make sure we don't have a recursive working set lock */
|
|
|
|
if ((CurrentThread->OwnsSessionWorkingSetExclusive) ||
|
|
|
|
(CurrentThread->OwnsSessionWorkingSetShared))
|
|
|
|
{
|
|
|
|
/* Fail */
|
|
|
|
return STATUS_IN_PAGE_ERROR | 0x10000000;
|
|
|
|
}
|
2012-07-21 19:07:11 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-02-06 14:32:07 +00:00
|
|
|
/* Acquire the working set lock */
|
2010-07-22 18:26:04 +00:00
|
|
|
KeRaiseIrql(APC_LEVEL, &LockIrql);
|
|
|
|
MiLockWorkingSet(CurrentThread, WorkingSet);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-02-06 09:26:23 +00:00
|
|
|
/* Re-read PTE now that we own the lock */
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
TempPte = *PointerPte;
|
|
|
|
if (TempPte.u.Hard.Valid == 1)
|
|
|
|
{
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Check if this was a write */
|
|
|
|
if (StoreInstruction)
|
|
|
|
{
|
|
|
|
/* Was it to a read-only page that is not copy on write? */
|
|
|
|
Pfn1 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
|
|
|
|
if (!(TempPte.u.Long & PTE_READWRITE) &&
|
|
|
|
!(Pfn1->OriginalPte.u.Soft.Protection & MM_READWRITE) &&
|
|
|
|
!(TempPte.u.Hard.CopyOnWrite))
|
|
|
|
{
|
|
|
|
/* Case not yet handled */
|
|
|
|
ASSERT(!IsSessionAddress);
|
|
|
|
|
|
|
|
/* Crash with distinguished bugcheck code */
|
|
|
|
KeBugCheckEx(ATTEMPTED_WRITE_TO_READONLY_MEMORY,
|
|
|
|
(ULONG_PTR)Address,
|
|
|
|
TempPte.u.Long,
|
|
|
|
(ULONG_PTR)TrapInformation,
|
|
|
|
12);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-08-01 07:54:37 +00:00
|
|
|
/* Check for read-only write in session space */
|
|
|
|
if ((IsSessionAddress) &&
|
|
|
|
(StoreInstruction) &&
|
|
|
|
!(TempPte.u.Hard.Write))
|
|
|
|
{
|
|
|
|
/* Sanity check */
|
|
|
|
ASSERT(MI_IS_SESSION_IMAGE_ADDRESS(Address));
|
|
|
|
|
|
|
|
/* Was this COW? */
|
|
|
|
if (TempPte.u.Hard.CopyOnWrite == 0)
|
|
|
|
{
|
|
|
|
/* Then this is not allowed */
|
|
|
|
KeBugCheckEx(ATTEMPTED_WRITE_TO_READONLY_MEMORY,
|
|
|
|
(ULONG_PTR)Address,
|
|
|
|
(ULONG_PTR)TempPte.u.Long,
|
|
|
|
(ULONG_PTR)TrapInformation,
|
|
|
|
13);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Otherwise, handle COW */
|
|
|
|
ASSERT(FALSE);
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-07-22 18:26:04 +00:00
|
|
|
/* Release the working set */
|
|
|
|
MiUnlockWorkingSet(CurrentThread, WorkingSet);
|
|
|
|
KeLowerIrql(LockIrql);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Otherwise, the PDE was probably invalid, and all is good now */
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
return STATUS_SUCCESS;
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Check one kind of prototype PTE */
|
|
|
|
if (TempPte.u.Soft.Prototype)
|
|
|
|
{
|
|
|
|
/* Make sure protected pool is on, and that this is a pool address */
|
|
|
|
if ((MmProtectFreedNonPagedPool) &&
|
|
|
|
(((Address >= MmNonPagedPoolStart) &&
|
|
|
|
(Address < (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
|
|
|
|
MmSizeOfNonPagedPoolInBytes))) ||
|
|
|
|
((Address >= MmNonPagedPoolExpansionStart) &&
|
|
|
|
(Address < MmNonPagedPoolEnd))))
|
|
|
|
{
|
|
|
|
/* Bad boy, bad boy, whatcha gonna do, whatcha gonna do when ARM3 comes for you! */
|
|
|
|
KeBugCheckEx(DRIVER_CAUGHT_MODIFYING_FREED_POOL,
|
|
|
|
(ULONG_PTR)Address,
|
|
|
|
StoreInstruction,
|
|
|
|
Mode,
|
|
|
|
4);
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-10-04 18:51:07 +00:00
|
|
|
/* Get the prototype PTE! */
|
|
|
|
ProtoPte = MiProtoPteToPte(&TempPte);
|
2012-07-21 19:07:11 +00:00
|
|
|
|
2012-08-01 07:54:37 +00:00
|
|
|
/* Do we need to locate the prototype PTE in session space? */
|
|
|
|
if ((IsSessionAddress) &&
|
|
|
|
(TempPte.u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED))
|
|
|
|
{
|
|
|
|
/* Yep, go find it as well as the VAD for it */
|
|
|
|
ProtoPte = MiCheckVirtualAddress(Address,
|
|
|
|
&ProtectionCode,
|
|
|
|
&Vad);
|
|
|
|
ASSERT(ProtoPte != NULL);
|
|
|
|
}
|
2010-08-29 19:13:08 +00:00
|
|
|
}
|
2010-10-19 18:57:30 +00:00
|
|
|
else
|
2010-12-26 15:23:03 +00:00
|
|
|
{
|
2012-02-06 09:26:23 +00:00
|
|
|
/* We don't implement transition PTEs */
|
2010-10-19 18:57:30 +00:00
|
|
|
ASSERT(TempPte.u.Soft.Transition == 0);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-10-19 18:57:30 +00:00
|
|
|
/* Check for no-access PTE */
|
|
|
|
if (TempPte.u.Soft.Protection == MM_NOACCESS)
|
|
|
|
{
|
2012-02-06 09:26:23 +00:00
|
|
|
/* Bugcheck the system! */
|
2010-10-19 18:57:30 +00:00
|
|
|
KeBugCheckEx(PAGE_FAULT_IN_NONPAGED_AREA,
|
|
|
|
(ULONG_PTR)Address,
|
|
|
|
StoreInstruction,
|
|
|
|
(ULONG_PTR)TrapInformation,
|
|
|
|
1);
|
|
|
|
}
|
2012-07-21 19:07:11 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Check for demand page */
|
|
|
|
if ((StoreInstruction) &&
|
|
|
|
!(ProtoPte) &&
|
|
|
|
!(IsSessionAddress) &&
|
|
|
|
!(TempPte.u.Hard.Valid))
|
|
|
|
{
|
|
|
|
/* Get the protection code */
|
|
|
|
ASSERT(TempPte.u.Soft.Transition == 0);
|
|
|
|
if (!(TempPte.u.Soft.Protection & MM_READWRITE))
|
2010-10-04 18:51:07 +00:00
|
|
|
{
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Bugcheck the system! */
|
|
|
|
KeBugCheckEx(ATTEMPTED_WRITE_TO_READONLY_MEMORY,
|
|
|
|
(ULONG_PTR)Address,
|
|
|
|
TempPte.u.Long,
|
|
|
|
(ULONG_PTR)TrapInformation,
|
|
|
|
14);
|
2010-10-04 18:51:07 +00:00
|
|
|
}
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-02-06 09:26:23 +00:00
|
|
|
/* Now do the real fault handling */
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
Status = MiDispatchFault(StoreInstruction,
|
|
|
|
Address,
|
|
|
|
PointerPte,
|
2010-10-04 18:51:07 +00:00
|
|
|
ProtoPte,
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
FALSE,
|
2012-02-06 14:32:07 +00:00
|
|
|
CurrentProcess,
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
TrapInformation,
|
|
|
|
NULL);
|
2010-07-22 18:26:04 +00:00
|
|
|
|
|
|
|
/* Release the working set */
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
ASSERT(KeAreAllApcsDisabled() == TRUE);
|
2010-07-22 18:26:04 +00:00
|
|
|
MiUnlockWorkingSet(CurrentThread, WorkingSet);
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
KeLowerIrql(LockIrql);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-02-06 09:26:23 +00:00
|
|
|
/* We are done! */
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
DPRINT("Fault resolved with status: %lx\n", Status);
|
|
|
|
return Status;
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-02-06 14:32:07 +00:00
|
|
|
/* This is a user fault */
|
2012-02-29 23:11:21 +00:00
|
|
|
UserFault:
|
2012-02-06 14:32:07 +00:00
|
|
|
CurrentThread = PsGetCurrentThread();
|
|
|
|
CurrentProcess = (PEPROCESS)CurrentThread->Tcb.ApcState.Process;
|
|
|
|
|
|
|
|
/* Lock the working set */
|
|
|
|
MiLockProcessWorkingSet(CurrentProcess, CurrentThread);
|
|
|
|
|
2012-02-05 17:19:58 +00:00
|
|
|
#if (_MI_PAGING_LEVELS == 4)
|
Two Part Patch which fixes ARM3 Section Support (not yet enabled). This had been enabled in the past for testing and resulted in bizarre crashes during testing. The amount of fixing required should reveal why:
Part 1: Page Fault Path Fixes
[NTOS]: As an optimization, someone seems to have had changed the MiResolveDemandZeroFault prototype not to require a PTE, and to instead take a protection mask directly. While clever, this broke support for ARM3 sections, because the code was now assuming that the protection of the PTE for the input address should be used -- while in NT Sections we instead use what are called ProtoType PTEs. This was very annoying to debug, but since the cause has been fixed, I've reverted back to the old convention in which the PTE is passed-in, and this can be a different PTE than the PTE for the address, as it should be.
[NTOS]: Due to the reverting of the original path, another optimization, in which MiResolveDemandZeroFault was being called directly instead of going through MiDispatchFault and writing an invalid demand-zero PDE has also been removed. PDE faults are now going through the correct, expected path.
[NTOS]: MiResolveDemandZeroFault was always creating Kernel PTEs. It should create User PTEs when necessary.
[NTOS]: MiDeletePte was assuming any prototype PTE is a forked PTE. Forked PTEs only happen when the addresses in the PTE don't match, so check for that too.
Part 2: ARM3 Section Object Fixes
[NTOS]: Fix issue when trying to make both ROS_SECTION_OBJECTs and NT's SECTION co-exist. We relied on the *caller* knowing what kind of section this is, and that can't be a good idea. Now, when the caller requests an ARM3 section vs a ROS section, we use a marker to detect what kind of section this is for later APIs.
[NTOS]: For section VADs, we were storing the ReactOS MEMORY_AREA in the ControlArea... however, the mappings of one individual section object share a single control area, even though they have multiple MEMORY_AREAs (one for each mapping). As such, we overwrote the MEMORY_AREA continuously, and at free-time, double or triple-freed the same memory area.
[NTOS]: Moved the MEMORY_AREA to the "Banked" field of the long VAD, instead of the ControlArea. Allocate MMVAD_LONGs for ARM3 sections for now, to support this. Also, after deleting the MEMORY_AREA while parsing VADs, we now use a special marker to detect double-frees, and we also use a special marker to make sure we have a Long VAD as expected.
svn path=/trunk/; revision=56035
2012-03-05 16:41:46 +00:00
|
|
|
// Note to Timo: You should call MiCheckVirtualAddress and also check if it's zero pte
|
|
|
|
// also this is missing the page count increment
|
2012-02-06 15:08:32 +00:00
|
|
|
/* Check if the PXE is valid */
|
|
|
|
if (PointerPxe->u.Hard.Valid == 0)
|
|
|
|
{
|
|
|
|
/* Right now, we only handle scenarios where the PXE is totally empty */
|
|
|
|
ASSERT(PointerPxe->u.Long == 0);
|
Two Part Patch which fixes ARM3 Section Support (not yet enabled). This had been enabled in the past for testing and resulted in bizare crashes during testing. The amount of fixing required should reveal why:
Part 1: Page Fault Path Fixes
[NTOS]: As an optimization, someone seems to have had changed the MiResolveDemandZeroFault prototype not to require a PTE, and to instead take a protection mask directly. While clever, this broke support for ARM3 sections, because the code was now assuming that the protection of the PTE for the input address should be used -- while in NT Sections we instead use what are called ProtoType PTEs. This was very annoying to debug, but since the cause has been fixed, I've reverted back to the old convention in which the PTE is passed-in, and this can be a different PTE than the PTE for the address, as it should be.
[NTOS]: Due to the reverting of the original path, another optimization, in which MiResolveDemandZeroFault was being called directly instead of going through MiDispatchFault and writing an invalid demand-zero PDE has also been removed. PDE faults are now going through the correct, expected path.
[NTOS]: MiResolveDemandZeroFault was always creating Kernel PTEs. It should create User PTEs when necessary.
[NTOS]: MiDeletePte was assuming any prototype PTE is a forked PTE. Forked PTEs only happen when the addresses in the PTE don't match, so check for that too.
Part 2: ARM3 Section Object Fixes
[NTOS]: Fix issue when trying to make both ROS_SECTION_OBJECTs and NT's SECTION co-exist. We relied on the *caller* knowing what kind of section this is, and that can't be a good idea. Now, when the caller requests an ARM3 section vs a ROS section, we use a marker to detect what kind of section this is for later APIs.
[NTOS]: For section VADs, we were storing the ReactOS MEMORY_AREA in the ControlArea... however, the mappings of one individual section object share a single control area, even though they have multiple MEMORY_AREAs (one for each mapping). As such, we overwrote the MEMORY_AREA continously, and at free-time, double or triple-freed the same memory area.
[NTOS]: Moved the MEMORY_AREA to the "Banked" field of the long VAD, instead of the ControlArea. Allocate MMVAD_LONGs for ARM3 sections for now, to support this. Also, after deleting the MEMORY_AREA while parsing VADs, we now use a special marker to detect double-frees, and we also use a special marker to make sure we have a Long VAD as expected.
svn path=/trunk/; revision=56035
2012-03-05 16:41:46 +00:00
|
|
|
#if 0
|
2012-02-06 15:08:32 +00:00
|
|
|
/* Resolve a demand zero fault */
|
|
|
|
Status = MiResolveDemandZeroFault(PointerPpe,
|
|
|
|
MM_READWRITE,
|
|
|
|
CurrentProcess,
|
|
|
|
MM_NOIRQL);
|
Two Part Patch which fixes ARM3 Section Support (not yet enabled). This had been enabled in the past for testing and resulted in bizare crashes during testing. The amount of fixing required should reveal why:
Part 1: Page Fault Path Fixes
[NTOS]: As an optimization, someone seems to have had changed the MiResolveDemandZeroFault prototype not to require a PTE, and to instead take a protection mask directly. While clever, this broke support for ARM3 sections, because the code was now assuming that the protection of the PTE for the input address should be used -- while in NT Sections we instead use what are called ProtoType PTEs. This was very annoying to debug, but since the cause has been fixed, I've reverted back to the old convention in which the PTE is passed-in, and this can be a different PTE than the PTE for the address, as it should be.
[NTOS]: Due to the reverting of the original path, another optimization, in which MiResolveDemandZeroFault was being called directly instead of going through MiDispatchFault and writing an invalid demand-zero PDE has also been removed. PDE faults are now going through the correct, expected path.
[NTOS]: MiResolveDemandZeroFault was always creating Kernel PTEs. It should create User PTEs when necessary.
[NTOS]: MiDeletePte was assuming any prototype PTE is a forked PTE. Forked PTEs only happen when the addresses in the PTE don't match, so check for that too.
Part 2: ARM3 Section Object Fixes
[NTOS]: Fix issue when trying to make both ROS_SECTION_OBJECTs and NT's SECTION co-exist. We relied on the *caller* knowing what kind of section this is, and that can't be a good idea. Now, when the caller requests an ARM3 section vs a ROS section, we use a marker to detect what kind of section this is for later APIs.
[NTOS]: For section VADs, we were storing the ReactOS MEMORY_AREA in the ControlArea... however, the mappings of one individual section object share a single control area, even though they have multiple MEMORY_AREAs (one for each mapping). As such, we overwrote the MEMORY_AREA continously, and at free-time, double or triple-freed the same memory area.
[NTOS]: Moved the MEMORY_AREA to the "Banked" field of the long VAD, instead of the ControlArea. Allocate MMVAD_LONGs for ARM3 sections for now, to support this. Also, after deleting the MEMORY_AREA while parsing VADs, we now use a special marker to detect double-frees, and we also use a special marker to make sure we have a Long VAD as expected.
svn path=/trunk/; revision=56035
2012-03-05 16:41:46 +00:00
|
|
|
#endif
|
2012-02-06 15:08:32 +00:00
|
|
|
/* We should come back with a valid PXE */
|
|
|
|
ASSERT(PointerPxe->u.Hard.Valid == 1);
|
|
|
|
}
|
2012-02-05 17:19:58 +00:00
|
|
|
#endif
|
2012-02-06 14:32:07 +00:00
|
|
|
|
2012-02-05 17:19:58 +00:00
|
|
|
#if (_MI_PAGING_LEVELS >= 3)
|
Two Part Patch which fixes ARM3 Section Support (not yet enabled). This had been enabled in the past for testing and resulted in bizare crashes during testing. The amount of fixing required should reveal why:
Part 1: Page Fault Path Fixes
[NTOS]: As an optimization, someone seems to have had changed the MiResolveDemandZeroFault prototype not to require a PTE, and to instead take a protection mask directly. While clever, this broke support for ARM3 sections, because the code was now assuming that the protection of the PTE for the input address should be used -- while in NT Sections we instead use what are called ProtoType PTEs. This was very annoying to debug, but since the cause has been fixed, I've reverted back to the old convention in which the PTE is passed-in, and this can be a different PTE than the PTE for the address, as it should be.
[NTOS]: Due to the reverting of the original path, another optimization, in which MiResolveDemandZeroFault was being called directly instead of going through MiDispatchFault and writing an invalid demand-zero PDE has also been removed. PDE faults are now going through the correct, expected path.
[NTOS]: MiResolveDemandZeroFault was always creating Kernel PTEs. It should create User PTEs when necessary.
[NTOS]: MiDeletePte was assuming any prototype PTE is a forked PTE. Forked PTEs only happen when the addresses in the PTE don't match, so check for that too.
Part 2: ARM3 Section Object Fixes
[NTOS]: Fix issue when trying to make both ROS_SECTION_OBJECTs and NT's SECTION co-exist. We relied on the *caller* knowing what kind of section this is, and that can't be a good idea. Now, when the caller requests an ARM3 section vs a ROS section, we use a marker to detect what kind of section this is for later APIs.
[NTOS]: For section VADs, we were storing the ReactOS MEMORY_AREA in the ControlArea... however, the mappings of one individual section object share a single control area, even though they have multiple MEMORY_AREAs (one for each mapping). As such, we overwrote the MEMORY_AREA continously, and at free-time, double or triple-freed the same memory area.
[NTOS]: Moved the MEMORY_AREA to the "Banked" field of the long VAD, instead of the ControlArea. Allocate MMVAD_LONGs for ARM3 sections for now, to support this. Also, after deleting the MEMORY_AREA while parsing VADs, we now use a special marker to detect double-frees, and we also use a special marker to make sure we have a Long VAD as expected.
svn path=/trunk/; revision=56035
2012-03-05 16:41:46 +00:00
|
|
|
// Note to Timo: You should call MiCheckVirtualAddress and also check if it's zero pte
|
|
|
|
// also this is missing the page count increment
|
2012-02-06 15:08:32 +00:00
|
|
|
/* Check if the PPE is valid */
|
|
|
|
if (PointerPpe->u.Hard.Valid == 0)
|
|
|
|
{
|
|
|
|
/* Right now, we only handle scenarios where the PPE is totally empty */
|
|
|
|
ASSERT(PointerPpe->u.Long == 0);
|
Two Part Patch which fixes ARM3 Section Support (not yet enabled). This had been enabled in the past for testing and resulted in bizare crashes during testing. The amount of fixing required should reveal why:
Part 1: Page Fault Path Fixes
[NTOS]: As an optimization, someone seems to have had changed the MiResolveDemandZeroFault prototype not to require a PTE, and to instead take a protection mask directly. While clever, this broke support for ARM3 sections, because the code was now assuming that the protection of the PTE for the input address should be used -- while in NT Sections we instead use what are called ProtoType PTEs. This was very annoying to debug, but since the cause has been fixed, I've reverted back to the old convention in which the PTE is passed-in, and this can be a different PTE than the PTE for the address, as it should be.
[NTOS]: Due to the reverting of the original path, another optimization, in which MiResolveDemandZeroFault was being called directly instead of going through MiDispatchFault and writing an invalid demand-zero PDE has also been removed. PDE faults are now going through the correct, expected path.
[NTOS]: MiResolveDemandZeroFault was always creating Kernel PTEs. It should create User PTEs when necessary.
[NTOS]: MiDeletePte was assuming any prototype PTE is a forked PTE. Forked PTEs only happen when the addresses in the PTE don't match, so check for that too.
Part 2: ARM3 Section Object Fixes
[NTOS]: Fix issue when trying to make both ROS_SECTION_OBJECTs and NT's SECTION co-exist. We relied on the *caller* knowing what kind of section this is, and that can't be a good idea. Now, when the caller requests an ARM3 section vs a ROS section, we use a marker to detect what kind of section this is for later APIs.
[NTOS]: For section VADs, we were storing the ReactOS MEMORY_AREA in the ControlArea... however, the mappings of one individual section object share a single control area, even though they have multiple MEMORY_AREAs (one for each mapping). As such, we overwrote the MEMORY_AREA continously, and at free-time, double or triple-freed the same memory area.
[NTOS]: Moved the MEMORY_AREA to the "Banked" field of the long VAD, instead of the ControlArea. Allocate MMVAD_LONGs for ARM3 sections for now, to support this. Also, after deleting the MEMORY_AREA while parsing VADs, we now use a special marker to detect double-frees, and we also use a special marker to make sure we have a Long VAD as expected.
svn path=/trunk/; revision=56035
2012-03-05 16:41:46 +00:00
|
|
|
#if 0
|
2012-02-06 15:08:32 +00:00
|
|
|
/* Resolve a demand zero fault */
|
|
|
|
Status = MiResolveDemandZeroFault(PointerPde,
|
|
|
|
MM_READWRITE,
|
|
|
|
CurrentProcess,
|
|
|
|
MM_NOIRQL);
|
Two Part Patch which fixes ARM3 Section Support (not yet enabled). This had been enabled in the past for testing and resulted in bizare crashes during testing. The amount of fixing required should reveal why:
Part 1: Page Fault Path Fixes
[NTOS]: As an optimization, someone seems to have had changed the MiResolveDemandZeroFault prototype not to require a PTE, and to instead take a protection mask directly. While clever, this broke support for ARM3 sections, because the code was now assuming that the protection of the PTE for the input address should be used -- while in NT Sections we instead use what are called ProtoType PTEs. This was very annoying to debug, but since the cause has been fixed, I've reverted back to the old convention in which the PTE is passed-in, and this can be a different PTE than the PTE for the address, as it should be.
[NTOS]: Due to the reverting of the original path, another optimization, in which MiResolveDemandZeroFault was being called directly instead of going through MiDispatchFault and writing an invalid demand-zero PDE has also been removed. PDE faults are now going through the correct, expected path.
[NTOS]: MiResolveDemandZeroFault was always creating Kernel PTEs. It should create User PTEs when necessary.
[NTOS]: MiDeletePte was assuming any prototype PTE is a forked PTE. Forked PTEs only happen when the addresses in the PTE don't match, so check for that too.
Part 2: ARM3 Section Object Fixes
[NTOS]: Fix issue when trying to make both ROS_SECTION_OBJECTs and NT's SECTION co-exist. We relied on the *caller* knowing what kind of section this is, and that can't be a good idea. Now, when the caller requests an ARM3 section vs a ROS section, we use a marker to detect what kind of section this is for later APIs.
[NTOS]: For section VADs, we were storing the ReactOS MEMORY_AREA in the ControlArea... however, the mappings of one individual section object share a single control area, even though they have multiple MEMORY_AREAs (one for each mapping). As such, we overwrote the MEMORY_AREA continously, and at free-time, double or triple-freed the same memory area.
[NTOS]: Moved the MEMORY_AREA to the "Banked" field of the long VAD, instead of the ControlArea. Allocate MMVAD_LONGs for ARM3 sections for now, to support this. Also, after deleting the MEMORY_AREA while parsing VADs, we now use a special marker to detect double-frees, and we also use a special marker to make sure we have a Long VAD as expected.
svn path=/trunk/; revision=56035
2012-03-05 16:41:46 +00:00
|
|
|
#endif
|
2012-02-06 15:08:32 +00:00
|
|
|
/* We should come back with a valid PPE */
|
|
|
|
ASSERT(PointerPpe->u.Hard.Valid == 1);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Check if the PDE is valid */
|
2010-07-22 18:37:27 +00:00
|
|
|
if (PointerPde->u.Hard.Valid == 0)
|
|
|
|
{
|
|
|
|
/* Right now, we only handle scenarios where the PDE is totally empty */
|
|
|
|
ASSERT(PointerPde->u.Long == 0);
|
|
|
|
|
|
|
|
/* And go dispatch the fault on the PDE. This should handle the demand-zero */
|
2010-11-02 15:16:22 +00:00
|
|
|
#if MI_TRACE_PFNS
|
|
|
|
UserPdeFault = TRUE;
|
|
|
|
#endif
|
Two Part Patch which fixes ARM3 Section Support (not yet enabled). This had been enabled in the past for testing and resulted in bizarre crashes during testing. The amount of fixing required should reveal why:
Part 1: Page Fault Path Fixes
[NTOS]: As an optimization, someone seems to have had changed the MiResolveDemandZeroFault prototype not to require a PTE, and to instead take a protection mask directly. While clever, this broke support for ARM3 sections, because the code was now assuming that the protection of the PTE for the input address should be used -- while in NT Sections we instead use what are called ProtoType PTEs. This was very annoying to debug, but since the cause has been fixed, I've reverted back to the old convention in which the PTE is passed-in, and this can be a different PTE than the PTE for the address, as it should be.
[NTOS]: Due to the reverting of the original path, another optimization, in which MiResolveDemandZeroFault was being called directly instead of going through MiDispatchFault and writing an invalid demand-zero PDE has also been removed. PDE faults are now going through the correct, expected path.
[NTOS]: MiResolveDemandZeroFault was always creating Kernel PTEs. It should create User PTEs when necessary.
[NTOS]: MiDeletePte was assuming any prototype PTE is a forked PTE. Forked PTEs only happen when the addresses in the PTE don't match, so check for that too.
Part 2: ARM3 Section Object Fixes
[NTOS]: Fix issue when trying to make both ROS_SECTION_OBJECTs and NT's SECTION co-exist. We relied on the *caller* knowing what kind of section this is, and that can't be a good idea. Now, when the caller requests an ARM3 section vs a ROS section, we use a marker to detect what kind of section this is for later APIs.
[NTOS]: For section VADs, we were storing the ReactOS MEMORY_AREA in the ControlArea... however, the mappings of one individual section object share a single control area, even though they have multiple MEMORY_AREAs (one for each mapping). As such, we overwrote the MEMORY_AREA continuously, and at free-time, double or triple-freed the same memory area.
[NTOS]: Moved the MEMORY_AREA to the "Banked" field of the long VAD, instead of the ControlArea. Allocate MMVAD_LONGs for ARM3 sections for now, to support this. Also, after deleting the MEMORY_AREA while parsing VADs, we now use a special marker to detect double-frees, and we also use a special marker to make sure we have a Long VAD as expected.
svn path=/trunk/; revision=56035
2012-03-05 16:41:46 +00:00
|
|
|
MiCheckVirtualAddress(Address, &ProtectionCode, &Vad);
|
|
|
|
if (ProtectionCode == MM_NOACCESS)
|
|
|
|
{
|
|
|
|
#if (_MI_PAGING_LEVELS == 2)
|
|
|
|
/* Could be a page table for paged pool */
|
|
|
|
MiCheckPdeForPagedPool(Address);
|
|
|
|
#endif
|
|
|
|
/* Has the code above changed anything -- is this now a valid PTE? */
|
2012-03-31 22:45:17 +00:00
|
|
|
Status = (PointerPde->u.Hard.Valid == 1) ? STATUS_SUCCESS : STATUS_ACCESS_VIOLATION;
|
Two Part Patch which fixes ARM3 Section Support (not yet enabled). This had been enabled in the past for testing and resulted in bizarre crashes during testing. The amount of fixing required should reveal why:
Part 1: Page Fault Path Fixes
[NTOS]: As an optimization, someone seems to have had changed the MiResolveDemandZeroFault prototype not to require a PTE, and to instead take a protection mask directly. While clever, this broke support for ARM3 sections, because the code was now assuming that the protection of the PTE for the input address should be used -- while in NT Sections we instead use what are called ProtoType PTEs. This was very annoying to debug, but since the cause has been fixed, I've reverted back to the old convention in which the PTE is passed-in, and this can be a different PTE than the PTE for the address, as it should be.
[NTOS]: Due to the reverting of the original path, another optimization, in which MiResolveDemandZeroFault was being called directly instead of going through MiDispatchFault and writing an invalid demand-zero PDE has also been removed. PDE faults are now going through the correct, expected path.
[NTOS]: MiResolveDemandZeroFault was always creating Kernel PTEs. It should create User PTEs when necessary.
[NTOS]: MiDeletePte was assuming any prototype PTE is a forked PTE. Forked PTEs only happen when the addresses in the PTE don't match, so check for that too.
Part 2: ARM3 Section Object Fixes
[NTOS]: Fix issue when trying to make both ROS_SECTION_OBJECTs and NT's SECTION co-exist. We relied on the *caller* knowing what kind of section this is, and that can't be a good idea. Now, when the caller requests an ARM3 section vs a ROS section, we use a marker to detect what kind of section this is for later APIs.
[NTOS]: For section VADs, we were storing the ReactOS MEMORY_AREA in the ControlArea... however, the mappings of one individual section object share a single control area, even though they have multiple MEMORY_AREAs (one for each mapping). As such, we overwrote the MEMORY_AREA continously, and at free-time, double or triple-freed the same memory area.
[NTOS]: Moved the MEMORY_AREA to the "Banked" field of the long VAD, instead of the ControlArea. Allocate MMVAD_LONGs for ARM3 sections for now, to support this. Also, after deleting the MEMORY_AREA while parsing VADs, we now use a special marker to detect double-frees, and we also use a special marker to make sure we have a Long VAD as expected.
svn path=/trunk/; revision=56035
2012-03-05 16:41:46 +00:00
|
|
|
|
|
|
|
/* Either this was a bogus VA or we've fixed up a paged pool PDE */
|
|
|
|
MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
|
|
|
|
return Status;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Write a demand-zero PDE */
|
|
|
|
MI_WRITE_INVALID_PTE(PointerPde, DemandZeroPde);
|
|
|
|
|
|
|
|
/* Dispatch the fault */
|
|
|
|
Status = MiDispatchFault(TRUE,
|
|
|
|
PointerPte,
|
|
|
|
PointerPde,
|
|
|
|
NULL,
|
|
|
|
FALSE,
|
|
|
|
PsGetCurrentProcess(),
|
|
|
|
TrapInformation,
|
|
|
|
NULL);
|
2010-11-02 15:16:22 +00:00
|
|
|
#if MI_TRACE_PFNS
|
|
|
|
UserPdeFault = FALSE;
|
|
|
|
#endif
|
2010-07-22 18:37:27 +00:00
|
|
|
/* We should come back with APCs enabled, and with a valid PDE */
|
|
|
|
ASSERT(KeAreAllApcsDisabled() == TRUE);
|
|
|
|
ASSERT(PointerPde->u.Hard.Valid == 1);
|
|
|
|
}
|
2012-07-21 19:07:11 +00:00
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Not yet implemented in ReactOS */
|
|
|
|
ASSERT(MI_IS_PAGE_LARGE(PointerPde) == FALSE);
|
|
|
|
}
|
2010-07-22 18:37:27 +00:00
|
|
|
|
2010-10-07 17:27:23 +00:00
|
|
|
/* Now capture the PTE. Ignore virtual faults for now */
|
2010-07-22 18:37:27 +00:00
|
|
|
TempPte = *PointerPte;
|
2010-10-07 17:27:23 +00:00
|
|
|
ASSERT(TempPte.u.Hard.Valid == 0);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-10-07 17:27:23 +00:00
|
|
|
/* Quick check for demand-zero */
|
|
|
|
if (TempPte.u.Long == (MM_READWRITE << MM_PTE_SOFTWARE_PROTECTION_BITS))
|
|
|
|
{
|
|
|
|
/* Resolve the fault */
|
|
|
|
MiResolveDemandZeroFault(Address,
|
Two Part Patch which fixes ARM3 Section Support (not yet enabled). This had been enabled in the past for testing and resulted in bizarre crashes during testing. The amount of fixing required should reveal why:
Part 1: Page Fault Path Fixes
[NTOS]: As an optimization, someone seems to have had changed the MiResolveDemandZeroFault prototype not to require a PTE, and to instead take a protection mask directly. While clever, this broke support for ARM3 sections, because the code was now assuming that the protection of the PTE for the input address should be used -- while in NT Sections we instead use what are called ProtoType PTEs. This was very annoying to debug, but since the cause has been fixed, I've reverted back to the old convention in which the PTE is passed-in, and this can be a different PTE than the PTE for the address, as it should be.
[NTOS]: Due to the reverting of the original path, another optimization, in which MiResolveDemandZeroFault was being called directly instead of going through MiDispatchFault and writing an invalid demand-zero PDE has also been removed. PDE faults are now going through the correct, expected path.
[NTOS]: MiResolveDemandZeroFault was always creating Kernel PTEs. It should create User PTEs when necessary.
[NTOS]: MiDeletePte was assuming any prototype PTE is a forked PTE. Forked PTEs only happen when the addresses in the PTE don't match, so check for that too.
Part 2: ARM3 Section Object Fixes
[NTOS]: Fix issue when trying to make both ROS_SECTION_OBJECTs and NT's SECTION co-exist. We relied on the *caller* knowing what kind of section this is, and that can't be a good idea. Now, when the caller requests an ARM3 section vs a ROS section, we use a marker to detect what kind of section this is for later APIs.
[NTOS]: For section VADs, we were storing the ReactOS MEMORY_AREA in the ControlArea... however, the mappings of one individual section object share a single control area, even though they have multiple MEMORY_AREAs (one for each mapping). As such, we overwrote the MEMORY_AREA continously, and at free-time, double or triple-freed the same memory area.
[NTOS]: Moved the MEMORY_AREA to the "Banked" field of the long VAD, instead of the ControlArea. Allocate MMVAD_LONGs for ARM3 sections for now, to support this. Also, after deleting the MEMORY_AREA while parsing VADs, we now use a special marker to detect double-frees, and we also use a special marker to make sure we have a Long VAD as expected.
svn path=/trunk/; revision=56035
2012-03-05 16:41:46 +00:00
|
|
|
PointerPte,
|
2010-10-07 17:27:23 +00:00
|
|
|
CurrentProcess,
|
|
|
|
MM_NOIRQL);
|
|
|
|
|
|
|
|
/* Return the status */
|
|
|
|
MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
|
|
|
|
return STATUS_PAGE_FAULT_DEMAND_ZERO;
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Check for zero PTE */
|
|
|
|
if (TempPte.u.Long == 0)
|
[NTOS]: A few key changes to the page fault path:
1) MiCheckVirtualAddress should be called *after* determining if the PTE is a Demand Zero PTE. This is because when memory is allocated with MEM_RESERVE, and then MEM_COMMIT is called later, the VAD does not have the MemCommit flag set to TRUE. As such, MiCheckVirtualAddress returns MM_NOACCESS for the VAD (even though one is found) and the demand zero fault results in an access violation. Double-checked with Windows and this is the right behavior.
2) MiCheckVirtualAddress now supports non-commited reserve VADs (ie: trying to access MEM_RESERVE memory). It used to ASSERT, now it returns MM_NOACCESS so an access violation is raised. Before change #1, this would also happen if MEM_COMMIT was later performed on the ranges, but this is now fixed.
3) When calling MiResolveDemandZeroFault, we should not make the PDE a demand zero PDE. This is senseless. The whole point is that the PDE does exist, and MiInitializePfn needs it to keep track of the page table allocation. Removed the nonsensical line of code which cleared the PDE during a demand-zero fault.
I am able to boot to 3rd stage with these changes, so I have seen no regressions. Additionally, with these changes, the as-of-yet-uncommitted VAD-based Virtual Memory code completes 1st stage setup successfully, instead of instantly crashing on boot.
svn path=/trunk/; revision=55894
2012-02-27 23:42:22 +00:00
|
|
|
{
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Check if this address range belongs to a valid allocation (VAD) */
|
|
|
|
ProtoPte = MiCheckVirtualAddress(Address, &ProtectionCode, &Vad);
|
|
|
|
if (ProtectionCode == MM_NOACCESS)
|
|
|
|
{
|
2012-02-29 23:11:21 +00:00
|
|
|
#if (_MI_PAGING_LEVELS == 2)
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Could be a page table for paged pool */
|
|
|
|
MiCheckPdeForPagedPool(Address);
|
2012-02-29 23:11:21 +00:00
|
|
|
#endif
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Has the code above changed anything -- is this now a valid PTE? */
|
|
|
|
Status = (PointerPte->u.Hard.Valid == 1) ? STATUS_SUCCESS : STATUS_ACCESS_VIOLATION;
|
2012-02-29 23:11:21 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Either this was a bogus VA or we've fixed up a paged pool PDE */
|
|
|
|
MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
|
|
|
|
return Status;
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* No guard page support yet */
|
|
|
|
ASSERT((ProtectionCode & MM_DECOMMIT) == 0);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/*
|
|
|
|
* Check if this is a real user-mode address or actually a kernel-mode
|
|
|
|
* page table for a user mode address
|
|
|
|
*/
|
|
|
|
if (Address <= MM_HIGHEST_USER_ADDRESS)
|
|
|
|
{
|
|
|
|
/* Add an additional page table reference */
|
2012-09-03 16:29:31 +00:00
|
|
|
MiIncrementPageTableReferences(Address);
|
2012-07-21 19:07:11 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Did we get a prototype PTE back? */
|
|
|
|
if (!ProtoPte)
|
|
|
|
{
|
|
|
|
/* Is this PTE actually part of the PDE-PTE self-mapping directory? */
|
|
|
|
if (PointerPde == MiAddressToPde(PTE_BASE))
|
|
|
|
{
|
|
|
|
/* Then it's really a demand-zero PDE (on behalf of user-mode) */
|
|
|
|
MI_WRITE_INVALID_PTE(PointerPte, DemandZeroPde);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* No, create a new PTE. First, write the protection */
|
|
|
|
PointerPte->u.Soft.Protection = ProtectionCode;
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Lock the PFN database since we're going to grab a page */
|
|
|
|
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
|
2010-07-22 18:37:27 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Make sure we have enough pages */
|
|
|
|
ASSERT(MmAvailablePages >= 32);
|
2012-02-29 23:11:21 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Try to get a zero page */
|
|
|
|
MI_SET_USAGE(MI_USAGE_PEB_TEB);
|
|
|
|
MI_SET_PROCESS2(CurrentProcess->ImageFileName);
|
|
|
|
Color = MI_GET_NEXT_PROCESS_COLOR(CurrentProcess);
|
|
|
|
PageFrameIndex = MiRemoveZeroPageSafe(Color);
|
|
|
|
if (!PageFrameIndex)
|
|
|
|
{
|
|
|
|
/* Grab a page out of there. Later we should grab a colored zero page */
|
|
|
|
PageFrameIndex = MiRemoveAnyPage(Color);
|
|
|
|
ASSERT(PageFrameIndex);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Release the lock since we need to do some zeroing */
|
|
|
|
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
|
2010-07-22 18:37:27 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Zero out the page, since it's for user-mode */
|
|
|
|
MiZeroPfn(PageFrameIndex);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Grab the lock again so we can initialize the PFN entry */
|
|
|
|
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Initialize the PFN entry now */
|
|
|
|
MiInitializePfn(PageFrameIndex, PointerPte, 1);
|
2010-07-22 18:37:27 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* And we're done with the lock */
|
2010-09-29 01:10:28 +00:00
|
|
|
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
|
2010-07-22 18:37:27 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Increment the count of pages in the process */
|
|
|
|
CurrentProcess->NumberOfPrivatePages++;
|
2010-07-22 18:37:27 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* One more demand-zero fault */
|
|
|
|
InterlockedIncrement(&KeGetCurrentPrcb()->MmDemandZeroCount);
|
2010-07-22 18:37:27 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Fault on user PDE, or fault on user PTE? */
|
|
|
|
if (PointerPte <= MiHighestUserPte)
|
|
|
|
{
|
|
|
|
/* User fault, build a user PTE */
|
|
|
|
MI_MAKE_HARDWARE_PTE_USER(&TempPte,
|
|
|
|
PointerPte,
|
|
|
|
PointerPte->u.Soft.Protection,
|
|
|
|
PageFrameIndex);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* This is a user-mode PDE, create a kernel PTE for it */
|
|
|
|
MI_MAKE_HARDWARE_PTE(&TempPte,
|
|
|
|
PointerPte,
|
|
|
|
PointerPte->u.Soft.Protection,
|
|
|
|
PageFrameIndex);
|
|
|
|
}
|
2010-07-22 18:37:27 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Write the dirty bit for writeable pages */
|
|
|
|
if (MI_IS_PAGE_WRITEABLE(&TempPte)) MI_MAKE_DIRTY_PAGE(&TempPte);
|
2012-02-06 09:26:23 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* And now write down the PTE, making the address valid */
|
|
|
|
MI_WRITE_VALID_PTE(PointerPte, TempPte);
|
|
|
|
Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
|
|
|
|
ASSERT(Pfn1->u1.Event == NULL);
|
2010-07-22 18:37:27 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Demand zero */
|
|
|
|
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
|
|
|
|
MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
|
|
|
|
return STATUS_PAGE_FAULT_DEMAND_ZERO;
|
2012-02-29 23:11:21 +00:00
|
|
|
}
|
2010-07-22 20:52:23 +00:00
|
|
|
|
2010-10-05 08:14:02 +00:00
|
|
|
/* No guard page support yet */
|
|
|
|
ASSERT((ProtectionCode & MM_DECOMMIT) == 0);
|
|
|
|
ASSERT(ProtectionCode != 0x100);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-07-22 20:52:23 +00:00
|
|
|
/* Write the prototype PTE */
|
|
|
|
TempPte = PrototypePte;
|
|
|
|
TempPte.u.Soft.Protection = ProtectionCode;
|
|
|
|
MI_WRITE_INVALID_PTE(PointerPte, TempPte);
|
2010-07-22 18:37:27 +00:00
|
|
|
}
|
2012-07-21 19:07:11 +00:00
|
|
|
else
|
|
|
|
{
|
2012-08-03 11:34:35 +00:00
|
|
|
/* Get the protection code and check if this is a proto PTE */
|
|
|
|
ProtectionCode = TempPte.u.Soft.Protection;
|
|
|
|
if (TempPte.u.Soft.Prototype)
|
|
|
|
{
|
|
|
|
/* Do we need to go find the real PTE? */
|
|
|
|
if (TempPte.u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)
|
|
|
|
{
|
|
|
|
/* Get the prototype pte and VAD for it */
|
|
|
|
ProtoPte = MiCheckVirtualAddress(Address,
|
|
|
|
&ProtectionCode,
|
|
|
|
&Vad);
|
|
|
|
if (!ProtoPte)
|
|
|
|
{
|
|
|
|
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
|
|
|
|
MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
|
|
|
|
return STATUS_ACCESS_VIOLATION;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Get the prototype PTE! */
|
|
|
|
ProtoPte = MiProtoPteToPte(&TempPte);
|
|
|
|
|
|
|
|
/* Is it read-only */
|
|
|
|
if (TempPte.u.Proto.ReadOnly)
|
|
|
|
{
|
|
|
|
/* Set read-only code */
|
|
|
|
ProtectionCode = MM_READONLY;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Set unknown protection */
|
|
|
|
ProtectionCode = 0x100;
|
|
|
|
ASSERT(CurrentProcess->CloneRoot != NULL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2012-07-21 19:07:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* FIXME: Run MiAccessCheck */
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-07-21 19:07:11 +00:00
|
|
|
/* Dispatch the fault */
|
|
|
|
Status = MiDispatchFault(StoreInstruction,
|
|
|
|
Address,
|
|
|
|
PointerPte,
|
|
|
|
ProtoPte,
|
|
|
|
FALSE,
|
|
|
|
CurrentProcess,
|
|
|
|
TrapInformation,
|
|
|
|
Vad);
|
|
|
|
|
|
|
|
/* Return the status */
|
|
|
|
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
|
2010-07-22 18:26:04 +00:00
|
|
|
MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
|
2010-07-22 20:52:23 +00:00
|
|
|
return Status;
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
}
|
|
|
|
|
2012-02-18 23:59:31 +00:00
|
|
|
NTSTATUS
|
|
|
|
NTAPI
|
2012-02-20 06:42:02 +00:00
|
|
|
MmGetExecuteOptions(IN PULONG ExecuteOptions)
|
2012-02-18 23:59:31 +00:00
|
|
|
{
|
2012-02-20 06:42:02 +00:00
|
|
|
PKPROCESS CurrentProcess = &PsGetCurrentProcess()->Pcb;
|
|
|
|
ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
|
2012-02-18 23:59:31 +00:00
|
|
|
|
2012-02-20 06:42:02 +00:00
|
|
|
*ExecuteOptions = 0;
|
2012-02-28 17:50:21 +00:00
|
|
|
|
2012-02-20 06:42:02 +00:00
|
|
|
if (CurrentProcess->Flags.ExecuteDisable)
|
|
|
|
{
|
|
|
|
*ExecuteOptions |= MEM_EXECUTE_OPTION_DISABLE;
|
|
|
|
}
|
2012-02-28 17:50:21 +00:00
|
|
|
|
2012-02-20 06:42:02 +00:00
|
|
|
if (CurrentProcess->Flags.ExecuteEnable)
|
|
|
|
{
|
|
|
|
*ExecuteOptions |= MEM_EXECUTE_OPTION_ENABLE;
|
|
|
|
}
|
2012-02-28 17:50:21 +00:00
|
|
|
|
2012-02-20 06:42:02 +00:00
|
|
|
if (CurrentProcess->Flags.DisableThunkEmulation)
|
|
|
|
{
|
|
|
|
*ExecuteOptions |= MEM_EXECUTE_OPTION_DISABLE_THUNK_EMULATION;
|
|
|
|
}
|
2012-02-28 17:50:21 +00:00
|
|
|
|
2012-02-20 06:42:02 +00:00
|
|
|
if (CurrentProcess->Flags.Permanent)
|
|
|
|
{
|
|
|
|
*ExecuteOptions |= MEM_EXECUTE_OPTION_PERMANENT;
|
|
|
|
}
|
2012-02-28 17:50:21 +00:00
|
|
|
|
2012-02-20 06:42:02 +00:00
|
|
|
if (CurrentProcess->Flags.ExecuteDispatchEnable)
|
|
|
|
{
|
|
|
|
*ExecuteOptions |= MEM_EXECUTE_OPTION_EXECUTE_DISPATCH_ENABLE;
|
|
|
|
}
|
2012-02-28 17:50:21 +00:00
|
|
|
|
2012-02-20 06:42:02 +00:00
|
|
|
if (CurrentProcess->Flags.ImageDispatchEnable)
|
|
|
|
{
|
|
|
|
*ExecuteOptions |= MEM_EXECUTE_OPTION_IMAGE_DISPATCH_ENABLE;
|
|
|
|
}
|
2012-02-28 17:50:21 +00:00
|
|
|
|
2012-02-20 06:42:02 +00:00
|
|
|
return STATUS_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * MmSetExecuteOptions
 *
 * Applies the given MEM_EXECUTE_OPTION_* flags to the current process'
 * no-execute (NX) state, under the process lock.
 *
 * ExecuteOptions - Combination of MEM_EXECUTE_OPTION_* flags to apply.
 *
 * Returns STATUS_SUCCESS on success, STATUS_INVALID_PARAMETER if any flag
 * outside MEM_EXECUTE_OPTION_VALID_FLAGS is passed, or STATUS_ACCESS_DENIED
 * if a previous call made the settings permanent (the Permanent flag latches
 * the NX state and blocks all further changes).
 * Must be called at PASSIVE_LEVEL.
 */
NTSTATUS
NTAPI
MmSetExecuteOptions(IN ULONG ExecuteOptions)
{
    PKPROCESS CurrentProcess = &PsGetCurrentProcess()->Pcb;
    KLOCK_QUEUE_HANDLE ProcessLock;
    /* Default result: returned unchanged when the permanent flag is set */
    NTSTATUS Status = STATUS_ACCESS_DENIED;
    ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);

    /* Only accept valid flags */
    if (ExecuteOptions & ~MEM_EXECUTE_OPTION_VALID_FLAGS)
    {
        /* Fail */
        DPRINT1("Invalid no-execute options\n");
        return STATUS_INVALID_PARAMETER;
    }

    /* Change the NX state in the process lock */
    KiAcquireProcessLock(CurrentProcess, &ProcessLock);

    /* Don't change anything if the permanent flag was set */
    if (!CurrentProcess->Flags.Permanent)
    {
        /* Start by assuming it's not disabled */
        CurrentProcess->Flags.ExecuteDisable = FALSE;

        /* Now process each flag and turn the equivalent bit on */
        if (ExecuteOptions & MEM_EXECUTE_OPTION_DISABLE)
        {
            CurrentProcess->Flags.ExecuteDisable = TRUE;
        }
        if (ExecuteOptions & MEM_EXECUTE_OPTION_ENABLE)
        {
            CurrentProcess->Flags.ExecuteEnable = TRUE;
        }
        if (ExecuteOptions & MEM_EXECUTE_OPTION_DISABLE_THUNK_EMULATION)
        {
            CurrentProcess->Flags.DisableThunkEmulation = TRUE;
        }
        if (ExecuteOptions & MEM_EXECUTE_OPTION_PERMANENT)
        {
            /* Once set, this flag makes all future calls fail above */
            CurrentProcess->Flags.Permanent = TRUE;
        }
        if (ExecuteOptions & MEM_EXECUTE_OPTION_EXECUTE_DISPATCH_ENABLE)
        {
            CurrentProcess->Flags.ExecuteDispatchEnable = TRUE;
        }
        if (ExecuteOptions & MEM_EXECUTE_OPTION_IMAGE_DISPATCH_ENABLE)
        {
            CurrentProcess->Flags.ImageDispatchEnable = TRUE;
        }

        /* These are turned on by default if no-execution is also enabled */
        if (CurrentProcess->Flags.ExecuteEnable)
        {
            CurrentProcess->Flags.ExecuteDispatchEnable = TRUE;
            CurrentProcess->Flags.ImageDispatchEnable = TRUE;
        }

        /* All good */
        Status = STATUS_SUCCESS;
    }

    /* Release the lock and return status */
    KiReleaseProcessLock(&ProcessLock);
    return Status;
}
|
|
|
|
|
- Implement ARM3 page fault handling.
- Paged pool PTEs are demand zero PTEs while the memory hasn't been accessed -- this is the only type of fault supported.
- Because paged pool PDEs are also demand-paged, added code to handle demand paging of PDEs as well.
- Also, because paged pool is non-resident, but can be accessed from any process, we need a mechanism to sync up the kernel's page directory with the per-process one, on demand. This is done at startup, but other processes may have paged in paged pool that another process knows nothing about when he faults.
- Similar to the hack ReactOS Mm uses, but done properly.
- This is what that shadow system page directory is finally being used for.
- Assert if we get a user-mode fault, a transition fault, or a soft fault, since these shouldn't happen.
- Disable APCs while dispatching faults, and pseudo-use the working set lock.
- Assert if we get write errors on read-only pages, since we don't use those in ARM3 yet.
- Assert if we have a paged out PTE, this shouldn't happen yet.
- Enable test to see if we can touch a paged pool allocation.
svn path=/trunk/; revision=43507
2009-10-15 22:08:26 +00:00
|
|
|
/* EOF */
|