[NTOSKRNL]: Support a few more "unsupported" paths that were hitting ASSERTs before.

[NTOSKRNL]: Massively clean up triplicated code into a single series of macros/inlines.
[NTOSKRNL]: Handle more complex prototype PTE faults that are not just demand-zero.
In my private branch, ReactOS now runs with *all* pagefile-backed sections handled in ARM3 mode.

svn path=/trunk/; revision=57033
Alex Ionescu 2012-08-03 11:34:35 +00:00
parent 592e0968b0
commit 266eb48d75
5 changed files with 431 additions and 275 deletions
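For context, the "triplicated code" mentioned above is the open-coded reference-count/lock-count bookkeeping that the diff below collapses into FORCEINLINE helpers such as MiReferenceProbedPageAndBumpLockCount and MiDereferencePfnAndDropLockCount. The following is a minimal user-mode sketch of that pattern only; MOCK_PFN, MockSystemLockPagesCount, and the function name are illustrative stand-ins, not kernel types or APIs, and the real code uses InterlockedCompareExchange16 on Pfn1->u3.e2.ReferenceCount plus InterlockedIncrementSizeT/InterlockedDecrementSizeT on MmSystemLockPagesCount.

/* Minimal user-mode model of the consolidated lock/reference pattern.
 * MOCK_PFN and MockSystemLockPagesCount are stand-ins, not kernel types. */
#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

typedef struct _MOCK_PFN
{
    _Atomic uint16_t ReferenceCount;        /* models Pfn1->u3.e2.ReferenceCount */
} MOCK_PFN;

static _Atomic size_t MockSystemLockPagesCount; /* models MmSystemLockPagesCount */

static void ReferenceProbedPageAndBumpLockCount(MOCK_PFN *Pfn)
{
    uint16_t OldRefCount;

    /* Optimistically count one more locked page */
    atomic_fetch_add(&MockSystemLockPagesCount, 1);

    /* CAS loop: bump the per-page reference count by one */
    do
    {
        OldRefCount = atomic_load(&Pfn->ReferenceCount);
        assert(OldRefCount != 0);
    } while (!atomic_compare_exchange_weak(&Pfn->ReferenceCount,
                                           &OldRefCount,
                                           (uint16_t)(OldRefCount + 1)));

    /* Not the first lock reference? Then undo the global bump */
    if (OldRefCount != 1) atomic_fetch_sub(&MockSystemLockPagesCount, 1);
}

The point of the consolidation is that every caller gets the same "undo the global bump unless this was the first lock reference" rule, instead of three hand-rolled copies of the CAS loop.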

View file

@@ -602,7 +602,6 @@ MmProbeAndLockPages(IN PMDL Mdl,
PFN_NUMBER PageFrameIndex;
BOOLEAN UsePfnLock;
KIRQL OldIrql;
USHORT OldRefCount, RefCount;
PMMPFN Pfn1;
DPRINT("Probing MDL: %p\n", Mdl);
@@ -998,45 +997,7 @@ MmProbeAndLockPages(IN PMDL Mdl,
if (CurrentProcess) ASSERT(CurrentProcess->PhysicalVadRoot == NULL);
/* This address should already exist and be fully valid */
ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
if (MI_IS_ROS_PFN(Pfn1))
{
/* ReactOS Mm doesn't track share count */
ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
}
else
{
/* On ARM3 pages, we should see a valid share count */
ASSERT((Pfn1->u2.ShareCount != 0) && (Pfn1->u3.e1.PageLocation == ActiveAndValid));
/* We don't support mapping a prototype page yet */
ASSERT((Pfn1->u3.e1.PrototypePte == 0) && (Pfn1->OriginalPte.u.Soft.Prototype == 0));
}
/* More locked pages! */
InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, 1);
/* Loop trying to update the reference count */
do
{
/* Get the current reference count, make sure it's valid */
OldRefCount = Pfn1->u3.e2.ReferenceCount;
ASSERT(OldRefCount != 0);
ASSERT(OldRefCount < 2500);
/* Bump it up by one */
RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
OldRefCount + 1,
OldRefCount);
ASSERT(RefCount != 0);
} while (OldRefCount != RefCount);
/* Was this the first lock attempt? */
if (OldRefCount != 1)
{
/* Someone else came through */
InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
}
MiReferenceProbedPageAndBumpLockCount(Pfn1);
}
else
{
@@ -1131,7 +1092,6 @@ MmUnlockPages(IN PMDL Mdl)
PVOID Base;
ULONG Flags, PageCount;
KIRQL OldIrql;
USHORT RefCount, OldRefCount;
PMMPFN Pfn1;
DPRINT("Unlocking MDL: %p\n", Mdl);
@@ -1198,66 +1158,7 @@ MmUnlockPages(IN PMDL Mdl)
// Check if this page is in the PFN database
//
Pfn1 = MiGetPfnEntry(*MdlPages);
if (Pfn1)
{
/* Get the current entry and reference count */
OldRefCount = Pfn1->u3.e2.ReferenceCount;
ASSERT(OldRefCount != 0);
/* Is this already the last dereference */
if (OldRefCount == 1)
{
/* It should be on a free list waiting for us */
ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
ASSERT(Pfn1->u2.ShareCount == 0);
/* Not supported yet */
ASSERT((Pfn1->u3.e1.PrototypePte == 0) &&
(Pfn1->OriginalPte.u.Soft.Prototype == 0));
/* One less page */
InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
/* Do the last dereference, we're done here */
MiDecrementReferenceCount(Pfn1, *MdlPages);
}
else
{
/* Loop decrementing one reference */
do
{
/* Make sure it's still valid */
OldRefCount = Pfn1->u3.e2.ReferenceCount;
ASSERT(OldRefCount != 0);
/* Take off one reference */
RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
OldRefCount - 1,
OldRefCount);
ASSERT(RefCount != 0);
} while (OldRefCount != RefCount);
ASSERT(RefCount > 1);
/* Are there only lock references left? */
if (RefCount == 2)
{
/* And does the page still have users? */
if (Pfn1->u2.ShareCount >= 1)
{
/* Then it should still be valid */
ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
/* Not supported yet */
ASSERT((Pfn1->u3.e1.PrototypePte == 0) &&
(Pfn1->OriginalPte.u.Soft.Prototype == 0));
/* But there is one less "locked" page though */
InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
}
}
}
}
if (Pfn1) MiDereferencePfnAndDropLockCount(Pfn1);
} while (++MdlPages < LastPage);
//
@@ -1347,62 +1248,7 @@ MmUnlockPages(IN PMDL Mdl)
{
/* Get the current entry and reference count */
Pfn1 = (PMMPFN)*MdlPages;
OldRefCount = Pfn1->u3.e2.ReferenceCount;
ASSERT(OldRefCount != 0);
/* Is this already the last dereference */
if (OldRefCount == 1)
{
/* It should be on a free list waiting for us */
ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
ASSERT(Pfn1->u2.ShareCount == 0);
/* Not supported yet */
ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
(Pfn1->OriginalPte.u.Soft.Prototype == 0)));
/* One less page */
InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
/* Do the last dereference, we're done here */
MiDecrementReferenceCount(Pfn1, MiGetPfnEntryIndex(Pfn1));
}
else
{
/* Loop decrementing one reference */
do
{
/* Make sure it's still valid */
OldRefCount = Pfn1->u3.e2.ReferenceCount;
ASSERT(OldRefCount != 0);
/* Take off one reference */
RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
OldRefCount - 1,
OldRefCount);
ASSERT(RefCount != 0);
} while (OldRefCount != RefCount);
ASSERT(RefCount > 1);
/* Are there only lock references left? */
if (RefCount == 2)
{
/* And does the page still have users? */
if (Pfn1->u2.ShareCount >= 1)
{
/* Then it should still be valid */
ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
/* Not supported yet */
ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
(Pfn1->OriginalPte.u.Soft.Prototype == 0)));
/* But there is one less "locked" page though */
InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
}
}
}
MiDereferencePfnAndDropLockCount(Pfn1);
} while (++MdlPages < LastPage);
//

View file

@@ -693,6 +693,7 @@ extern SLIST_HEADER MmDeadStackSListHead;
extern MM_AVL_TABLE MmSectionBasedRoot;
extern KGUARDED_MUTEX MmSectionBasedMutex;
extern PVOID MmHighSectionBase;
extern SIZE_T MmSystemLockPagesCount;
BOOLEAN
FORCEINLINE
@@ -983,6 +984,43 @@ MI_WS_OWNER(IN PEPROCESS Process)
(PsGetCurrentThread()->OwnsProcessWorkingSetShared)));
}
//
// New ARM3<->RosMM PAGE Architecture
//
BOOLEAN
FORCEINLINE
MiIsRosSectionObject(IN PVOID Section)
{
PROS_SECTION_OBJECT RosSection = Section;
if ((RosSection->Type == 'SC') && (RosSection->Size == 'TN')) return TRUE;
return FALSE;
}
#ifdef _WIN64
// HACK ON TOP OF HACK ALERT!!!
#define MI_GET_ROS_DATA(x) \
(((x)->RosMmData == 0) ? NULL : ((PMMROSPFN)((ULONG64)(ULONG)((x)->RosMmData) | \
((ULONG64)MmNonPagedPoolStart & 0xffffffff00000000ULL))))
#else
#define MI_GET_ROS_DATA(x) ((PMMROSPFN)(x->RosMmData))
#endif
#define MI_IS_ROS_PFN(x) (((x)->u4.AweAllocation == TRUE) && (MI_GET_ROS_DATA(x) != NULL))
#define ASSERT_IS_ROS_PFN(x) ASSERT(MI_IS_ROS_PFN(x) == TRUE);
typedef struct _MMROSPFN
{
PMM_RMAP_ENTRY RmapListHead;
SWAPENTRY SwapEntry;
} MMROSPFN, *PMMROSPFN;
#define RosMmData AweReferenceCount
VOID
NTAPI
MiDecrementReferenceCount(
IN PMMPFN Pfn1,
IN PFN_NUMBER PageFrameIndex
);
//
// Locks the working set for the given process
//
@@ -1063,7 +1101,7 @@ MiLockWorkingSet(IN PETHREAD Thread,
/* Own the session working set */
ASSERT((Thread->OwnsSessionWorkingSetExclusive == FALSE) &&
(Thread->OwnsSessionWorkingSetShared == FALSE));
Thread->OwnsSessionWorkingSetExclusive = TRUE;
Thread->OwnsSessionWorkingSetExclusive = TRUE;
}
else
{
@@ -1142,6 +1180,237 @@ MI_PFN_ELEMENT(IN PFN_NUMBER Pfn)
return &MmPfnDatabase[Pfn];
};
//
// Drops a locked page without dereferencing it
//
FORCEINLINE
VOID
MiDropLockCount(IN PMMPFN Pfn1)
{
/* This page shouldn't be locked, but it should be valid */
ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
ASSERT(Pfn1->u2.ShareCount == 0);
/* Is this the last reference to the page */
if (Pfn1->u3.e2.ReferenceCount == 1)
{
/* It better not be valid */
ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
/* Is it a prototype PTE? */
if ((Pfn1->u3.e1.PrototypePte == 1) &&
(Pfn1->OriginalPte.u.Soft.Prototype == 1))
{
/* We don't handle this */
ASSERT(FALSE);
}
/* Update the counter */
InterlockedDecrementSizeT(&MmSystemLockPagesCount);
}
}
//
// Drops a locked page and dereferences it
//
FORCEINLINE
VOID
MiDereferencePfnAndDropLockCount(IN PMMPFN Pfn1)
{
USHORT RefCount, OldRefCount;
PFN_NUMBER PageFrameIndex;
/* Loop while we decrement the page successfully */
do
{
/* There should be at least one reference */
OldRefCount = Pfn1->u3.e2.ReferenceCount;
ASSERT(OldRefCount != 0);
/* Are we the last one */
if (OldRefCount == 1)
{
/* The page shouldn't be shared or active at this point */
ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
ASSERT(Pfn1->u2.ShareCount == 0);
/* Is it a prototype PTE? */
if ((Pfn1->u3.e1.PrototypePte == 1) &&
(Pfn1->OriginalPte.u.Soft.Prototype == 1))
{
/* We don't handle this */
ASSERT(FALSE);
}
/* Update the counter, and drop a reference the long way */
InterlockedDecrementSizeT(&MmSystemLockPagesCount);
PageFrameIndex = MiGetPfnEntryIndex(Pfn1);
MiDecrementReferenceCount(Pfn1, PageFrameIndex);
return;
}
/* Drop a reference the short way, and that's it */
RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
OldRefCount - 1,
OldRefCount);
ASSERT(RefCount != 0);
} while (OldRefCount != RefCount);
/* If we got here, there should be more than one reference */
ASSERT(RefCount > 1);
if (RefCount == 2)
{
/* Is it still being shared? */
if (Pfn1->u2.ShareCount >= 1)
{
/* Then it should be valid */
ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
/* Is it a prototype PTE? */
if ((Pfn1->u3.e1.PrototypePte == 1) &&
(Pfn1->OriginalPte.u.Soft.Prototype == 1))
{
/* We don't handle this */
ASSERT(FALSE);
}
/* Update the counter */
InterlockedDecrementSizeT(&MmSystemLockPagesCount);
}
}
}
//
// References a locked page and updates the counter
// Used in MmProbeAndLockPages to handle different edge cases
//
FORCEINLINE
VOID
MiReferenceProbedPageAndBumpLockCount(IN PMMPFN Pfn1)
{
USHORT RefCount, OldRefCount;
/* Sanity check */
ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
/* Does ARM3 own the page? */
if (MI_IS_ROS_PFN(Pfn1))
{
/* ReactOS Mm doesn't track share count */
ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
}
else
{
/* On ARM3 pages, we should see a valid share count */
ASSERT((Pfn1->u2.ShareCount != 0) && (Pfn1->u3.e1.PageLocation == ActiveAndValid));
/* We don't support mapping a prototype page yet */
ASSERT((Pfn1->u3.e1.PrototypePte == 0) && (Pfn1->OriginalPte.u.Soft.Prototype == 0));
}
/* More locked pages! */
InterlockedIncrementSizeT(&MmSystemLockPagesCount);
/* Loop trying to update the reference count */
do
{
/* Get the current reference count, make sure it's valid */
OldRefCount = Pfn1->u3.e2.ReferenceCount;
ASSERT(OldRefCount != 0);
ASSERT(OldRefCount < 2500);
/* Bump it up by one */
RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
OldRefCount + 1,
OldRefCount);
ASSERT(RefCount != 0);
} while (OldRefCount != RefCount);
/* Was this the first lock attempt? If not, undo our bump */
if (OldRefCount != 1) InterlockedDecrementSizeT(&MmSystemLockPagesCount);
}
//
// References a locked page and updates the counter
// Used in all other cases except MmProbeAndLockPages
//
FORCEINLINE
VOID
MiReferenceUsedPageAndBumpLockCount(IN PMMPFN Pfn1)
{
USHORT NewRefCount;
/* Is it a prototype PTE? */
if ((Pfn1->u3.e1.PrototypePte == 1) &&
(Pfn1->OriginalPte.u.Soft.Prototype == 1))
{
/* We don't handle this */
ASSERT(FALSE);
}
/* More locked pages! */
InterlockedIncrementSizeT(&MmSystemLockPagesCount);
/* Update the reference count */
NewRefCount = InterlockedIncrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
if (NewRefCount == 2)
{
/* Is it locked or shared? */
if (Pfn1->u2.ShareCount)
{
/* It's shared, so make sure it's active */
ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
}
else
{
/* It's locked, so we shouldn't lock again */
InterlockedDecrementSizeT(&MmSystemLockPagesCount);
}
}
else
{
/* Someone had already locked the page, so undo our bump */
ASSERT(NewRefCount < 2500);
InterlockedDecrementSizeT(&MmSystemLockPagesCount);
}
}
//
// References a locked page and updates the counter
// Used in all other cases except MmProbeAndLockPages
//
FORCEINLINE
VOID
MiReferenceUnusedPageAndBumpLockCount(IN PMMPFN Pfn1)
{
USHORT NewRefCount;
/* Make sure the page isn't used yet */
ASSERT(Pfn1->u2.ShareCount == 0);
ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
/* Is it a prototype PTE? */
if ((Pfn1->u3.e1.PrototypePte == 1) &&
(Pfn1->OriginalPte.u.Soft.Prototype == 1))
{
/* We don't handle this */
ASSERT(FALSE);
}
/* More locked pages! */
InterlockedIncrementSizeT(&MmSystemLockPagesCount);
/* Update the reference count */
NewRefCount = InterlockedIncrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
if (NewRefCount != 1)
{
/* Someone had already locked the page, so undo our bump */
ASSERT(NewRefCount < 2500);
InterlockedDecrementSizeT(&MmSystemLockPagesCount);
}
}
BOOLEAN
NTAPI
MmArmInitSystem(
@@ -1429,13 +1698,6 @@ MiDecrementShareCount(
IN PFN_NUMBER PageFrameIndex
);
VOID
NTAPI
MiDecrementReferenceCount(
IN PMMPFN Pfn1,
IN PFN_NUMBER PageFrameIndex
);
PFN_NUMBER
NTAPI
MiRemoveAnyPage(
@@ -1739,34 +2001,4 @@ MiRemoveZeroPageSafe(IN ULONG Color)
return 0;
}
//
// New ARM3<->RosMM PAGE Architecture
//
BOOLEAN
FORCEINLINE
MiIsRosSectionObject(IN PVOID Section)
{
PROS_SECTION_OBJECT RosSection = Section;
if ((RosSection->Type == 'SC') && (RosSection->Size == 'TN')) return TRUE;
return FALSE;
}
#ifdef _WIN64
// HACK ON TOP OF HACK ALERT!!!
#define MI_GET_ROS_DATA(x) \
(((x)->RosMmData == 0) ? NULL : ((PMMROSPFN)((ULONG64)(ULONG)((x)->RosMmData) | \
((ULONG64)MmNonPagedPoolStart & 0xffffffff00000000ULL))))
#else
#define MI_GET_ROS_DATA(x) ((PMMROSPFN)(x->RosMmData))
#endif
#define MI_IS_ROS_PFN(x) (((x)->u4.AweAllocation == TRUE) && (MI_GET_ROS_DATA(x) != NULL))
#define ASSERT_IS_ROS_PFN(x) ASSERT(MI_IS_ROS_PFN(x) == TRUE);
typedef struct _MMROSPFN
{
PMM_RMAP_ENTRY RmapListHead;
SWAPENTRY SwapEntry;
} MMROSPFN, *PMMROSPFN;
#define RosMmData AweReferenceCount
/* EOF */

View file

@@ -22,8 +22,6 @@
BOOLEAN UserPdeFault = FALSE;
#endif
LONG MmSystemLockPagesCount;
/* PRIVATE FUNCTIONS **********************************************************/
PMMPTE
@@ -505,7 +503,7 @@ MiCompleteProtoPteFault(IN BOOLEAN StoreInstruction,
IN PMMPTE PointerPte,
IN PMMPTE PointerProtoPte,
IN KIRQL OldIrql,
IN PMMPFN* LockedPfn)
IN PMMPFN* LockedProtoPfn)
{
MMPTE TempPte;
PMMPTE OriginalPte, PageTablePte;
@@ -569,8 +567,14 @@ MiCompleteProtoPteFault(IN BOOLEAN StoreInstruction,
ASSERT(Pfn1->OriginalPte.u.Soft.Prototype != 0);
}
/* Not yet handled by ReactOS */
ASSERT(LockedPfn == NULL);
/* Did we get a locked incoming PFN? */
if (*LockedProtoPfn)
{
/* Drop a reference */
ASSERT((*LockedProtoPfn)->u3.e2.ReferenceCount >= 1);
MiDereferencePfnAndDropLockCount(*LockedProtoPfn);
*LockedProtoPfn = NULL;
}
/* Release the PFN lock */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
@@ -630,7 +634,6 @@ MiResolveTransitionFault(IN PVOID FaultingAddress,
PMMPFN Pfn1;
MMPTE TempPte;
PMMPTE PointerToPteForProtoPage;
USHORT NewRefCount;
DPRINT1("Transition fault on 0x%p with PTE 0x%lx in process %s\n", FaultingAddress, PointerPte, CurrentProcess->ImageFileName);
/* Windows does this check */
@@ -681,57 +684,14 @@ MiResolveTransitionFault(IN PVOID FaultingAddress,
/* Otherwise, the page is removed from its list */
DPRINT1("Transition page in free/zero list\n");
MiUnlinkPageFromList(Pfn1);
/* Windows does these checks -- perhaps a macro? */
ASSERT(Pfn1->u2.ShareCount == 0);
ASSERT(Pfn1->u2.ShareCount == 0);
ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
/* Check if this was a prototype PTE */
if ((Pfn1->u3.e1.PrototypePte == 1) &&
(Pfn1->OriginalPte.u.Soft.Prototype == 1))
{
DPRINT1("Prototype floating page not yet supported\n");
ASSERT(FALSE);
}
/* Update counter */
InterlockedIncrementSizeT(&MmSystemLockPagesCount);
/* We must be the first reference */
NewRefCount = InterlockedIncrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
ASSERT(NewRefCount == 1);
MiReferenceUnusedPageAndBumpLockCount(Pfn1);
}
/* At this point, there should no longer be any in-page errors */
ASSERT(Pfn1->u4.InPageError == 0);
/* Check if this was a PFN with no more share references */
if (Pfn1->u2.ShareCount == 0)
{
/* Windows checks for these... maybe a macro? */
ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
ASSERT(Pfn1->u2.ShareCount == 0);
/* Was this the last active reference to it */
DPRINT1("Page share count is zero\n");
if (Pfn1->u3.e2.ReferenceCount == 1)
{
/* The page should be leaking somewhere on the free/zero list */
DPRINT1("Page reference count is one\n");
ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
if ((Pfn1->u3.e1.PrototypePte == 1) &&
(Pfn1->OriginalPte.u.Soft.Prototype == 1))
{
/* Do extra processing if it was a prototype page */
DPRINT1("Prototype floating page not yet supported\n");
ASSERT(FALSE);
}
/* Update counter */
InterlockedDecrementSizeT(&MmSystemLockPagesCount);
}
}
if (Pfn1->u2.ShareCount == 0) MiDropLockCount(Pfn1);
/* Bump the share count and make the page valid */
Pfn1->u2.ShareCount++;
@@ -821,7 +781,7 @@ MiResolveProtoPteFault(IN BOOLEAN StoreInstruction,
PointerPte,
PointerProtoPte,
OldIrql,
NULL);
OutPfn);
}
/* Make sure there's some protection mask */
@@ -897,7 +857,7 @@ MiResolveProtoPteFault(IN BOOLEAN StoreInstruction,
PointerPte,
PointerProtoPte,
OldIrql,
NULL);
OutPfn);
}
NTSTATUS
@@ -915,7 +875,7 @@ MiDispatchFault(IN BOOLEAN StoreInstruction,
KIRQL OldIrql, LockIrql;
NTSTATUS Status;
PMMPTE SuperProtoPte;
PMMPFN Pfn1;
PMMPFN Pfn1, OutPfn = NULL;
PFN_NUMBER PageFrameIndex, PteCount, ProcessedPtes;
DPRINT("ARM3 Page Fault Dispatcher for address: %p in process: %p\n",
Address,
@@ -964,7 +924,7 @@ MiDispatchFault(IN BOOLEAN StoreInstruction,
Address,
PointerPte,
PointerProtoPte,
NULL,
&OutPfn,
NULL,
NULL,
Process,
@@ -1010,14 +970,24 @@ MiDispatchFault(IN BOOLEAN StoreInstruction,
while (TRUE)
{
/* For our current usage, this should be true */
ASSERT(TempPte.u.Hard.Valid == 1);
ASSERT(TempPte.u.Soft.Prototype == 0);
ASSERT(TempPte.u.Soft.Transition == 0);
/* Bump the share count on the PTE */
PageFrameIndex = PFN_FROM_PTE(&TempPte);
Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
Pfn1->u2.ShareCount++;
if (TempPte.u.Hard.Valid == 1)
{
/* Bump the share count on the PTE */
PageFrameIndex = PFN_FROM_PTE(&TempPte);
Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
Pfn1->u2.ShareCount++;
}
else if ((TempPte.u.Soft.Prototype == 0) &&
(TempPte.u.Soft.Transition == 1))
{
/* No standby support yet */
ASSERT(FALSE);
}
else
{
/* Page is invalid, get out of the loop */
break;
}
/* One more done, was it the last? */
if (++ProcessedPtes == PteCount)
@@ -1028,7 +998,7 @@ MiDispatchFault(IN BOOLEAN StoreInstruction,
PointerPte,
PointerProtoPte,
LockIrql,
NULL);
&OutPfn);
/* THIS RELEASES THE PFN LOCK! */
break;
@@ -1038,21 +1008,62 @@ MiDispatchFault(IN BOOLEAN StoreInstruction,
ASSERT(FALSE);
}
/* Only path that we support for now */
ASSERT(ProcessedPtes != 0);
/* Did we resolve the fault? */
if (ProcessedPtes)
{
/* Bump the transition count */
InterlockedExchangeAdd(&KeGetCurrentPrcb()->MmTransitionCount, ProcessedPtes);
ProcessedPtes--;
/* Bump the transition count */
InterlockedExchangeAdd(&KeGetCurrentPrcb()->MmTransitionCount, ProcessedPtes);
ProcessedPtes--;
/* Loop all the processing we did */
ASSERT(ProcessedPtes == 0);
/* Loop all the processing we did */
ASSERT(ProcessedPtes == 0);
/* Complete this as a transition fault */
ASSERT(OldIrql == KeGetCurrentIrql());
ASSERT(OldIrql <= APC_LEVEL);
ASSERT(KeAreAllApcsDisabled() == TRUE);
return STATUS_PAGE_FAULT_TRANSITION;
}
/* We did not -- PFN lock is still held, prepare to resolve prototype PTE fault */
OutPfn = MI_PFN_ELEMENT(SuperProtoPte->u.Hard.PageFrameNumber);
MiReferenceUsedPageAndBumpLockCount(OutPfn);
ASSERT(OutPfn->u3.e2.ReferenceCount > 1);
ASSERT(PointerPte->u.Hard.Valid == 0);
/* Resolve the fault -- this will release the PFN lock */
Status = MiResolveProtoPteFault(StoreInstruction,
Address,
PointerPte,
PointerProtoPte,
&OutPfn,
NULL,
NULL,
Process,
LockIrql,
TrapInformation);
ASSERT(Status == STATUS_SUCCESS);
/* Did the routine clean out the PFN or should we? */
if (OutPfn)
{
/* We had a locked PFN, so acquire the PFN lock to dereference it */
ASSERT(PointerProtoPte != NULL);
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
/* Dereference the locked PFN */
MiDereferencePfnAndDropLockCount(OutPfn);
ASSERT(OutPfn->u3.e2.ReferenceCount >= 1);
/* And now release the lock */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
/* Complete this as a transition fault */
ASSERT(OldIrql == KeGetCurrentIrql());
ASSERT(OldIrql <= APC_LEVEL);
ASSERT(KeAreAllApcsDisabled() == TRUE);
return STATUS_PAGE_FAULT_TRANSITION;
return Status;
}
}
@@ -1729,8 +1740,46 @@ UserFault:
}
else
{
/* This path is not yet supported */
ASSERT(FALSE);
/* Get the protection code and check if this is a proto PTE */
ProtectionCode = TempPte.u.Soft.Protection;
DPRINT1("Code: %lx\n", ProtecitonCode);
if (TempPte.u.Soft.Prototype)
{
/* Do we need to go find the real PTE? */
DPRINT1("Soft: %lx\n", TempPte.u.Soft.PageFileHigh);
if (TempPte.u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)
{
/* Get the prototype pte and VAD for it */
ProtoPte = MiCheckVirtualAddress(Address,
&ProtectionCode,
&Vad);
DPRINT1("Address: %p ProtoP %p Code: %lx Vad: %p\n", Address, ProtoPte, ProtectionCode, Vad);
if (!ProtoPte)
{
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
return STATUS_ACCESS_VIOLATION;
}
}
else
{
/* Get the prototype PTE! */
ProtoPte = MiProtoPteToPte(&TempPte);
/* Is it read-only */
if (TempPte.u.Proto.ReadOnly)
{
/* Set read-only code */
ProtectionCode = MM_READONLY;
}
else
{
/* Set unknown protection */
ProtectionCode = 0x100;
ASSERT(CurrentProcess->CloneRoot != NULL);
}
}
}
}
/* FIXME: Run MiAccessCheck */

View file

@@ -806,7 +806,6 @@ Quickie:
return Status;
}
NTSTATUS
NTAPI
MiSessionCommitPageTables(IN PVOID StartVa,

View file

@@ -1313,9 +1313,9 @@ MiQueryAddressState(IN PVOID Va,
OUT PVOID *NextVa)
{
PMMPTE PointerPte;
PMMPTE PointerPte, ProtoPte;
PMMPDE PointerPde;
MMPTE TempPte;
MMPTE TempPte, TempProtoPte;
BOOLEAN DemandZeroPte = TRUE, ValidPte = FALSE;
ULONG State = MEM_RESERVE, Protect = 0;
ASSERT((Vad->StartingVpn <= ((ULONG_PTR)Va >> PAGE_SHIFT)) &&
@@ -1353,6 +1353,7 @@ MiQueryAddressState(IN PVOID Va,
if (ValidPte)
{
/* FIXME: watch out for large pages */
ASSERT(PointerPde->u.Hard.LargePage == FALSE);
/* Capture the PTE */
TempPte = *PointerPte;
@@ -1376,6 +1377,11 @@ MiQueryAddressState(IN PVOID Va,
/* This means it's committed */
State = MEM_COMMIT;
/* We don't support these */
ASSERT(Vad->u.VadFlags.VadType != VadDevicePhysicalMemory);
ASSERT(Vad->u.VadFlags.VadType != VadRotatePhysical);
ASSERT(Vad->u.VadFlags.VadType != VadAwe);
/* Get protection state of this page */
Protect = MiGetPageProtection(PointerPte);
@@ -1395,11 +1401,35 @@ MiQueryAddressState(IN PVOID Va,
/* Check if this was a demand-zero PTE, since we need to find the state */
if (DemandZeroPte)
{
/* Check if this is private committed memory, or an image-backed VAD */
/* Not yet handled */
ASSERT(Vad->u.VadFlags.VadType != VadDevicePhysicalMemory);
ASSERT(Vad->u.VadFlags.VadType != VadAwe);
/* Check if this is private committed memory, or a section-backed VAD */
if ((Vad->u.VadFlags.PrivateMemory == 0) && (Vad->ControlArea))
{
DPRINT1("Not supported\n");
ASSERT(FALSE);
/* Tell caller about the next range */
*NextVa = (PVOID)((ULONG_PTR)Va + PAGE_SIZE);
/* Get the prototype PTE for this VAD */
ProtoPte = MI_GET_PROTOTYPE_PTE_FOR_VPN(Vad,
(ULONG_PTR)Va >> PAGE_SHIFT);
if (ProtoPte)
{
/* We should unlock the working set, but it's not being held! */
/* Is the prototype PTE actually valid (committed)? */
TempProtoPte = *ProtoPte;
if (TempProtoPte.u.Long)
{
/* Unless this is a memory-mapped file, handle it like private VAD */
State = MEM_COMMIT;
ASSERT(Vad->u.VadFlags.VadType != VadImageMap);
Protect = MmProtectToValue[Vad->u.VadFlags.Protection];
}
/* We should re-lock the working set */
}
}
else if (Vad->u.VadFlags.MemCommit)
{