[NTOS]: Implement MiDecrementReferenceCount and rewrite large parts of the ProbeAndLock/Unlock MDL API to fully use ARM3 APIs, leaving MmReference/DereferencePage behind.

[NTOS]: Fix many MDL API bugs: correctly check for I/O pages, use LIST_HEAD instead of -1, track system-wide locked pages, use the process working set lock instead of the address space lock, add check for cross-ring MDL mappings, and make some small optimizations.
[NTOS]: Make some more fixes in MmAllocatePagesForMdl, MmFreeMdlPages to make the PFN entries more "correct".
[NTOS]: Had a little breakthrough: instead of complicating our lives and hiding certain ReactOS-Mm fields inside legitimate ARM3/MMPFN fields, differentiate between "legacy" (RosMm) and ARM3 pages. The legacy allocator (MmAllocPage/MmRequestPageMemoryConsumer) will use the non-paged pool to allocate a MMROSPFN add-on (8 bytes), in which the RMAP list head and SWAPENTRY are stored. When a legacy "free" is done, this data is deleted. Additionally, we can now tell ARM3 and RosMm pages apart, so appropriate ASSERTs have been added to make sure the two never cross paths (which should safely let us use all the PFN fields now and implement working sets, etc...). I don't know why I didn't think of this sooner.

svn path=/trunk/; revision=49201
This commit is contained in:
Sir Richard 2010-10-19 04:30:48 +00:00
parent 609dce9faf
commit 52d26f0643
6 changed files with 376 additions and 171 deletions

View file

@ -279,6 +279,17 @@ typedef struct _MEMORY_AREA
} Data; } Data;
} MEMORY_AREA, *PMEMORY_AREA; } MEMORY_AREA, *PMEMORY_AREA;
//
// Reverse-mapping (RMAP) entry: records one (process, virtual address)
// pair that maps a given physical page. Entries for the same page form
// a singly-linked list; the list head lives in the page's MMROSPFN
// add-on (see MmSetRmapListHeadPage/MmGetRmapListHeadPage).
//
typedef struct _MM_RMAP_ENTRY
{
struct _MM_RMAP_ENTRY* Next;    // Next mapping of the same physical page
PEPROCESS Process;              // Process that owns this mapping
PVOID Address;                  // Virtual address of the mapping in that process
#if DBG
PVOID Caller;                   // Debug builds only: who created this entry
#endif
}
MM_RMAP_ENTRY, *PMM_RMAP_ENTRY;
// //
// These two mappings are actually used by Windows itself, based on the ASSERTS // These two mappings are actually used by Windows itself, based on the ASSERTS
// //

View file

@ -16,8 +16,11 @@
#define MODULE_INVOLVED_IN_ARM3 #define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h" #include "../ARM3/miarm.h"
/* GLOBALS ********************************************************************/
BOOLEAN MmTrackPtes; BOOLEAN MmTrackPtes;
BOOLEAN MmTrackLockedPages; BOOLEAN MmTrackLockedPages;
SIZE_T MmSystemLockPagesCount;
/* PUBLIC FUNCTIONS ***********************************************************/ /* PUBLIC FUNCTIONS ***********************************************************/
@ -248,34 +251,47 @@ MmFreePagesFromMdl(IN PMDL Mdl)
// //
// Reached the last page // Reached the last page
// //
if (*Pages == -1) break; if (*Pages == LIST_HEAD) break;
//
// Sanity check
//
ASSERT(*Pages <= MmHighestPhysicalPage);
// //
// Get the page entry // Get the page entry
// //
Pfn1 = MiGetPfnEntry(*Pages); Pfn1 = MiGetPfnEntry(*Pages);
ASSERT(Pfn1->u3.ReferenceCount == 1); ASSERT(Pfn1);
ASSERT(Pfn1->u2.ShareCount == 1);
ASSERT(MI_IS_PFN_DELETED(Pfn1) == TRUE);
if (Pfn1->u4.PteFrame != 0x1FFEDCB)
{
/* Corrupted PFN entry or invalid free */
KeBugCheckEx(MEMORY_MANAGEMENT, 0x1236, (ULONG_PTR)Mdl, (ULONG_PTR)Pages, *Pages);
}
// //
// Clear it // Clear it
// //
Pfn1->u3.e1.StartOfAllocation = 0; Pfn1->u3.e1.StartOfAllocation = 0;
Pfn1->u3.e1.EndOfAllocation = 0; Pfn1->u3.e1.EndOfAllocation = 0;
Pfn1->u2.ShareCount == 0;
// //
// Dereference it // Dereference it
// //
MmDereferencePage(*Pages); ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
if (Pfn1->u3.e2.ReferenceCount != 1)
{
/* Just take off one reference */
InterlockedDecrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
}
else
{
/* We'll be nuking the whole page */
MiDecrementReferenceCount(Pfn1, *Pages);
}
// //
// Clear this page and move on // Clear this page and move on
// //
*Pages++ = -1; *Pages++ = LIST_HEAD;
} while (--NumberOfPages != 0); } while (--NumberOfPages != 0);
// //
@ -411,7 +427,7 @@ MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
// //
// We're done here // We're done here
// //
if (*MdlPages == -1) break; if (*MdlPages == LIST_HEAD) break;
// //
// Write the PTE // Write the PTE
@ -573,13 +589,14 @@ MmProbeAndLockPages(IN PMDL Mdl,
ULONG LockPages, TotalPages; ULONG LockPages, TotalPages;
NTSTATUS Status = STATUS_SUCCESS; NTSTATUS Status = STATUS_SUCCESS;
PEPROCESS CurrentProcess; PEPROCESS CurrentProcess;
PMMSUPPORT AddressSpace;
NTSTATUS ProbeStatus; NTSTATUS ProbeStatus;
PMMPTE PointerPte, LastPte; PMMPTE PointerPte, LastPte;
PMMPDE PointerPde; PMMPDE PointerPde;
PFN_NUMBER PageFrameIndex; PFN_NUMBER PageFrameIndex;
BOOLEAN UsePfnLock; BOOLEAN UsePfnLock;
KIRQL OldIrql; KIRQL OldIrql;
USHORT OldRefCount, RefCount;
PMMPFN Pfn1;
DPRINT("Probing MDL: %p\n", Mdl); DPRINT("Probing MDL: %p\n", Mdl);
// //
@ -608,8 +625,17 @@ MmProbeAndLockPages(IN PMDL Mdl,
LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount); LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
ASSERT(LockPages != 0); ASSERT(LockPages != 0);
/* Block invalid access */
if ((AccessMode != KernelMode) &&
((LastAddress > (PVOID)MM_USER_PROBE_ADDRESS) || (Address >= LastAddress)))
{
/* Caller should be in SEH, raise the error */
*MdlPages = LIST_HEAD;
ExRaiseStatus(STATUS_ACCESS_VIOLATION);
}
// //
// Get theprocess // Get the process
// //
if (Address <= MM_HIGHEST_USER_ADDRESS) if (Address <= MM_HIGHEST_USER_ADDRESS)
{ {
@ -632,6 +658,9 @@ MmProbeAndLockPages(IN PMDL Mdl,
TotalPages = LockPages; TotalPages = LockPages;
StartAddress = Address; StartAddress = Address;
/* Large pages not supported */
ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address));
// //
// Now probe them // Now probe them
// //
@ -646,7 +675,7 @@ MmProbeAndLockPages(IN PMDL Mdl,
// //
// Assume failure // Assume failure
// //
*MdlPages = -1; *MdlPages = LIST_HEAD;
// //
// Read // Read
@ -668,8 +697,7 @@ MmProbeAndLockPages(IN PMDL Mdl,
// //
// Next address... // Next address...
// //
Address = (PVOID)((ULONG_PTR)Address + PAGE_SIZE); Address = PAGE_ALIGN((ULONG_PTR)Address + PAGE_SIZE);
Address = PAGE_ALIGN(Address);
// //
// Next page... // Next page...
@ -711,6 +739,10 @@ MmProbeAndLockPages(IN PMDL Mdl,
// //
PointerPte = MiAddressToPte(StartAddress); PointerPte = MiAddressToPte(StartAddress);
PointerPde = MiAddressToPde(StartAddress); PointerPde = MiAddressToPde(StartAddress);
#if (_MI_PAGING_LEVELS >= 3)
DPRINT1("PAE/x64 Not Implemented\n");
ASSERT(FALSE);
#endif
// //
// Sanity check // Sanity check
@ -761,7 +793,6 @@ MmProbeAndLockPages(IN PMDL Mdl,
// //
UsePfnLock = TRUE; UsePfnLock = TRUE;
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock); OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
AddressSpace = NULL; // Keep compiler happy
} }
else else
{ {
@ -782,13 +813,10 @@ MmProbeAndLockPages(IN PMDL Mdl,
// //
Mdl->Process = CurrentProcess; Mdl->Process = CurrentProcess;
// /* Lock the process working set */
// Use the process lock MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
//
UsePfnLock = FALSE; UsePfnLock = FALSE;
AddressSpace = &CurrentProcess->Vm; OldIrql = MM_NOIRQL;
MmLockAddressSpace(AddressSpace);
OldIrql = DISPATCH_LEVEL; // Keep compiler happy
} }
// //
@ -804,7 +832,7 @@ MmProbeAndLockPages(IN PMDL Mdl,
// //
// Assume failure and check for non-mapped pages // Assume failure and check for non-mapped pages
// //
*MdlPages = -1; *MdlPages = LIST_HEAD;
#if (_MI_PAGING_LEVELS >= 3) #if (_MI_PAGING_LEVELS >= 3)
/* Should be checking the PPE and PXE */ /* Should be checking the PPE and PXE */
ASSERT(FALSE); ASSERT(FALSE);
@ -824,10 +852,8 @@ MmProbeAndLockPages(IN PMDL Mdl,
} }
else else
{ {
// /* Release process working set */
// Release process address space lock MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
//
MmUnlockAddressSpace(AddressSpace);
} }
// //
@ -856,10 +882,8 @@ MmProbeAndLockPages(IN PMDL Mdl,
} }
else else
{ {
// /* Lock the process working set */
// Use the address space lock MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
//
MmLockAddressSpace(AddressSpace);
} }
} }
@ -896,10 +920,8 @@ MmProbeAndLockPages(IN PMDL Mdl,
} }
else else
{ {
// /* Release process working set */
// Release process address space lock MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
//
MmUnlockAddressSpace(AddressSpace);
} }
// //
@ -927,10 +949,8 @@ MmProbeAndLockPages(IN PMDL Mdl,
} }
else else
{ {
// /* Lock the process working set */
// Use the address space lock MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
//
MmLockAddressSpace(AddressSpace);
} }
// //
@ -952,14 +972,55 @@ MmProbeAndLockPages(IN PMDL Mdl,
// Grab the PFN // Grab the PFN
// //
PageFrameIndex = PFN_FROM_PTE(PointerPte); PageFrameIndex = PFN_FROM_PTE(PointerPte);
if (PageFrameIndex <= MmHighestPhysicalPage) Pfn1 = MiGetPfnEntry(PageFrameIndex);
if (Pfn1)
{ {
/* Either this is for kernel-mode, or the working set is held */
ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE)); ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));
// /* No Physical VADs supported yet */
// Now lock the page if (CurrentProcess) ASSERT(CurrentProcess->PhysicalVadRoot == NULL);
//
MmReferencePage(PageFrameIndex); /* This address should already exist and be fully valid */
ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
if (MI_IS_ROS_PFN(Pfn1))
{
/* ReactOS Mm doesn't track share count */
ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
}
else
{
/* On ARM3 pages, we should see a valid share count */
ASSERT((Pfn1->u2.ShareCount != 0) && (Pfn1->u3.e1.PageLocation == ActiveAndValid));
/* We don't support mapping a prototype page yet */
ASSERT((Pfn1->u3.e1.PrototypePte == 0) && (Pfn1->OriginalPte.u.Soft.Prototype == 0));
}
/* More locked pages! */
InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, 1);
/* Loop trying to update the reference count */
do
{
/* Get the current reference count, make sure it's valid */
OldRefCount = Pfn1->u3.e2.ReferenceCount;
ASSERT(OldRefCount != 0);
ASSERT(OldRefCount < 2500);
/* Bump it up by one */
RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
OldRefCount + 1,
OldRefCount);
ASSERT(RefCount != 0);
} while (OldRefCount != RefCount);
/* Was this the first lock attempt? */
if (OldRefCount != 1)
{
/* Someone else came through */
InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
}
} }
else else
{ {
@ -973,7 +1034,10 @@ MmProbeAndLockPages(IN PMDL Mdl,
// Write the page and move on // Write the page and move on
// //
*MdlPages++ = PageFrameIndex; *MdlPages++ = PageFrameIndex;
if (!((ULONG_PTR)(++PointerPte) & (PAGE_SIZE - 1))) PointerPde++; PointerPte++;
/* Check if we're on a PDE boundary */
if (!((ULONG_PTR)PointerPte & (PD_SIZE - 1))) PointerPde++;
} while (PointerPte <= LastPte); } while (PointerPte <= LastPte);
// //
@ -988,10 +1052,8 @@ MmProbeAndLockPages(IN PMDL Mdl,
} }
else else
{ {
// /* Release process working set */
// Release process address space lock MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
//
MmUnlockAddressSpace(AddressSpace);
} }
// //
@ -1018,10 +1080,8 @@ CleanupWithLock:
} }
else else
{ {
// /* Release process working set */
// Release process address space lock MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
//
MmUnlockAddressSpace(AddressSpace);
} }
Cleanup: Cleanup:
// //
@ -1048,6 +1108,8 @@ MmUnlockPages(IN PMDL Mdl)
PVOID Base; PVOID Base;
ULONG Flags, PageCount; ULONG Flags, PageCount;
KIRQL OldIrql; KIRQL OldIrql;
USHORT RefCount, OldRefCount;
PMMPFN Pfn1;
DPRINT("Unlocking MDL: %p\n", Mdl); DPRINT("Unlocking MDL: %p\n", Mdl);
// //
@ -1107,17 +1169,71 @@ MmUnlockPages(IN PMDL Mdl)
// //
// Last page, break out // Last page, break out
// //
if (*MdlPages == -1) break; if (*MdlPages == LIST_HEAD) break;
// //
// Check if this page is in the PFN database // Check if this page is in the PFN database
// //
if (*MdlPages <= MmHighestPhysicalPage) Pfn1 = MiGetPfnEntry(*MdlPages);
if (Pfn1);
{ {
// /* Get the current entry and reference count */
// Unlock and dereference OldRefCount = Pfn1->u3.e2.ReferenceCount;
// ASSERT(OldRefCount != 0);
MmDereferencePage(*MdlPages);
/* Is this already the last dereference */
if (OldRefCount == 1)
{
/* It should be on a free list waiting for us */
ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
ASSERT(Pfn1->u2.ShareCount == 0);
/* Not supported yet */
ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
(Pfn1->OriginalPte.u.Soft.Prototype == 0)));
/* One less page */
InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
/* Do the last dereference, we're done here */
MiDecrementReferenceCount(Pfn1, *MdlPages);
}
else
{
/* Loop decrementing one reference */
do
{
/* Make sure it's still valid */
OldRefCount = Pfn1->u3.e2.ReferenceCount;
ASSERT(OldRefCount != 0);
/* Take off one reference */
RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
OldRefCount - 1,
OldRefCount);
ASSERT(RefCount != 0);
} while (OldRefCount != RefCount);
ASSERT(RefCount > 1);
/* Are there only lock references left? */
if (RefCount == 2)
{
/* And does the page still have users? */
if (Pfn1->u2.ShareCount >= 1)
{
/* Then it should still be valid */
ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
/* Not supported yet */
ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
(Pfn1->OriginalPte.u.Soft.Prototype == 0)));
/* But there is one less "locked" page though */
InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
}
}
}
} }
} while (++MdlPages < LastPage); } while (++MdlPages < LastPage);
@ -1169,7 +1285,7 @@ MmUnlockPages(IN PMDL Mdl)
// //
// Last page reached // Last page reached
// //
if (*MdlPages == -1) if (*MdlPages == LIST_HEAD)
{ {
// //
// Were there no pages at all? // Were there no pages at all?
@ -1190,10 +1306,9 @@ MmUnlockPages(IN PMDL Mdl)
break; break;
} }
// /* Save the PFN entry instead for the secondary loop */
// Sanity check *MdlPages = (PFN_NUMBER)MiGetPfnEntry(*MdlPages);
// ASSERT((*MdlPages) != 0);
ASSERT(*MdlPages <= MmHighestPhysicalPage);
} while (++MdlPages < LastPage); } while (++MdlPages < LastPage);
// //
@ -1207,10 +1322,64 @@ MmUnlockPages(IN PMDL Mdl)
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock); OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
do do
{ {
// /* Get the current entry and reference count */
// Unlock and dereference Pfn1 = (PMMPFN)(*MdlPages);
// OldRefCount = Pfn1->u3.e2.ReferenceCount;
MmDereferencePage(*MdlPages); ASSERT(OldRefCount != 0);
/* Is this already the last dereference */
if (OldRefCount == 1)
{
/* It should be on a free list waiting for us */
ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
ASSERT(Pfn1->u2.ShareCount == 0);
/* Not supported yet */
ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
(Pfn1->OriginalPte.u.Soft.Prototype == 0)));
/* One less page */
InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
/* Do the last dereference, we're done here */
MiDecrementReferenceCount(Pfn1, *MdlPages);
}
else
{
/* Loop decrementing one reference */
do
{
/* Make sure it's still valid */
OldRefCount = Pfn1->u3.e2.ReferenceCount;
ASSERT(OldRefCount != 0);
/* Take off one reference */
RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
OldRefCount - 1,
OldRefCount);
ASSERT(RefCount != 0);
} while (OldRefCount != RefCount);
ASSERT(RefCount > 1);
/* Are there only lock references left? */
if (RefCount == 2)
{
/* And does the page still have users? */
if (Pfn1->u2.ShareCount >= 1)
{
/* Then it should still be valid */
ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
/* Not supported yet */
ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
(Pfn1->OriginalPte.u.Soft.Prototype == 0)));
/* But there is one less "locked" page though */
InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
}
}
}
} while (++MdlPages < LastPage); } while (++MdlPages < LastPage);
// //

View file

@ -1058,6 +1058,13 @@ MiDecrementShareCount(
IN PFN_NUMBER PageFrameIndex IN PFN_NUMBER PageFrameIndex
); );
//
// Drops one reference on a PFN database entry; when the reference count
// reaches zero the page is put back on a free list. Per the ASSERT in
// its implementation, the caller holds the PFN lock (DISPATCH_LEVEL).
//
VOID
NTAPI
MiDecrementReferenceCount(
IN PMMPFN Pfn1,
IN PFN_NUMBER PageFrameIndex
);
PFN_NUMBER PFN_NUMBER
NTAPI NTAPI
MiRemoveAnyPage( MiRemoveAnyPage(
@ -1233,4 +1240,18 @@ MiRemoveZeroPageSafe(IN ULONG Color)
return 0; return 0;
} }
//
// New ARM3<->RosMM PAGE Architecture
//
// Legacy (RosMm) pages are tagged by setting u4.AweAllocation and storing a
// pointer to a nonpaged-pool MMROSPFN add-on in the PFN's AweReferenceCount
// field (aliased as RosMmData below). ARM3 pages never set that bit, so the
// two page kinds can be told apart at runtime.
//
// The macro argument is fully parenthesized so expansion stays correct when
// a non-trivial expression (e.g. MI_GET_ROS_DATA(Pfn1 + i)) is passed, and
// ASSERT_IS_ROS_PFN carries no trailing semicolon so the statement is
// terminated at the call site like a normal function call.
//
#define MI_GET_ROS_DATA(x) ((PMMROSPFN)((x)->RosMmData))
#define MI_IS_ROS_PFN(x) (((x)->u4.AweAllocation == TRUE) && (MI_GET_ROS_DATA(x) != NULL))
#define ASSERT_IS_ROS_PFN(x) ASSERT(MI_IS_ROS_PFN(x) == TRUE)

//
// Extra per-page data kept only for legacy (RosMm) pages, allocated from
// nonpaged pool by MmAllocPage and freed on the last MmDereferencePage.
//
typedef struct _MMROSPFN
{
PMM_RMAP_ENTRY RmapListHead;    // Head of the reverse-mapping list for this page
SWAPENTRY SwapEntry;            // Saved swap location for the page, if any
} MMROSPFN, *PMMROSPFN;

//
// Alias: the (otherwise unused here) AweReferenceCount field holds the
// MMROSPFN pointer. Because RosMmData is itself a macro, it is substituted
// when MI_GET_ROS_DATA is expanded at its point of use, so defining it after
// MI_GET_ROS_DATA is safe.
//
#define RosMmData AweReferenceCount
/* EOF */ /* EOF */

View file

@ -555,7 +555,7 @@ MiInsertPageInFreeList(IN PFN_NUMBER PageFrameIndex)
Pfn1->u4.PteFrame = MiGetPfnEntryIndex(Blink); Pfn1->u4.PteFrame = MiGetPfnEntryIndex(Blink);
/* If there is an original pte, it should be an old link, NOT a ReactOS RMAP */ /* If there is an original pte, it should be an old link, NOT a ReactOS RMAP */
ASSERT(Blink->u3.e1.ParityError == FALSE); ASSERT(Blink->u4.AweAllocation == FALSE);
Blink->OriginalPte.u.Long = PageFrameIndex; Blink->OriginalPte.u.Long = PageFrameIndex;
} }
@ -563,7 +563,7 @@ MiInsertPageInFreeList(IN PFN_NUMBER PageFrameIndex)
ColorTable->Blink = Pfn1; ColorTable->Blink = Pfn1;
/* If there is an original pte, it should be an old link, NOT a ReactOS RMAP */ /* If there is an original pte, it should be an old link, NOT a ReactOS RMAP */
ASSERT(Pfn1->u3.e1.ParityError == FALSE); ASSERT(Pfn1->u4.AweAllocation == FALSE);
Pfn1->OriginalPte.u.Long = LIST_HEAD; Pfn1->OriginalPte.u.Long = LIST_HEAD;
/* And increase the count in the colored list */ /* And increase the count in the colored list */
@ -667,7 +667,7 @@ MiInsertPageInList(IN PMMPFNLIST ListHead,
Flink = ColorHead->Flink; Flink = ColorHead->Flink;
/* If there is an original pte, it should be an old link, NOT a ReactOS RMAP */ /* If there is an original pte, it should be an old link, NOT a ReactOS RMAP */
ASSERT(Pfn1->u3.e1.ParityError == FALSE); ASSERT(Pfn1->u4.AweAllocation == FALSE);
/* Make this page point back to the list, and point forwards to the old head */ /* Make this page point back to the list, and point forwards to the old head */
Pfn1->OriginalPte.u.Long = Flink; Pfn1->OriginalPte.u.Long = Flink;
@ -853,7 +853,7 @@ MiDecrementShareCount(IN PMMPFN Pfn1,
* ways we shouldn't be seeing RMAP entries at this point * ways we shouldn't be seeing RMAP entries at this point
*/ */
ASSERT(Pfn1->OriginalPte.u.Soft.Prototype == 0); ASSERT(Pfn1->OriginalPte.u.Soft.Prototype == 0);
ASSERT(Pfn1->u3.e1.ParityError == FALSE); ASSERT(Pfn1->u4.AweAllocation == FALSE);
/* Mark the page temporarily as valid, we're going to make it free soon */ /* Mark the page temporarily as valid, we're going to make it free soon */
Pfn1->u3.e1.PageLocation = ActiveAndValid; Pfn1->u3.e1.PageLocation = ActiveAndValid;
@ -869,6 +869,49 @@ MiDecrementShareCount(IN PMMPFN Pfn1,
} }
} }
/*
 * MiDecrementReferenceCount
 *
 * Drops one reference from the given PFN entry. If that was the last
 * reference, the page is released back to the free list (no standby or
 * modified list exists yet, so every dead page goes straight to free).
 *
 * Pfn1           - PFN database entry; asserted to match PageFrameIndex.
 * PageFrameIndex - physical page number being dereferenced.
 *
 * NOTE(review): the caller is expected to hold the PFN lock; the plain
 * re-read of ReferenceCount right after the interlocked decrement is
 * only race-free under that assumption -- confirm at all call sites.
 */
VOID
NTAPI
MiDecrementReferenceCount(IN PMMPFN Pfn1,
IN PFN_NUMBER PageFrameIndex)
{
/* PFN lock must be held */
ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
/* Sanity checks on the page */
ASSERT(PageFrameIndex < MmHighestPhysicalPage);
ASSERT(Pfn1 == MiGetPfnEntry(PageFrameIndex));
ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
/* Dereference the page, bail out if it's still alive */
InterlockedDecrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
if (Pfn1->u3.e2.ReferenceCount) return;
/* Nobody should still hold a share reference to a dead page */
if (Pfn1->u2.ShareCount != 0)
{
/* Otherwise something's really wrong */
KeBugCheckEx(PFN_LIST_CORRUPT, 7, PageFrameIndex, Pfn1->u2.ShareCount, 0);
}
/* And it should be lying on some page list, not mapped anywhere */
ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
/* Did someone set the delete flag? */
if (MI_IS_PFN_DELETED(Pfn1))
{
/* Insert it into the free list, there's nothing left to do */
MiInsertPageInFreeList(PageFrameIndex);
return;
}
/* We don't have a modified list yet */
ASSERT(Pfn1->u3.e1.Modified == 0);
ASSERT(Pfn1->u3.e1.RemovalRequested == 0);
/* FIXME: Normally it would go on the standby list, but we're pushing it on the free list */
MiInsertPageInFreeList(PageFrameIndex);
}
VOID VOID
NTAPI NTAPI
MiInitializePfnForOtherProcess(IN PFN_NUMBER PageFrameIndex, MiInitializePfnForOtherProcess(IN PFN_NUMBER PageFrameIndex,

View file

@ -23,13 +23,7 @@
/* GLOBALS ****************************************************************/ /* GLOBALS ****************************************************************/
//
//
// ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions // ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
//
// REACTOS NT
//
#define RmapListHead AweReferenceCount
#define PHYSICAL_PAGE MMPFN #define PHYSICAL_PAGE MMPFN
#define PPHYSICAL_PAGE PMMPFN #define PPHYSICAL_PAGE PMMPFN
@ -159,7 +153,7 @@ MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
KIRQL OldIrql; KIRQL OldIrql;
PPHYSICAL_PAGE Pfn1; PPHYSICAL_PAGE Pfn1;
INT LookForZeroedPages; INT LookForZeroedPages;
ASSERT (KeGetCurrentIrql() <= APC_LEVEL); ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
// //
// Convert the low address into a PFN // Convert the low address into a PFN
@ -283,6 +277,7 @@ MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) != LookForZeroedPages) continue; if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) != LookForZeroedPages) continue;
/* Remove the page from the free or zero list */ /* Remove the page from the free or zero list */
ASSERT(Pfn1->u3.e1.ReadInProgress == 0);
MiUnlinkFreeOrZeroedPage(Pfn1); MiUnlinkFreeOrZeroedPage(Pfn1);
// //
@ -294,9 +289,13 @@ MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
// Now setup the page and mark it // Now setup the page and mark it
// //
Pfn1->u3.e2.ReferenceCount = 1; Pfn1->u3.e2.ReferenceCount = 1;
Pfn1->u2.ShareCount = 1;
MI_SET_PFN_DELETED(Pfn1);
Pfn1->u4.PteFrame = 0x1FFEDCB;
Pfn1->u3.e1.StartOfAllocation = 1; Pfn1->u3.e1.StartOfAllocation = 1;
Pfn1->u3.e1.EndOfAllocation = 1; Pfn1->u3.e1.EndOfAllocation = 1;
Pfn1->u4.VerifierAllocation = 0;
// //
// Save this page into the MDL // Save this page into the MDL
// //
@ -338,7 +337,7 @@ MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
// //
// Terminate the MDL array if there's certain missing pages // Terminate the MDL array if there's certain missing pages
// //
if (PagesFound != PageCount) *MdlPage = -1; if (PagesFound != PageCount) *MdlPage = LIST_HEAD;
// //
// Now go back and loop over all the MDL pages // Now go back and loop over all the MDL pages
@ -351,7 +350,7 @@ MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
// Check if we've reached the end // Check if we've reached the end
// //
Page = *MdlPage++; Page = *MdlPage++;
if (Page == (PFN_NUMBER)-1) break; if (Page == LIST_HEAD) break;
// //
// Get the PFN entry for the page and check if we should zero it out // Get the PFN entry for the page and check if we should zero it out
@ -374,77 +373,29 @@ VOID
NTAPI NTAPI
MmDumpPfnDatabase(VOID) MmDumpPfnDatabase(VOID)
{ {
ULONG i; /* Pretty useless for now, to be improved later */
PPHYSICAL_PAGE Pfn1; return;
PCHAR State = "????", Type = "Unknown";
KIRQL OldIrql;
ULONG Totals[5] = {0}, FreePages = 0;
KeRaiseIrql(HIGH_LEVEL, &OldIrql);
//
// Loop the PFN database
//
for (i = 0; i <= MmHighestPhysicalPage; i++)
{
Pfn1 = MiGetPfnEntry(i);
if (!Pfn1) continue;
//
// Get the type
//
if (MiIsPfnInUse(Pfn1))
{
State = "Used";
}
else
{
State = "Free";
Type = "Free";
FreePages++;
break;
}
//
// Pretty-print the page
//
DbgPrint("0x%08p:\t%04s\t%20s\t(%02d) [%08p])\n",
i << PAGE_SHIFT,
State,
Type,
Pfn1->u3.e2.ReferenceCount,
Pfn1->RmapListHead);
}
DbgPrint("Nonpaged Pool: %d pages\t[%d KB]\n", Totals[MC_NPPOOL], (Totals[MC_NPPOOL] << PAGE_SHIFT) / 1024);
DbgPrint("Paged Pool: %d pages\t[%d KB]\n", Totals[MC_PPOOL], (Totals[MC_PPOOL] << PAGE_SHIFT) / 1024);
DbgPrint("File System Cache: %d pages\t[%d KB]\n", Totals[MC_CACHE], (Totals[MC_CACHE] << PAGE_SHIFT) / 1024);
DbgPrint("Process Working Set: %d pages\t[%d KB]\n", Totals[MC_USER], (Totals[MC_USER] << PAGE_SHIFT) / 1024);
DbgPrint("System: %d pages\t[%d KB]\n", Totals[MC_SYSTEM], (Totals[MC_SYSTEM] << PAGE_SHIFT) / 1024);
DbgPrint("Free: %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
KeLowerIrql(OldIrql);
} }
VOID VOID
NTAPI NTAPI
MmSetRmapListHeadPage(PFN_NUMBER Pfn, struct _MM_RMAP_ENTRY* ListHead) MmSetRmapListHeadPage(PFN_NUMBER Pfn, PMM_RMAP_ENTRY ListHead)
{ {
KIRQL oldIrql; KIRQL oldIrql;
PMMPFN Pfn1; PMMPFN Pfn1;
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock); oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
Pfn1 = MiGetPfnEntry(Pfn); Pfn1 = MiGetPfnEntry(Pfn);
ASSERT(Pfn1);
ASSERT_IS_ROS_PFN(Pfn1);
if (ListHead) if (ListHead)
{ {
/* Should not be trying to insert an RMAP for a non-active page */ /* Should not be trying to insert an RMAP for a non-active page */
ASSERT(MiIsPfnInUse(Pfn1) == TRUE); ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
/* Set the list head address */ /* Set the list head address */
Pfn1->RmapListHead = (LONG)ListHead; MI_GET_ROS_DATA(Pfn1)->RmapListHead = ListHead;
/* Mark that the page has an actual RMAP, not a residual color link */
Pfn1->u3.e1.ParityError = TRUE;
} }
else else
{ {
@ -452,23 +403,20 @@ MmSetRmapListHeadPage(PFN_NUMBER Pfn, struct _MM_RMAP_ENTRY* ListHead)
ASSERT(MiIsPfnInUse(Pfn1) == TRUE); ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
/* In this case, the RMAP is actually being removed, so clear field */ /* In this case, the RMAP is actually being removed, so clear field */
Pfn1->RmapListHead = 0; MI_GET_ROS_DATA(Pfn1)->RmapListHead = NULL;
/* Mark that the page has no RMAP, not a residual color link */
Pfn1->u3.e1.ParityError = FALSE;
/* ReactOS semantics will now release the page, which will make it free and enter a colored list */ /* ReactOS semantics will now release the page, which will make it free and enter a colored list */
} }
KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql); KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
} }
struct _MM_RMAP_ENTRY* PMM_RMAP_ENTRY
NTAPI NTAPI
MmGetRmapListHeadPage(PFN_NUMBER Pfn) MmGetRmapListHeadPage(PFN_NUMBER Pfn)
{ {
KIRQL oldIrql; KIRQL oldIrql;
struct _MM_RMAP_ENTRY* ListHead; PMM_RMAP_ENTRY ListHead;
PMMPFN Pfn1; PMMPFN Pfn1;
/* Lock PFN database */ /* Lock PFN database */
@ -476,15 +424,11 @@ MmGetRmapListHeadPage(PFN_NUMBER Pfn)
/* Get the entry */ /* Get the entry */
Pfn1 = MiGetPfnEntry(Pfn); Pfn1 = MiGetPfnEntry(Pfn);
ASSERT(Pfn1);
/* Check if the page doesn't really have an RMAP */ ASSERT_IS_ROS_PFN(Pfn1);
if (Pfn1->u3.e1.ParityError == FALSE)
{ /* Get the list head */
KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql); ListHead = MI_GET_ROS_DATA(Pfn1)->RmapListHead;
return NULL;
}
ListHead = (struct _MM_RMAP_ENTRY*)Pfn1->RmapListHead;
/* Should not have an RMAP for a non-active page */ /* Should not have an RMAP for a non-active page */
ASSERT(MiIsPfnInUse(Pfn1) == TRUE); ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
@ -499,9 +443,14 @@ NTAPI
MmSetSavedSwapEntryPage(PFN_NUMBER Pfn, SWAPENTRY SwapEntry) MmSetSavedSwapEntryPage(PFN_NUMBER Pfn, SWAPENTRY SwapEntry)
{ {
KIRQL oldIrql; KIRQL oldIrql;
PPHYSICAL_PAGE Page;
Page = MiGetPfnEntry(Pfn);
ASSERT(Page);
ASSERT_IS_ROS_PFN(Page);
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock); oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MiGetPfnEntry(Pfn)->u1.WsIndex = SwapEntry; MI_GET_ROS_DATA(Page)->SwapEntry = SwapEntry;
KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql); KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
} }
@ -511,9 +460,14 @@ MmGetSavedSwapEntryPage(PFN_NUMBER Pfn)
{ {
SWAPENTRY SwapEntry; SWAPENTRY SwapEntry;
KIRQL oldIrql; KIRQL oldIrql;
PPHYSICAL_PAGE Page;
Page = MiGetPfnEntry(Pfn);
ASSERT(Page);
ASSERT_IS_ROS_PFN(Page);
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock); oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
SwapEntry = MiGetPfnEntry(Pfn)->u1.WsIndex; SwapEntry = MI_GET_ROS_DATA(Page)->SwapEntry;
KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql); KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
return(SwapEntry); return(SwapEntry);
@ -534,7 +488,8 @@ MmReferencePage(PFN_NUMBER Pfn)
Page = MiGetPfnEntry(Pfn); Page = MiGetPfnEntry(Pfn);
ASSERT(Page); ASSERT(Page);
ASSERT_IS_ROS_PFN(Page);
Page->u3.e2.ReferenceCount++; Page->u3.e2.ReferenceCount++;
} }
@ -551,6 +506,7 @@ MmGetReferenceCountPage(PFN_NUMBER Pfn)
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock); oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
Page = MiGetPfnEntry(Pfn); Page = MiGetPfnEntry(Pfn);
ASSERT(Page); ASSERT(Page);
ASSERT_IS_ROS_PFN(Page);
RCount = Page->u3.e2.ReferenceCount; RCount = Page->u3.e2.ReferenceCount;
@ -570,17 +526,22 @@ NTAPI
MmDereferencePage(PFN_NUMBER Pfn) MmDereferencePage(PFN_NUMBER Pfn)
{ {
PPHYSICAL_PAGE Page; PPHYSICAL_PAGE Page;
DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT); DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
Page = MiGetPfnEntry(Pfn); Page = MiGetPfnEntry(Pfn);
ASSERT(Page); ASSERT(Page);
ASSERT_IS_ROS_PFN(Page);
Page->u3.e2.ReferenceCount--; Page->u3.e2.ReferenceCount--;
if (Page->u3.e2.ReferenceCount == 0) if (Page->u3.e2.ReferenceCount == 0)
{ {
/* Mark the page temporarily as valid, we're going to make it free soon */ /* Mark the page temporarily as valid, we're going to make it free soon */
Page->u3.e1.PageLocation = ActiveAndValid; Page->u3.e1.PageLocation = ActiveAndValid;
/* It's not a ROS PFN anymore */
Page->u4.AweAllocation = FALSE;
ExFreePool(MI_GET_ROS_DATA(Page));
Page->RosMmData = 0;
/* Bring it back into the free list */ /* Bring it back into the free list */
DPRINT("Legacy free: %lx\n", Pfn); DPRINT("Legacy free: %lx\n", Pfn);
@ -614,6 +575,17 @@ MmAllocPage(ULONG Type)
Pfn1 = MiGetPfnEntry(PfnOffset); Pfn1 = MiGetPfnEntry(PfnOffset);
Pfn1->u3.e2.ReferenceCount = 1; Pfn1->u3.e2.ReferenceCount = 1;
Pfn1->u3.e1.PageLocation = ActiveAndValid; Pfn1->u3.e1.PageLocation = ActiveAndValid;
/* This marks the PFN as a ReactOS PFN */
Pfn1->u4.AweAllocation = TRUE;
/* Allocate the extra ReactOS Data and zero it out */
Pfn1->RosMmData = (LONG)ExAllocatePoolWithTag(NonPagedPool, sizeof(MMROSPFN), 'RsPf');
ASSERT(MI_GET_ROS_DATA(Pfn1) != NULL);
ASSERT_IS_ROS_PFN(Pfn1);
MI_GET_ROS_DATA(Pfn1)->SwapEntry = 0;
MI_GET_ROS_DATA(Pfn1)->RmapListHead = NULL;
return PfnOffset; return PfnOffset;
} }

View file

@ -19,17 +19,6 @@
/* TYPES ********************************************************************/ /* TYPES ********************************************************************/
typedef struct _MM_RMAP_ENTRY
{
struct _MM_RMAP_ENTRY* Next;
PEPROCESS Process;
PVOID Address;
#if DBG
PVOID Caller;
#endif
}
MM_RMAP_ENTRY, *PMM_RMAP_ENTRY;
/* GLOBALS ******************************************************************/ /* GLOBALS ******************************************************************/
static FAST_MUTEX RmapListLock; static FAST_MUTEX RmapListLock;