[NTOS:CC] Performance improvements

Do not ditch the pages as soon as the sections are unmapped
Improve MmBalancer "algorithm" (or whatever you call that)
Various needed fixes to get this going.
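In effect, pages now get a second chance: on a relaxed pass the balancer only clears the accessed bit on a page's mappings, and it evicts the page on a later pass only if nobody touched it in between. A minimal sketch of that policy, condensed from the MmTrimUserMemory changes below (ClearAccessedBitOnAllMappings is an illustrative helper, not part of the commit):

/* Hypothetical condensed view of the new trim policy */
while (CurrentPage != 0 && Target > 0)
{
    if (Priority)
    {
        /* Under real memory pressure: page out unconditionally */
        MmPageOutPhysicalAddress(CurrentPage);
    }
    else
    {
        /* Relaxed pass: age the page, evict only if it stayed cold */
        BOOLEAN Accessed = ClearAccessedBitOnAllMappings(CurrentPage);
        if (!Accessed)
            MmPageOutPhysicalAddress(CurrentPage);
    }
    Target--;
    CurrentPage = MmGetLRUNextUserPage(CurrentPage, TRUE);
}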
Jérôme Gardou 2021-01-28 15:42:12 +01:00
parent 7fbf90d6fd
commit 41475dfcd7
7 changed files with 314 additions and 202 deletions

View file

@@ -5,8 +5,6 @@
/* TYPES *********************************************************************/
#define MM_SEGMENT_FINALIZE (0x40000000)
#define RMAP_SEGMENT_MASK ~((ULONG_PTR)0xff)
#define RMAP_IS_SEGMENT(x) (((ULONG_PTR)(x) & RMAP_SEGMENT_MASK) == RMAP_SEGMENT_MASK)
#define MIN(x,y) (((x)<(y))?(x):(y))
#define MAX(x,y) (((x)>(y))?(x):(y))

View file

@@ -830,6 +830,8 @@ NTAPI
MmRebalanceMemoryConsumers(VOID);
/* rmap.c **************************************************************/
#define RMAP_SEGMENT_MASK ~((ULONG_PTR)0xff)
#define RMAP_IS_SEGMENT(x) (((ULONG_PTR)(x) & RMAP_SEGMENT_MASK) == RMAP_SEGMENT_MASK)
VOID
NTAPI
@@ -1189,6 +1191,14 @@ MmIsDirtyPage(
PVOID Address
);
VOID
NTAPI
MmClearPageAccessedBit(PEPROCESS Process, PVOID Address);
BOOLEAN
NTAPI
MmIsPageAccessed(PEPROCESS Process, PVOID Address);
/* wset.c ********************************************************************/
NTSTATUS

View file

@@ -113,14 +113,13 @@ MiTrimMemoryConsumer(ULONG Consumer, ULONG InitialTarget)
Target = (ULONG)max(Target, MiMinimumAvailablePages - MmAvailablePages);
}
/* Don't be too greedy if we're not in a hurry */
if (MmAvailablePages > MiMinimumAvailablePages)
Target = min(Target, 256);
/* Don't be too greedy in one run */
Target = min(Target, 256);
if (Target)
{
/* Now swap the pages out */
Status = MiMemoryConsumers[Consumer].Trim(Target, 0, &NrFreedPages);
Status = MiMemoryConsumers[Consumer].Trim(Target, MmAvailablePages < MiMinimumAvailablePages, &NrFreedPages);
DPRINT("Trimming consumer %lu: Freed %lu pages with a target of %lu pages\n", Consumer, NrFreedPages, Target);
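The second argument of the Trim callback now carries the urgency: it is non-zero only when available pages have dropped below the minimum. A sketch of the assumed callback contract (the typedef name is illustrative; MmTrimUserMemory below is one implementation):

/* Assumed shape of the per-consumer trim callback (name is hypothetical) */
typedef NTSTATUS (*PMM_CONSUMER_TRIM)(ULONG Target, ULONG Priority, PULONG NrFreedPages);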
@@ -142,15 +141,104 @@ MmTrimUserMemory(ULONG Target, ULONG Priority, PULONG NrFreedPages)
(*NrFreedPages) = 0;
DPRINT1("MM BALANCER: %s\n", Priority ? "Paging out!" : "Removing access bit!");
CurrentPage = MmGetLRUFirstUserPage();
while (CurrentPage != 0 && Target > 0)
{
Status = MmPageOutPhysicalAddress(CurrentPage);
if (NT_SUCCESS(Status))
if (Priority)
{
DPRINT("Succeeded\n");
Status = MmPageOutPhysicalAddress(CurrentPage);
if (NT_SUCCESS(Status))
{
DPRINT("Succeeded\n");
Target--;
(*NrFreedPages)++;
}
}
else
{
/* When not paging out aggressively, just reset the accessed bit */
PEPROCESS Process = NULL;
PVOID Address = NULL;
BOOLEAN Accessed = FALSE;
/*
* We have a lock-ordering problem here. We can't lock the PFN DB before the process address space.
* So we must use convoluted loops.
* Well...
*/
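/*
 * Concretely: under the PFN lock we only locate the next rmap entry past
 * the last (Address, Process) pair we handled, then we drop the lock
 * before attaching to the process and locking its address space. The
 * sorted rmap list (see MmInsertRmap below) is what makes this scan
 * resumable after the lock is dropped.
 */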
while (TRUE)
{
KAPC_STATE ApcState;
KIRQL OldIrql = MiAcquirePfnLock();
PMM_RMAP_ENTRY Entry = MmGetRmapListHeadPage(CurrentPage);
while (Entry)
{
if (RMAP_IS_SEGMENT(Entry->Address))
{
Entry = Entry->Next;
continue;
}
/* Check that we didn't already handle this entry */
if (Entry->Address < Address)
{
Entry = Entry->Next;
continue;
}
if ((Entry->Address == Address) && (Entry->Process <= Process))
{
Entry = Entry->Next;
continue;
}
break;
}
if (!Entry)
{
MiReleasePfnLock(OldIrql);
break;
}
Process = Entry->Process;
Address = Entry->Address;
MiReleasePfnLock(OldIrql);
KeStackAttachProcess(&Process->Pcb, &ApcState);
MmLockAddressSpace(&Process->Vm);
/* Be sure this is still valid. */
PMMPTE Pte = MiAddressToPte(Address);
if (Pte->u.Hard.Valid)
{
Accessed = Accessed || Pte->u.Hard.Accessed;
Pte->u.Hard.Accessed = 0;
/* There is no need to invalidate: the balancer thread never runs in the context of a user process */
//KeInvalidateTlbEntry(Address);
}
MmUnlockAddressSpace(&Process->Vm);
KeUnstackDetachProcess(&ApcState);
}
if (!Accessed)
{
/* Nobody accessed this page since the last time we checked. Time to clean up */
Status = MmPageOutPhysicalAddress(CurrentPage);
// DPRINT1("Paged-out one page: %s\n", NT_SUCCESS(Status) ? "Yes" : "No");
(void)Status;
}
/* Done for this page. */
Target--;
(*NrFreedPages)++;
}
CurrentPage = MmGetLRUNextUserPage(CurrentPage, TRUE);
@@ -189,78 +277,10 @@ NTAPI
MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
PPFN_NUMBER AllocatedPage)
{
ULONG PagesUsed;
PFN_NUMBER Page;
/*
* Make sure we don't exceed our individual target.
*/
PagesUsed = InterlockedIncrementUL(&MiMemoryConsumers[Consumer].PagesUsed);
if (PagesUsed > MiMemoryConsumers[Consumer].PagesTarget &&
!MiIsBalancerThread())
{
MmRebalanceMemoryConsumers();
}
/*
* Always allocate memory for the nonpaged pool and for the pager thread.
*/
if (Consumer == MC_SYSTEM)
{
Page = MmAllocPage(Consumer);
if (Page == 0)
{
KeBugCheck(NO_PAGES_AVAILABLE);
}
*AllocatedPage = Page;
if (MmAvailablePages < MiMinimumAvailablePages)
MmRebalanceMemoryConsumers();
return(STATUS_SUCCESS);
}
/*
* Make sure we don't exceed global targets.
*/
if (((MmAvailablePages < MiMinimumAvailablePages) && !MiIsBalancerThread())
|| (MmAvailablePages < (MiMinimumAvailablePages / 2)))
{
MM_ALLOCATION_REQUEST Request;
if (!CanWait)
{
(void)InterlockedDecrementUL(&MiMemoryConsumers[Consumer].PagesUsed);
MmRebalanceMemoryConsumers();
return(STATUS_NO_MEMORY);
}
/* Insert an allocation request. */
Request.Page = 0;
KeInitializeEvent(&Request.Event, NotificationEvent, FALSE);
ExInterlockedInsertTailList(&AllocationListHead, &Request.ListEntry, &AllocationListLock);
MmRebalanceMemoryConsumers();
KeWaitForSingleObject(&Request.Event,
0,
KernelMode,
FALSE,
NULL);
Page = Request.Page;
if (Page == 0)
{
KeBugCheck(NO_PAGES_AVAILABLE);
}
*AllocatedPage = Page;
if (MmAvailablePages < MiMinimumAvailablePages)
{
MmRebalanceMemoryConsumers();
}
return(STATUS_SUCCESS);
}
/* Update the target */
InterlockedIncrementUL(&MiMemoryConsumers[Consumer].PagesUsed);
/*
* Actually allocate the page.
@@ -272,11 +292,6 @@ MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
}
*AllocatedPage = Page;
if (MmAvailablePages < MiMinimumAvailablePages)
{
MmRebalanceMemoryConsumers();
}
return(STATUS_SUCCESS);
}
@@ -407,22 +422,14 @@ MiInitBalancerThread(VOID)
{
KPRIORITY Priority;
NTSTATUS Status;
#if !defined(__GNUC__)
LARGE_INTEGER dummyJunkNeeded;
dummyJunkNeeded.QuadPart = -20000000; /* 2 sec */
;
#endif
LARGE_INTEGER Timeout;
KeInitializeEvent(&MiBalancerEvent, SynchronizationEvent, FALSE);
KeInitializeTimerEx(&MiBalancerTimer, SynchronizationTimer);
Timeout.QuadPart = -20000000; /* 2 sec */
KeSetTimerEx(&MiBalancerTimer,
#if defined(__GNUC__)
(LARGE_INTEGER)(LONGLONG)-20000000LL, /* 2 sec */
#else
dummyJunkNeeded,
#endif
Timeout,
2000, /* 2 sec */
NULL);

View file

@@ -139,7 +139,7 @@ MmGetLRUNextUserPage(PFN_NUMBER PreviousPage, BOOLEAN MoveToLast)
* If it's not, then it means it is still hanging in some process address space.
* This avoids paging out e.g. ntdll early just because it was mapped first.
*/
if (MoveToLast)
if ((MoveToLast) && (MmGetReferenceCountPage(PreviousPage) > 1))
{
MmRemoveLRUUserPage(PreviousPage);
MmInsertLRULastUserPage(PreviousPage);
@@ -424,10 +424,11 @@ VOID
NTAPI
MmSetRmapListHeadPage(PFN_NUMBER Pfn, PMM_RMAP_ENTRY ListHead)
{
KIRQL oldIrql;
PMMPFN Pfn1;
oldIrql = MiAcquirePfnLock();
/* PFN database must be locked */
MI_ASSERT_PFN_LOCK_HELD();
Pfn1 = MiGetPfnEntry(Pfn);
ASSERT(Pfn1);
ASSERT_IS_ROS_PFN(Pfn1);
@@ -450,8 +451,6 @@ MmSetRmapListHeadPage(PFN_NUMBER Pfn, PMM_RMAP_ENTRY ListHead)
/* ReactOS semantics will now release the page, which will make it free and enter a colored list */
}
MiReleasePfnLock(oldIrql);
}
PMM_RMAP_ENTRY

View file

@@ -585,6 +585,46 @@ MmSetDirtyPage(PEPROCESS Process, PVOID Address)
}
}
VOID
NTAPI
MmClearPageAccessedBit(PEPROCESS Process, PVOID Address)
{
PULONG Pt;
LONG Pte;
KIRQL OldIrql;
if (Address < MmSystemRangeStart && Process == NULL)
{
DPRINT1("MmClearPageAccessedBit is called for user space without a process.\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
Pt = MmGetPageTableForProcess(Process, Address, FALSE, &OldIrql);
if (Pt == NULL)
{
KeBugCheck(MEMORY_MANAGEMENT);
}
do
{
Pte = *Pt;
} while (Pte != InterlockedCompareExchangePte(Pt, Pte & ~PA_ACCESSED, Pte));
if (!(Pte & PA_PRESENT))
{
KeBugCheck(MEMORY_MANAGEMENT);
}
MiFlushTlb(Pt, Address, OldIrql);
}
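The interlocked loop above is the standard lock-free read-modify-write pattern: re-read the PTE until the compare-exchange succeeds against an unchanged value, then inspect the value observed before the update (which is why the present-bit check can still bugcheck on a PTE that was never present). The same idiom in isolation, as a generic sketch rather than code from the commit:

/* Generic sketch: atomically clear bits in a word and return the value
 * observed immediately before the update. */
static LONG ClearBitsAtomically(volatile LONG *Word, LONG Bits)
{
    LONG Old;
    do
    {
        Old = *Word;
    } while (InterlockedCompareExchange(Word, Old & ~Bits, Old) != Old);
    return Old;
}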
BOOLEAN
NTAPI
MmIsPageAccessed(PEPROCESS Process, PVOID Address)
{
return BooleanFlagOn(MmGetPageEntryForProcess(Process, Address), PA_ACCESSED);
}
BOOLEAN
NTAPI
MmIsPagePresent(PEPROCESS Process, PVOID Address)

View file

@@ -53,13 +53,14 @@ MmPageOutPhysicalAddress(PFN_NUMBER Page)
PMM_RMAP_ENTRY entry;
PMEMORY_AREA MemoryArea;
PMMSUPPORT AddressSpace;
PVOID Address;
PEPROCESS Process;
PVOID Address = NULL;
PEPROCESS Process = NULL;
NTSTATUS Status = STATUS_SUCCESS;
PMM_SECTION_SEGMENT Segment;
LARGE_INTEGER SegmentOffset;
KIRQL OldIrql;
GetEntry:
OldIrql = MiAcquirePfnLock();
entry = MmGetRmapListHeadPage(Page);
@@ -67,6 +68,16 @@ MmPageOutPhysicalAddress(PFN_NUMBER Page)
while (entry && RMAP_IS_SEGMENT(entry->Address))
entry = entry->Next;
/* See if we are retrying because the page is actively used */
while (entry && ((entry->Address < Address) || RMAP_IS_SEGMENT(entry->Address)))
entry = entry->Next;
if (entry && (entry->Address == Address))
{
while (entry && ((entry->Process <= Process) || RMAP_IS_SEGMENT(entry->Address)))
entry = entry->Next;
}
if (entry == NULL)
{
MiReleasePfnLock(OldIrql);
@@ -81,41 +92,42 @@ MmPageOutPhysicalAddress(PFN_NUMBER Page)
KeBugCheck(MEMORY_MANAGEMENT);
}
if (Address < MmSystemRangeStart)
{
if (!ExAcquireRundownProtection(&Process->RundownProtect))
{
MiReleasePfnLock(OldIrql);
return STATUS_PROCESS_IS_TERMINATING;
}
/* This is for user-mode addresses only */
ASSERT(Address < MmSystemRangeStart);
Status = ObReferenceObjectByPointer(Process, PROCESS_ALL_ACCESS, NULL, KernelMode);
MiReleasePfnLock(OldIrql);
if (!NT_SUCCESS(Status))
{
ExReleaseRundownProtection(&Process->RundownProtect);
return Status;
}
AddressSpace = &Process->Vm;
}
else
if (!ExAcquireRundownProtection(&Process->RundownProtect))
{
MiReleasePfnLock(OldIrql);
AddressSpace = MmGetKernelAddressSpace();
return STATUS_PROCESS_IS_TERMINATING;
}
Status = ObReferenceObjectByPointer(Process, PROCESS_ALL_ACCESS, NULL, KernelMode);
MiReleasePfnLock(OldIrql);
if (!NT_SUCCESS(Status))
{
ExReleaseRundownProtection(&Process->RundownProtect);
return Status;
}
AddressSpace = &Process->Vm;
MmLockAddressSpace(AddressSpace);
if ((MmGetPfnForProcess(Process, Address) != Page) || MmIsPageAccessed(Process, Address))
{
/* This changed in the short window where we didn't have any locks */
MmUnlockAddressSpace(AddressSpace);
ExReleaseRundownProtection(&Process->RundownProtect);
ObDereferenceObject(Process);
goto GetEntry;
}
MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, Address);
if (MemoryArea == NULL || MemoryArea->DeleteInProgress)
{
MmUnlockAddressSpace(AddressSpace);
if (Address < MmSystemRangeStart)
{
ExReleaseRundownProtection(&Process->RundownProtect);
ObDereferenceObject(Process);
}
return(STATUS_UNSUCCESSFUL);
ExReleaseRundownProtection(&Process->RundownProtect);
ObDereferenceObject(Process);
goto GetEntry;
}
if (MemoryArea->Type == MEMORY_AREA_SECTION_VIEW)
@@ -269,7 +281,7 @@ MmPageOutPhysicalAddress(PFN_NUMBER Page)
MmDeleteRmap(Page, Process, Address);
/* One less mapping referencing this segment */
Released = MmUnsharePageEntrySectionSegment(MemoryArea, Segment, &Offset, Dirty, FALSE, NULL);
Released = MmUnsharePageEntrySectionSegment(MemoryArea, Segment, &Offset, Dirty, TRUE, NULL);
MmUnlockSectionSegment(Segment);
MmUnlockAddressSpace(AddressSpace);
@@ -396,11 +408,7 @@ MmInsertRmap(PFN_NUMBER Page, PEPROCESS Process,
new_entry->Address = Address;
new_entry->Process = (PEPROCESS)Process;
#if DBG
#ifdef __GNUC__
new_entry->Caller = __builtin_return_address(0);
#else
new_entry->Caller = _ReturnAddress();
#endif
#endif
if (
@@ -417,24 +425,39 @@ MmInsertRmap(PFN_NUMBER Page, PEPROCESS Process,
OldIrql = MiAcquirePfnLock();
current_entry = MmGetRmapListHeadPage(Page);
new_entry->Next = current_entry;
#if DBG
while (current_entry)
PMM_RMAP_ENTRY previous_entry = NULL;
/* Keep the list sorted */
while (current_entry && (current_entry->Address < Address))
{
if (current_entry->Address == new_entry->Address && current_entry->Process == new_entry->Process)
{
DbgPrint("MmInsertRmap tries to add a second rmap entry for address %p\n current caller ",
current_entry->Address);
DbgPrint("%p", new_entry->Caller);
DbgPrint("\n previous caller ");
DbgPrint("%p", current_entry->Caller);
DbgPrint("\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
previous_entry = current_entry;
current_entry = current_entry->Next;
}
#endif
MmSetRmapListHeadPage(Page, new_entry);
/* In case of an address clash, sort by process */
if (current_entry && (current_entry->Address == Address))
{
while (current_entry && (current_entry->Process < Process))
{
previous_entry = current_entry;
current_entry = current_entry->Next;
}
}
if (current_entry && (current_entry->Address == Address) && (current_entry->Process == Process))
{
DbgPrint("MmInsertRmap tries to add a second rmap entry for address %p\n", current_entry->Address);
DbgPrint(" current caller %p\n", new_entry->Caller);
DbgPrint(" previous caller %p\n", current_entry->Caller);
KeBugCheck(MEMORY_MANAGEMENT);
}
new_entry->Next = current_entry;
if (previous_entry)
previous_entry->Next = new_entry;
else
MmSetRmapListHeadPage(Page, new_entry);
MiReleasePfnLock(OldIrql);
if (!RMAP_IS_SEGMENT(Address))

View file

@@ -1091,8 +1091,7 @@ MmUnsharePageEntrySectionSegment(PMEMORY_AREA MemoryArea,
{
ULONG_PTR Entry = InEntry ? *InEntry : MmGetPageEntrySectionSegment(Segment, Offset);
PFN_NUMBER Page = PFN_FROM_SSE(Entry);
ULONG_PTR NewEntry = 0;
SWAPENTRY SwapEntry;
BOOLEAN IsDataMap = BooleanFlagOn(*Segment->Flags, MM_DATAFILE_SEGMENT);
if (Entry == 0)
{
@@ -1111,64 +1110,53 @@
Entry = DECREF_SSE(Entry);
if (Dirty) Entry = DIRTY_SSE(Entry);
if (SHARE_COUNT_FROM_SSE(Entry) > 0)
/* If we are paging out, actually pruning the page will be taken care of in MmCheckDirtySegment */
if ((SHARE_COUNT_FROM_SSE(Entry) > 0) || PageOut)
{
/* Update the page mapping in the segment and we're done */
if (InEntry)
*InEntry = Entry;
else
MmSetPageEntrySectionSegment(Segment, Offset, Entry);
return FALSE;
}
if (IS_DIRTY_SSE(Entry) && (MemoryArea->VadNode.u.VadFlags.VadType != VadImageMap))
{
ASSERT(!Segment->WriteCopy);
ASSERT(MmGetSavedSwapEntryPage(Page) == 0);
/* The entry must be written back to disk, so leave it in the segment; the page-out thread will take care of it */
MmSetPageEntrySectionSegment(Segment, Offset, Entry);
return FALSE;
}
/* The only valid case for shared dirty pages is a shared image section */
ASSERT(!IS_DIRTY_SSE(Entry) || (Segment->Image.Characteristics & IMAGE_SCN_MEM_SHARED));
/* We are pruning the last mapping of this page. See if we can keep it around a bit longer. */
ASSERT(!PageOut);
SwapEntry = MmGetSavedSwapEntryPage(Page);
if (IS_DIRTY_SSE(Entry) && !SwapEntry)
if (IsDataMap)
{
SwapEntry = MmAllocSwapPage();
if (!SwapEntry)
{
/* We can't have a swap entry for this page. Let the segment keep it */
MmSetPageEntrySectionSegment(Segment, Offset, Entry);
return FALSE;
}
/* We can always keep the page in memory for data maps */
MmSetPageEntrySectionSegment(Segment, Offset, Entry);
return FALSE;
}
if (IS_DIRTY_SSE(Entry))
if (!BooleanFlagOn(Segment->Image.Characteristics, IMAGE_SCN_MEM_SHARED))
{
NTSTATUS Status = MmWriteToSwapPage(SwapEntry, Page);
if (!NT_SUCCESS(Status))
{
/* We failed. Clean up */
MmSetSavedSwapEntryPage(Page, 0);
MmFreeSwapPage(SwapEntry);
MmSetPageEntrySectionSegment(Segment, Offset, Entry);
return FALSE;
}
/* So this must have been a read-only page. Keep it! */
ASSERT(Segment->WriteCopy);
ASSERT(!IS_DIRTY_SSE(Entry));
ASSERT(MmGetSavedSwapEntryPage(Page) == 0);
MmSetPageEntrySectionSegment(Segment, Offset, Entry);
return FALSE;
}
/*
* So this is a page for a shared section of a DLL.
* We can keep it if it is not dirty.
*/
SWAPENTRY SwapEntry = MmGetSavedSwapEntryPage(Page);
if ((SwapEntry == 0) && !IS_DIRTY_SSE(Entry))
{
MmSetPageEntrySectionSegment(Segment, Offset, Entry);
return FALSE;
}
/* No more processes are referencing this shared dirty page. Ditch it. */
if (SwapEntry)
{
NewEntry = MAKE_SWAP_SSE(SwapEntry);
MmSetSavedSwapEntryPage(Page, 0);
MmFreeSwapPage(SwapEntry);
}
/* We can let this go */
MmSetPageEntrySectionSegment(Segment, Offset, NewEntry);
MmSetPageEntrySectionSegment(Segment, Offset, 0);
MmReleasePageMemoryConsumer(MC_USER, Page);
MiSetPageEvent(NULL, NULL);
return TRUE;
}
@@ -4849,9 +4837,12 @@ MmCheckDirtySegment(
{
BOOLEAN DirtyAgain;
/* We got a dirty entry. Is this segment copy on write */
/*
* We got a dirty entry. This path is for shared data,
* be it regular file maps or shared sections of DLLs.
*/
ASSERT(!Segment->WriteCopy);
ASSERT(Segment->SegFlags & MM_DATAFILE_SEGMENT);
ASSERT(FlagOn(*Segment->Flags, MM_DATAFILE_SEGMENT) || FlagOn(Segment->Image.Characteristics, IMAGE_SCN_MEM_SHARED));
/* Insert the cleaned entry back. Mark it as write in progress, and clear the dirty bit. */
Entry = MAKE_SSE(PAGE_FROM_SSE(Entry), SHARE_COUNT_FROM_SSE(Entry) + 1);
@@ -4863,17 +4854,52 @@
MmUnlockSectionSegment(Segment);
/* Tell the FS driver who we are */
if (PageOut)
IoSetTopLevelIrp((PIRP)FSRTL_MOD_WRITE_TOP_LEVEL_IRP);
if (FlagOn(*Segment->Flags, MM_DATAFILE_SEGMENT))
{
/* We have to write it back to the file. Tell the FS driver who we are */
if (PageOut)
IoSetTopLevelIrp((PIRP)FSRTL_MOD_WRITE_TOP_LEVEL_IRP);
/* Go ahead and write the page */
DPRINT("Writing page at offset %I64d for file %wZ, Pageout: %s\n",
Offset->QuadPart, &Segment->FileObject->FileName, PageOut ? "TRUE" : "FALSE");
Status = MiWritePage(Segment, Offset->QuadPart, Page);
/* Go ahead and write the page */
DPRINT("Writing page at offset %I64d for file %wZ, Pageout: %s\n",
Offset->QuadPart, &Segment->FileObject->FileName, PageOut ? "TRUE" : "FALSE");
Status = MiWritePage(Segment, Offset->QuadPart, Page);
if (PageOut)
IoSetTopLevelIrp(NULL);
if (PageOut)
IoSetTopLevelIrp(NULL);
}
else
{
/* This must only be called by the page-out path */
ASSERT(PageOut);
/* And this must be for a shared section in a DLL */
ASSERT(Segment->Image.Characteristics & IMAGE_SCN_MEM_SHARED);
SWAPENTRY SwapEntry = MmGetSavedSwapEntryPage(Page);
if (!SwapEntry)
{
SwapEntry = MmAllocSwapPage();
}
if (SwapEntry)
{
Status = MmWriteToSwapPage(SwapEntry, Page);
if (NT_SUCCESS(Status))
{
MmSetSavedSwapEntryPage(Page, SwapEntry);
}
else
{
MmFreeSwapPage(SwapEntry);
}
}
else
{
DPRINT1("Failed to allocate a swap page!\n");
Status = STATUS_INSUFFICIENT_RESOURCES;
}
}
MmLockSectionSegment(Segment);
@@ -4905,8 +4931,17 @@
/* Was this page hanging around just for the sake of being present? */
if (!IS_DIRTY_SSE(Entry) && (SHARE_COUNT_FROM_SSE(Entry) == 0) && PageOut)
{
ULONG_PTR NewEntry = 0;
/* Restore the swap entry here */
if (!FlagOn(*Segment->Flags, MM_DATAFILE_SEGMENT))
{
SWAPENTRY SwapEntry = MmGetSavedSwapEntryPage(Page);
if (SwapEntry)
NewEntry = MAKE_SWAP_SSE(SwapEntry);
}
/* Yes. Release it */
MmSetPageEntrySectionSegment(Segment, Offset, 0);
MmSetPageEntrySectionSegment(Segment, Offset, NewEntry);
MmReleasePageMemoryConsumer(MC_USER, Page);
/* Tell the caller we released the page */
return TRUE;
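With the swap entry preserved in the section segment entry, a later fault on the shared section can locate the page contents again. A hedged sketch of the fault-side decode, using SSE accessor macro names from ReactOS's section code (treat the exact names and the condensed logic as assumptions, not code from this commit):

/* Condensed sketch: how a fault on the section can tell where the page went */
Entry = MmGetPageEntrySectionSegment(Segment, &Offset);
if (IS_SWAP_FROM_SSE(Entry))
{
    /* Evicted to swap (the shared image section path above) */
    SWAPENTRY Swap = SWAPENTRY_FROM_SSE(Entry);
    Status = MmReadFromSwapPage(Swap, Page);
}
else if (Entry != 0)
{
    /* Still resident: reuse the physical page */
    Page = PFN_FROM_SSE(Entry);
}
else
{
    /* Gone for good: must be re-read from the backing file */
}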