Mirror of https://github.com/reactos/reactos.git
- Implemented Hartmut Birr's suggestion of a separate thread to do
  balancing.
- Move pages that can't be paged out because the pagefile is full to the
  end of the LRU list, so we don't waste time checking them again.

svn path=/trunk/; revision=5092
parent e41f7f3329
commit 4ae404aaa8
6 changed files with 97 additions and 51 deletions
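The heart of the change is a handoff between memory consumers and a dedicated balancing ("pager") thread: a consumer that pushes its usage past its target queues an allocation request, wakes the pager thread, and blocks until pages have been freed, while the pager thread itself is exempted via MiIsPagerThread() so that it can always allocate directly and never waits on its own run. Below is a minimal user-mode sketch of that wakeup pattern, assuming POSIX threads and C11 atomics; it is not ReactOS code, and names such as StartPagerThread and PagerWorking are illustrative stand-ins for the kernel routines in the diffs that follow.

    /* Sketch only: collapse many "low memory" notifications into one
       worker run by signalling only on the 0 -> 1 flag transition. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int PagerWorking;   /* 0 = idle, 1 = run requested or running */
    static pthread_mutex_t PagerLock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t PagerWake = PTHREAD_COND_INITIALIZER;

    /* Analogue of MiStartPagerThread(): wake the worker only if idle. */
    static void StartPagerThread(void)
    {
      if (atomic_exchange(&PagerWorking, 1) == 0)
        {
          pthread_mutex_lock(&PagerLock);
          pthread_cond_signal(&PagerWake);
          pthread_mutex_unlock(&PagerLock);
        }
    }

    /* Analogue of MmPagerThreadMain(): sleep until a request arrives,
       rebalance, then announce that this run has finished. */
    static void *PagerThreadMain(void *Ignored)
    {
      (void)Ignored;
      for (;;)
        {
          pthread_mutex_lock(&PagerLock);
          while (atomic_load(&PagerWorking) == 0)
            pthread_cond_wait(&PagerWake, &PagerLock);
          pthread_mutex_unlock(&PagerLock);

          printf("rebalancing memory consumers\n"); /* stands in for MmRebalanceMemoryConsumers() */
          atomic_store(&PagerWorking, 0);
        }
      return NULL;
    }

    int main(void)
    {
      pthread_t Pager;
      pthread_create(&Pager, NULL, PagerThreadMain, NULL);
      StartPagerThread();   /* a consumer went over its page target */
      StartPagerThread();   /* a second request merges into the same run */
      sleep(1);             /* let the worker run once, then exit */
      return 0;
    }

The interlocked exchange is the key design point, mirrored from MiStartPagerThread() below: any number of requesters can call it cheaply, but only the first caller after an idle period pays for the event signal, so concurrent low-memory notifications collapse into a single balancing run.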
@@ -632,5 +632,13 @@ BOOLEAN
 MmIsAvailableSwapPage(VOID);
 VOID
 MmShowOutOfSpaceMessagePagingFile(VOID);
+VOID
+MmRebalanceMemoryConsumers(VOID);
+BOOLEAN
+MiIsPagerThread(VOID);
+VOID
+MiStartPagerThread(VOID);
+VOID
+MmSetLRULastPage(PHYSICAL_ADDRESS PhysicalAddress);
 
 #endif
@@ -16,7 +16,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
-/* $Id: anonmem.c,v 1.17 2003/07/11 01:23:15 royce Exp $
+/* $Id: anonmem.c,v 1.18 2003/07/12 01:52:10 dwelch Exp $
  *
  * PROJECT: ReactOS kernel
  * FILE:    ntoskrnl/mm/anonmem.c
@@ -193,7 +193,7 @@ MmPageOutVirtualMemory(PMADDRESS_SPACE AddressSpace,
 	PageOp->Status = STATUS_UNSUCCESSFUL;
 	KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
 	MmReleasePageOp(PageOp);
-	return(STATUS_UNSUCCESSFUL);
+	return(STATUS_PAGEFILE_QUOTA);
       }
   }
 
@@ -16,7 +16,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
-/* $Id: balance.c,v 1.17 2003/07/10 21:05:03 royce Exp $
+/* $Id: balance.c,v 1.18 2003/07/12 01:52:10 dwelch Exp $
  *
  * PROJECT: ReactOS kernel
  * FILE:    ntoskrnl/mm/balance.c
@@ -59,8 +59,6 @@ static ULONG MiNrAvailablePages;
 static ULONG MiNrTotalPages;
 static LIST_ENTRY AllocationListHead;
 static KSPIN_LOCK AllocationListLock;
-static ULONG NrWorkingThreads = 0;
-static HANDLE WorkerThreadId;
 static ULONG MiPagesRequired = 0;
 static ULONG MiMinimumPagesPerRun = 10;
 
@@ -161,7 +159,7 @@ MiTrimMemoryConsumer(ULONG Consumer)
 }
 
 VOID
-MiRebalanceMemoryConsumers(VOID)
+MmRebalanceMemoryConsumers(VOID)
 {
   LONG Target;
   ULONG i;
@@ -203,7 +201,7 @@ MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
    */
   OldUsed = InterlockedIncrement((LONG *)&MiMemoryConsumers[Consumer].PagesUsed);
   if (OldUsed >= (MiMemoryConsumers[Consumer].PagesTarget - 1) &&
-      WorkerThreadId != PsGetCurrentThreadId())
+      !MiIsPagerThread())
     {
       if (!CanWait)
 	{
@@ -234,21 +232,8 @@ MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
   InterlockedIncrement((LONG *)&MiPagesRequired);
 
   KeAcquireSpinLock(&AllocationListLock, &oldIrql);
-  if (NrWorkingThreads == 0)
-    {
-      InsertTailList(&AllocationListHead, &Request.ListEntry);
-      NrWorkingThreads++;
-      KeReleaseSpinLock(&AllocationListLock, oldIrql);
-      WorkerThreadId = PsGetCurrentThreadId();
-      MiRebalanceMemoryConsumers();
-      KeAcquireSpinLock(&AllocationListLock, &oldIrql);
-      NrWorkingThreads--;
-      WorkerThreadId = 0;
-      KeReleaseSpinLock(&AllocationListLock, oldIrql);
-    }
-  else
-    {
-      if (WorkerThreadId == PsGetCurrentThreadId())
+  /* Always let the pager thread itself allocate memory. */
+  if (MiIsPagerThread())
     {
       Page = MmAllocPage(Consumer, 0);
       KeReleaseSpinLock(&AllocationListLock, oldIrql);
@@ -260,9 +245,11 @@ MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
       InterlockedDecrement((LONG *)&MiPagesRequired);
       return(STATUS_SUCCESS);
     }
+  /* Otherwise start the pager thread if it isn't already working. */
+  MiStartPagerThread();
   InsertTailList(&AllocationListHead, &Request.ListEntry);
   KeReleaseSpinLock(&AllocationListLock, oldIrql);
-    }
 
   KeWaitForSingleObject(&Request.Event,
 			0,
 			KernelMode,
@@ -70,6 +70,11 @@ MmTransferOwnershipPage(PHYSICAL_ADDRESS PhysicalAddress, ULONG NewConsumer)
   KIRQL oldIrql;
 
   KeAcquireSpinLock(&PageListLock, &oldIrql);
+  if (MmPageArray[Start].MapCount != 0)
+    {
+      DbgPrint("Transfering mapped page.\n");
+      KeBugCheck(0);
+    }
   RemoveEntryList(&MmPageArray[Start].ListEntry);
   InsertTailList(&UsedPageListHeads[NewConsumer],
 		 &MmPageArray[Start].ListEntry);
@@ -100,6 +105,23 @@ MmGetLRUFirstUserPage(VOID)
   return(Next);
 }
 
+VOID
+MmSetLRULastPage(PHYSICAL_ADDRESS PhysicalAddress)
+{
+  ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
+  KIRQL oldIrql;
+
+  KeAcquireSpinLock(&PageListLock, &oldIrql);
+  if (MmPageArray[Start].Flags.Type == MM_PHYSICAL_PAGE_USED &&
+      MmPageArray[Start].Flags.Consumer == MC_USER)
+    {
+      RemoveEntryList(&MmPageArray[Start].ListEntry);
+      InsertTailList(&UsedPageListHeads[MC_USER],
+		     &MmPageArray[Start].ListEntry);
+    }
+  KeReleaseSpinLock(&PageListLock, oldIrql);
+}
+
 PHYSICAL_ADDRESS
 MmGetLRUNextUserPage(PHYSICAL_ADDRESS PreviousPhysicalAddress)
 {
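MmSetLRULastPage (added above) implements the second point of the commit message: when paging out fails with the new STATUS_PAGEFILE_QUOTA code from anonmem.c, MmTrimUserMemory in wset.c (below) requeues the page at the tail of the MC_USER LRU list rather than leaving it at the front to be rescanned on the next trim. As a rough standalone illustration, assuming nothing beyond standard C, the sketch below reimplements just that list motion; the helpers mirror the kernel's LIST_ENTRY idiom (RemoveEntryList/InsertTailList, simplified here to return void) but are local definitions, not the ReactOS ones.

    /* Sketch only: move a node of a circular doubly-linked list to the
       tail, so an LRU scan starting at the head revisits it last. */
    #include <stdio.h>

    typedef struct _LIST_ENTRY {
      struct _LIST_ENTRY *Flink;   /* forward link */
      struct _LIST_ENTRY *Blink;   /* backward link */
    } LIST_ENTRY;

    static void InitializeListHead(LIST_ENTRY *Head)
    {
      Head->Flink = Head->Blink = Head;
    }

    static void RemoveEntryList(LIST_ENTRY *Entry)
    {
      Entry->Blink->Flink = Entry->Flink;
      Entry->Flink->Blink = Entry->Blink;
    }

    static void InsertTailList(LIST_ENTRY *Head, LIST_ENTRY *Entry)
    {
      Entry->Flink = Head;
      Entry->Blink = Head->Blink;
      Head->Blink->Flink = Entry;
      Head->Blink = Entry;
    }

    int main(void)
    {
      LIST_ENTRY Head, A, B, C;
      InitializeListHead(&Head);
      InsertTailList(&Head, &A);
      InsertTailList(&Head, &B);
      InsertTailList(&Head, &C);   /* LRU order: A, B, C */

      RemoveEntryList(&A);         /* A could not be paged out... */
      InsertTailList(&Head, &A);   /* ...so requeue it: B, C, A */

      printf("head of LRU list is now %s\n", Head.Flink == &B ? "B" : "?");
      return 0;
    }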
@@ -1,4 +1,4 @@
-/* $Id: pager.c,v 1.11 2002/09/08 10:23:36 chorns Exp $
+/* $Id: pager.c,v 1.12 2003/07/12 01:52:10 dwelch Exp $
  *
  * COPYRIGHT: See COPYING in the top level directory
  * PROJECT:   ReactOS kernel
@@ -25,9 +25,28 @@ static HANDLE PagerThreadHandle;
 static CLIENT_ID PagerThreadId;
 static KEVENT PagerThreadEvent;
 static BOOLEAN PagerThreadShouldTerminate;
+static ULONG PagerThreadWorking;
 
 /* FUNCTIONS *****************************************************************/
 
+BOOLEAN
+MiIsPagerThread(VOID)
+{
+  return(PsGetCurrentThreadId() == PagerThreadId.UniqueThread);
+}
+
+VOID
+MiStartPagerThread(VOID)
+{
+  ULONG WasWorking;
+
+  WasWorking = InterlockedExchange(&PagerThreadWorking, 1);
+  if (WasWorking == 0)
+    {
+      KeSetEvent(&PagerThreadEvent, IO_NO_INCREMENT, FALSE);
+    }
+}
+
 static NTSTATUS STDCALL
 MmPagerThreadMain(PVOID Ignored)
 {
@@ -35,6 +54,7 @@ MmPagerThreadMain(PVOID Ignored)
 
   for(;;)
     {
+      /* Wake for a low memory situation or a terminate request. */
      Status = KeWaitForSingleObject(&PagerThreadEvent,
 				    0,
 				    KernelMode,
@@ -50,6 +70,10 @@ MmPagerThreadMain(PVOID Ignored)
 	  DbgPrint("PagerThread: Terminating\n");
 	  return(STATUS_SUCCESS);
 	}
+      /* Try and make some memory available to the system. */
+      MmRebalanceMemoryConsumers();
+      /* Let the rest of the system know we finished this run. */
+      (VOID)InterlockedExchange(&PagerThreadWorking, 0);
     }
 }
 
@@ -58,6 +82,7 @@ NTSTATUS MmInitPagerThread(VOID)
   NTSTATUS Status;
 
   PagerThreadShouldTerminate = FALSE;
+  PagerThreadWorking = 0;
   KeInitializeEvent(&PagerThreadEvent,
 		    SynchronizationEvent,
 		    FALSE);
@@ -16,7 +16,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
-/* $Id: wset.c,v 1.16 2002/09/08 10:23:37 chorns Exp $
+/* $Id: wset.c,v 1.17 2003/07/12 01:52:10 dwelch Exp $
  *
  * PROJECT: ReactOS kernel
  * FILE:    ntoskrnl/mm/wset.c
@@ -59,6 +59,10 @@ MmTrimUserMemory(ULONG Target, ULONG Priority, PULONG NrFreedPages)
 	  Target--;
 	  (*NrFreedPages)++;
 	}
+      else if (Status == STATUS_PAGEFILE_QUOTA)
+	{
+	  MmSetLRULastPage(CurrentPhysicalAddress);
+	}
 
       CurrentPhysicalAddress = NextPhysicalAddress;
     }