Virtual memory support

svn path=/trunk/; revision=2462
This commit is contained in:
David Welch 2001-12-31 01:53:46 +00:00
parent 02e3483cf5
commit a155c42316
26 changed files with 1154 additions and 787 deletions

View file

@ -1,4 +1,4 @@
/* $Id: fmutex.c,v 1.2 2001/12/20 03:56:08 dwelch Exp $
/* $Id: fmutex.c,v 1.3 2001/12/31 01:53:44 dwelch Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -35,9 +35,19 @@ ExReleaseFastMutex (PFAST_MUTEX FastMutex)
/*
 * FUNCTION: Attempts to acquire the given fast mutex without blocking.
 * ARGUMENTS:
 *   FastMutex = Fast mutex to try to acquire.
 * RETURNS: TRUE if the mutex was acquired; the caller then remains inside
 *          a critical region until it releases the mutex.
 *          FALSE if the mutex was already owned.
 */
BOOLEAN FASTCALL
ExTryToAcquireFastMutex (PFAST_MUTEX FastMutex)
{
  /* APC delivery must be disabled while a fast mutex is held. */
  KeEnterCriticalRegion();
  /* Count == 1 means the mutex is free; atomically swap it to 0 to claim
     ownership. */
  if (InterlockedExchange(&FastMutex->Count, 0) == 1)
    {
      FastMutex->Owner = KeGetCurrentThread();
      return(TRUE);
    }
  else
    {
      /* The mutex was already owned; undo the critical region entry. */
      KeLeaveCriticalRegion();
      return(FALSE);
    }
}
/* EOF */

View file

@ -1,4 +1,4 @@
# $Id: Makefile,v 1.59 2001/12/27 23:56:41 dwelch Exp $
# $Id: Makefile,v 1.60 2001/12/31 01:53:44 dwelch Exp $
#
# ReactOS Operating System
#
@ -150,7 +150,8 @@ OBJECTS_MM = \
mm/kmap.o \
mm/mpw.o \
mm/pageop.o \
mm/balance.o
mm/balance.o \
mm/rmap.o
# I/O Subsystem (Io)
OBJECTS_IO = \

View file

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: view.c,v 1.31 2001/12/29 14:32:21 dwelch Exp $
/* $Id: view.c,v 1.32 2001/12/31 01:53:44 dwelch Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -62,14 +62,6 @@
#define NDEBUG
#include <internal/debug.h>
/* TYPES *********************************************************************/
typedef struct _CC_FREE_CONTEXT
{
DWORD Maximum;
PVOID* FreedPages;
} CC_FREE_CONTEXT, *PCC_FREE_CONTEXT;
/* GLOBALS *******************************************************************/
#define ROUND_UP(N, S) ((((N) + (S) - 1) / (S)) * (S))
@ -85,17 +77,16 @@ static LIST_ENTRY CacheSegmentLRUListHead;
static FAST_MUTEX ViewLock;
NTSTATUS STDCALL
CcRosInternalFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg, PVOID Context);
CcRosInternalFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg);
/* FUNCTIONS *****************************************************************/
NTSTATUS
CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed, PVOID* FreedPages)
CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
{
PLIST_ENTRY current_entry;
PCACHE_SEGMENT current;
ULONG PagesPerSegment;
CC_FREE_CONTEXT FreeContext;
ULONG PagesFreed;
DPRINT("CcRosTrimCache(Target %d)\n", Target);
@ -117,13 +108,10 @@ CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed, PVOID* FreedPages)
ExReleaseFastMutex(&current->Lock);
DPRINT("current->Bcb->CacheSegmentSize %d\n", current->Bcb->CacheSegmentSize);
PagesPerSegment = current->Bcb->CacheSegmentSize / PAGESIZE;
FreeContext.Maximum = min(PagesPerSegment, Target);
FreeContext.FreedPages = FreedPages;
CcRosInternalFreeCacheSegment(current->Bcb, current, (PVOID)&FreeContext);
CcRosInternalFreeCacheSegment(current->Bcb, current);
DPRINT("CcRosTrimCache(): Freed %d\n", PagesPerSegment);
PagesFreed = min(PagesPerSegment, Target);
Target = Target - PagesFreed;
FreedPages = FreedPages + PagesFreed;
(*NrFreed) = (*NrFreed) + PagesFreed;
}
ExReleaseFastMutex(&ViewLock);
@ -159,6 +147,80 @@ CcRosReleaseCacheSegment(PBCB Bcb,
return(STATUS_SUCCESS);
}
PCACHE_SEGMENT CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
/*
 * FUNCTION: Finds the cache segment of Bcb whose range covers FileOffset.
 * RETURNS: The matching cache segment, or NULL if no segment covers the
 * offset. The Bcb's segment list is protected by BcbLock for the duration
 * of the search.
 */
{
  PLIST_ENTRY Entry;
  PCACHE_SEGMENT Segment;
  KIRQL oldIrql;

  KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
  for (Entry = Bcb->BcbSegmentListHead.Flink;
       Entry != &Bcb->BcbSegmentListHead;
       Entry = Entry->Flink)
    {
      Segment = CONTAINING_RECORD(Entry, CACHE_SEGMENT, BcbSegmentListEntry);
      /* A segment covers [FileOffset, FileOffset + CacheSegmentSize). */
      if (Segment->FileOffset <= FileOffset &&
	  (Segment->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
	{
	  KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
	  return(Segment);
	}
    }
  KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
  return(NULL);
}
NTSTATUS
CcRosSuggestFreeCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
/*
 * FUNCTION: Frees the cache segment covering FileOffset if it is no longer
 * in use (not dirty, not referenced). Bugchecks if no segment covers the
 * offset or if the segment is still mapped.
 * RETURNS: STATUS_SUCCESS if the segment was freed,
 *          STATUS_UNSUCCESSFUL if it is still dirty or referenced.
 */
{
PCACHE_SEGMENT CacheSeg;
/* ViewLock is held across the lookup and the free so the segment cannot
   disappear under us. */
ExAcquireFastMutex(&ViewLock);
CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
if (CacheSeg == NULL)
{
/* Caller asked to free a segment that does not exist. */
KeBugCheck(0);
}
ExAcquireFastMutex(&CacheSeg->Lock);
if (CacheSeg->MappedCount > 0)
{
/* A mapped segment must be unmapped before it can be freed. */
KeBugCheck(0);
}
CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;
if (CacheSeg->Dirty || CacheSeg->ReferenceCount > 0)
{
/* Segment is still needed; report failure without freeing. */
ExReleaseFastMutex(&CacheSeg->Lock);
ExReleaseFastMutex(&ViewLock);
return(STATUS_UNSUCCESSFUL);
}
/* Drop the segment lock before freeing; ViewLock still serializes. */
ExReleaseFastMutex(&CacheSeg->Lock);
CcRosInternalFreeCacheSegment(CacheSeg->Bcb, CacheSeg);
ExReleaseFastMutex(&ViewLock);
return(STATUS_SUCCESS);
}
NTSTATUS
CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
/*
 * FUNCTION: Records that one mapping of the cache segment covering
 * FileOffset has been removed, optionally marking the segment dirty.
 * RETURNS: STATUS_SUCCESS, or STATUS_UNSUCCESSFUL if no segment covers
 * the offset.
 */
{
PCACHE_SEGMENT CacheSeg;
ExAcquireFastMutex(&ViewLock);
CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
if (CacheSeg == NULL)
{
ExReleaseFastMutex(&ViewLock);
return(STATUS_UNSUCCESSFUL);
}
/* Pin the segment while we work on it outside ViewLock.
   NOTE(review): this ReferenceCount++ is never undone in this function --
   looks like a reference leak; confirm a caller is expected to drop it. */
CacheSeg->ReferenceCount++;
ExReleaseFastMutex(&ViewLock);
ExAcquireFastMutex(&CacheSeg->Lock);
CacheSeg->MappedCount--;
CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;
ExReleaseFastMutex(&CacheSeg->Lock);
return(STATUS_SUCCESS);
}
NTSTATUS
CcRosGetCacheSegment(PBCB Bcb,
ULONG FileOffset,
@ -167,7 +229,6 @@ CcRosGetCacheSegment(PBCB Bcb,
PBOOLEAN UptoDate,
PCACHE_SEGMENT* CacheSeg)
{
PLIST_ENTRY current_entry;
PCACHE_SEGMENT current;
ULONG i;
NTSTATUS Status;
@ -181,33 +242,26 @@ CcRosGetCacheSegment(PBCB Bcb,
/*
* Look for a cache segment already mapping the same data.
*/
current_entry = Bcb->BcbSegmentListHead.Flink;
while (current_entry != &Bcb->BcbSegmentListHead)
current = CcRosLookupCacheSegment(Bcb, FileOffset);
if (current != NULL)
{
current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
BcbSegmentListEntry);
if (current->FileOffset <= FileOffset &&
(current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
{
/*
* Make sure the cache segment can't go away outside of our control.
*/
current->ReferenceCount++;
/*
* Release the global lock and lock the cache segment.
*/
ExReleaseFastMutex(&ViewLock);
ExAcquireFastMutex(&current->Lock);
/*
* Return information about the segment to the caller.
*/
*UptoDate = current->Valid;
*BaseAddress = current->BaseAddress;
*CacheSeg = current;
*BaseOffset = current->FileOffset;
return(STATUS_SUCCESS);
}
current_entry = current_entry->Flink;
/*
* Make sure the cache segment can't go away outside of our control.
*/
current->ReferenceCount++;
/*
* Release the global lock and lock the cache segment.
*/
ExReleaseFastMutex(&ViewLock);
ExAcquireFastMutex(&current->Lock);
/*
* Return information about the segment to the caller.
*/
*UptoDate = current->Valid;
*BaseAddress = current->BaseAddress;
*CacheSeg = current;
*BaseOffset = current->FileOffset;
return(STATUS_SUCCESS);
}
/*
@ -302,26 +356,17 @@ CcRosRequestCacheSegment(PBCB Bcb,
}
STATIC VOID
CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address, ULONG PhysAddr,
		BOOLEAN Dirty)
/*
 * FUNCTION: MmFreeMemoryArea callback; returns each physical page that
 * backed a cache segment to the MC_CACHE memory consumer.
 * ARGUMENTS:
 *   Context    = Unused (always NULL for cache segments).
 *   MemoryArea = The memory area being torn down.
 *   Address    = Virtual address of the page within the area.
 *   PhysAddr   = Physical address of the backing page, or 0 if unmapped.
 *   Dirty      = Whether the mapping was dirty (not used here).
 */
{
  if (PhysAddr != 0)
    {
      MmReleasePageMemoryConsumer(MC_CACHE, (PVOID)PhysAddr);
    }
}
NTSTATUS STDCALL
CcRosInternalFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg, PVOID Context)
CcRosInternalFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
/*
* FUNCTION: Releases a cache segment associated with a BCB
*/
@ -335,7 +380,7 @@ CcRosInternalFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg, PVOID Context)
CacheSeg->BaseAddress,
Bcb->CacheSegmentSize,
CcFreeCachePage,
Context);
NULL);
MmUnlockAddressSpace(MmGetKernelAddressSpace());
ExFreePool(CacheSeg);
return(STATUS_SUCCESS);
@ -346,7 +391,7 @@ CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
{
NTSTATUS Status;
ExAcquireFastMutex(&ViewLock);
Status = CcRosInternalFreeCacheSegment(Bcb, CacheSeg, NULL);
Status = CcRosInternalFreeCacheSegment(Bcb, CacheSeg);
ExReleaseFastMutex(&ViewLock);
return(Status);
}

View file

@ -1,6 +1,6 @@
#ifndef __INCLUDE_INTERNAL_CC_H
#define __INCLUDE_INTERNAL_CC_H
/* $Id: cc.h,v 1.8 2001/12/29 14:32:22 dwelch Exp $ */
/* $Id: cc.h,v 1.9 2001/12/31 01:53:44 dwelch Exp $ */
#include <ddk/ntifs.h>
typedef struct _BCB
@ -62,5 +62,9 @@ NTSTATUS ReadCacheSegment(PCACHE_SEGMENT CacheSeg);
NTSTATUS WriteCacheSegment(PCACHE_SEGMENT CacheSeg);
VOID CcInit(VOID);
NTSTATUS
CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty);
NTSTATUS
CcRosSuggestFreeCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty);
#endif

View file

@ -11,6 +11,9 @@
/* TYPES *********************************************************************/
struct _EPROCESS;
struct _MM_RMAP_ENTRY;
struct _MM_PAGEOP;
typedef ULONG SWAPENTRY;
#define MEMORY_AREA_INVALID (0)
@ -186,7 +189,7 @@ NTSTATUS MmFreeMemoryArea(PMADDRESS_SPACE AddressSpace,
PVOID BaseAddress,
ULONG Length,
VOID (*FreePage)(PVOID Context, MEMORY_AREA* MemoryArea,
PVOID Address, ULONG PhysAddr),
PVOID Address, ULONG PhysAddr, BOOLEAN Dirty),
PVOID FreePageContext);
VOID MmDumpMemoryAreas(PLIST_ENTRY ListHead);
NTSTATUS MmLockMemoryArea(MEMORY_AREA* MemoryArea);
@ -209,7 +212,8 @@ PVOID MmInitializePageList(PVOID FirstPhysKernelAddress,
PADDRESS_RANGE BIOSMemoryMap,
ULONG AddressRangeCount);
PVOID MmAllocPage(SWAPENTRY SavedSwapEntry);
PVOID
MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry);
VOID MmDereferencePage(PVOID PhysicalAddress);
VOID MmReferencePage(PVOID PhysicalAddress);
VOID MmDeletePageTable(struct _EPROCESS* Process,
@ -280,14 +284,16 @@ VOID MmClearWaitPage(PVOID Page);
VOID MmSetWaitPage(PVOID Page);
BOOLEAN MmIsPageDirty(struct _EPROCESS* Process, PVOID Address);
BOOLEAN MmIsPageTablePresent(PVOID PAddress);
ULONG MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
MEMORY_AREA* MemoryArea,
PVOID Address,
PBOOLEAN Ul);
ULONG MmPageOutVirtualMemory(PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA MemoryArea,
PVOID Address,
PBOOLEAN Ul);
NTSTATUS
MmPageOutVirtualMemory(PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA MemoryArea,
PVOID Address,
struct _MM_PAGEOP* PageOp);
NTSTATUS
MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA MemoryArea,
PVOID Address,
struct _MM_PAGEOP* PageOp);
MEMORY_AREA* MmOpenMemoryAreaByRegion(PMADDRESS_SPACE AddressSpace,
PVOID Address,
ULONG Length);
@ -295,17 +301,6 @@ MEMORY_AREA* MmOpenMemoryAreaByRegion(PMADDRESS_SPACE AddressSpace,
VOID ExUnmapPage(PVOID Addr);
PVOID ExAllocatePage(VOID);
VOID MmLockWorkingSet(struct _EPROCESS* Process);
VOID MmUnlockWorkingSet(struct _EPROCESS* Process);
VOID MmInitializeWorkingSet(struct _EPROCESS* Process,
PMADDRESS_SPACE AddressSpace);
ULONG MmTrimWorkingSet(struct _EPROCESS* Process,
ULONG ReduceHint);
VOID MmRemovePageFromWorkingSet(struct _EPROCESS* Process,
PVOID Address);
VOID MmAddPageToWorkingSet(struct _EPROCESS* Process,
PVOID Address);
VOID MmInitPagingFile(VOID);
BOOLEAN MmReserveSwapPages(ULONG Nr);
VOID MmDereserveSwapPages(ULONG Nr);
@ -322,10 +317,6 @@ VOID MmInit3(VOID);
NTSTATUS MmInitPagerThread(VOID);
VOID MmInitKernelMap(PVOID BaseAddress);
VOID MmWaitForFreePages(VOID);
PVOID MmMustAllocPage(SWAPENTRY SavedSwapEntry);
PVOID MmAllocPageMaybeSwap(SWAPENTRY SavedSwapEntry);
NTSTATUS MmCreatePageTable(PVOID PAddress);
typedef struct
@ -477,7 +468,7 @@ NTSTATUS MmCommitPagedPoolAddress(PVOID Address);
VOID
MmInitializeMemoryConsumer(ULONG Consumer,
NTSTATUS (*Trim)(ULONG Target, ULONG Priority,
PULONG NrFreed, PVOID* FreedPages));
PULONG NrFreed));
VOID
MmInitializeBalancer(ULONG NrAvailablePages);
NTSTATUS
@ -491,4 +482,30 @@ MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait, PVOID* AllocatedPag
#define MC_NPPOOL (3)
#define MC_MAXIMUM (4)
VOID
MmSetRmapListHeadPage(PVOID PhysicalAddress, struct _MM_RMAP_ENTRY* ListHead);
struct _MM_RMAP_ENTRY*
MmGetRmapListHeadPage(PVOID PhysicalAddress);
VOID
MmInsertRmap(PVOID PhysicalAddress, PEPROCESS Process, PVOID Address);
VOID
MmDeleteAllRmaps(PVOID PhysicalAddress, PVOID Context,
VOID (*DeleteMapping)(PVOID Context, PEPROCESS Process, PVOID Address));
VOID
MmDeleteRmap(PVOID PhysicalAddress, PEPROCESS Process, PVOID Address);
VOID
MmInitializeRmapList(VOID);
PVOID
MmGetLRUNextUserPage(PVOID PreviousPhysicalAddress);
PVOID
MmGetLRUFirstUserPage(VOID);
NTSTATUS
MmPageOutPhysicalAddress(PVOID PhysicalAddress);
NTSTATUS
MmTrimUserMemory(ULONG Target, ULONG Priority, PULONG NrFreedPages);
VOID
MmDisableVirtualMapping(PEPROCESS Process, PVOID Address, BOOL* WasDirty, ULONG* PhysicalAddr);
VOID MmEnableVirtualMapping(PEPROCESS Process, PVOID Address);
#endif

View file

@ -47,7 +47,7 @@ static PVOID PcrPages[MAXIMUM_PROCESSORS];
VOID
KePrepareForApplicationProcessorInit(ULONG Id)
/*
 * FUNCTION: Allocates the per-processor data page for processor Id and
 * prepares its GDT before the application processor is brought up.
 */
{
  NTSTATUS Status;

  /* Allocate a non-paged pool page to hold the processor's PCR. */
  Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &PcrPages[Id]);
  if (!NT_SUCCESS(Status))
    {
      /* Without a PCR page the processor cannot be initialized. */
      KeBugCheck(0);
    }
  KiGdtPrepareForApplicationProcessorInit(Id);
}

View file

@ -43,7 +43,8 @@
/* FUNCTIONS *****************************************************************/
VOID
KeFreeStackPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address, ULONG PhysAddr)
KeFreeStackPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address, ULONG PhysAddr,
BOOLEAN Dirty)
{
if (PhysAddr != 0)
{
@ -116,10 +117,16 @@ KeInitializeThread(PKPROCESS Process, PKTHREAD Thread, BOOLEAN First)
}
for (i = 0; i < (MM_STACK_SIZE / PAGESIZE); i++)
{
PVOID Page;
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Page);
if (!NT_SUCCESS(Status))
{
KeBugCheck(0);
}
Status = MmCreateVirtualMapping(NULL,
KernelStack + (i * PAGESIZE),
PAGE_EXECUTE_READWRITE,
(ULONG)MmAllocPage(0));
(ULONG)Page);
}
Thread->InitialStack = KernelStack + MM_STACK_SIZE;
Thread->StackBase = KernelStack + MM_STACK_SIZE;

View file

@ -1,4 +1,4 @@
/* $Id: connect.c,v 1.8 2001/12/02 23:34:42 dwelch Exp $
/* $Id: connect.c,v 1.9 2001/12/31 01:53:45 dwelch Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -16,8 +16,8 @@
#include <internal/port.h>
#include <internal/dbg.h>
#include <internal/pool.h>
#include <internal/mm.h>
#include <internal/safe.h>
#include <internal/mm.h>
#define NDEBUG
#include <internal/debug.h>

View file

@ -1,4 +1,4 @@
/* $Id: aspace.c,v 1.7 2001/11/25 15:21:11 dwelch Exp $
/* $Id: aspace.c,v 1.8 2001/12/31 01:53:45 dwelch Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -87,10 +87,6 @@ MmInitializeAddressSpace(PEPROCESS Process,
AddressSpace->LowestAddress = KERNEL_BASE;
}
AddressSpace->Process = Process;
if (Process != NULL)
{
MmInitializeWorkingSet(Process, AddressSpace);
}
if (Process != NULL)
{
AddressSpace->PageTableRefCountTable =

View file

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: balance.c,v 1.2 2001/12/29 14:32:22 dwelch Exp $
/* $Id: balance.c,v 1.3 2001/12/31 01:53:45 dwelch Exp $
*
* COPYRIGHT: See COPYING in the top directory
* PROJECT: ReactOS kernel
@ -41,15 +41,24 @@ typedef struct _MM_MEMORY_CONSUMER
{
ULONG PagesUsed;
ULONG PagesTarget;
NTSTATUS (*Trim)(ULONG Target, ULONG Priority, PULONG NrFreed, PVOID* FreedPages);
NTSTATUS (*Trim)(ULONG Target, ULONG Priority, PULONG NrFreed);
} MM_MEMORY_CONSUMER, *PMM_MEMORY_CONSUMER;
typedef struct _MM_ALLOCATION_REQUEST
{
PVOID Page;
LIST_ENTRY ListEntry;
KEVENT Event;
} MM_ALLOCATION_REQUEST, *PMM_ALLOCATION_REQUEST;
/* GLOBALS ******************************************************************/
static MM_MEMORY_CONSUMER MiMemoryConsumers[MC_MAXIMUM];
static ULONG MiMinimumAvailablePages;
static ULONG MiNrAvailablePages;
static ULONG MiNrTotalPages;
static LIST_ENTRY AllocationListHead;
static KSPIN_LOCK AllocationListLock;
/* FUNCTIONS ****************************************************************/
@ -57,6 +66,8 @@ VOID
MmInitializeBalancer(ULONG NrAvailablePages)
{
memset(MiMemoryConsumers, 0, sizeof(MiMemoryConsumers));
InitializeListHead(&AllocationListHead);
KeInitializeSpinLock(&AllocationListLock);
MiNrAvailablePages = MiNrTotalPages = NrAvailablePages;
@ -71,7 +82,7 @@ MmInitializeBalancer(ULONG NrAvailablePages)
VOID
MmInitializeMemoryConsumer(ULONG Consumer,
NTSTATUS (*Trim)(ULONG Target, ULONG Priority,
PULONG NrFreed, PVOID* FreedPages))
PULONG NrFreed))
{
MiMemoryConsumers[Consumer].Trim = Trim;
}
@ -79,9 +90,26 @@ MmInitializeMemoryConsumer(ULONG Consumer,
NTSTATUS
MmReleasePageMemoryConsumer(ULONG Consumer, PVOID Page)
/*
 * FUNCTION: Returns a page from a memory consumer. If another thread is
 * blocked waiting for a page, the page is handed directly to the oldest
 * waiter instead of being released to the free lists.
 * RETURNS: STATUS_SUCCESS.
 */
{
  PMM_ALLOCATION_REQUEST Request;
  PLIST_ENTRY Entry;
  KIRQL oldIrql;

  InterlockedDecrement(&MiMemoryConsumers[Consumer].PagesUsed);
  InterlockedIncrement(&MiNrAvailablePages);

  KeAcquireSpinLock(&AllocationListLock, &oldIrql);
  if (IsListEmpty(&AllocationListHead))
    {
      /* Nobody is waiting; release the page for general reuse. */
      KeReleaseSpinLock(&AllocationListLock, oldIrql);
      MmDereferencePage(Page);
    }
  else
    {
      /* Hand the page directly to the oldest pending allocation request
	 and wake the waiter. */
      Entry = RemoveHeadList(&AllocationListHead);
      Request = CONTAINING_RECORD(Entry, MM_ALLOCATION_REQUEST, ListEntry);
      KeReleaseSpinLock(&AllocationListLock, oldIrql);
      Request->Page = Page;
      KeSetEvent(&Request->Event, IO_NO_INCREMENT, FALSE);
    }
  return(STATUS_SUCCESS);
}
@ -98,19 +126,17 @@ MiTrimMemoryConsumer(ULONG Consumer)
if (MiMemoryConsumers[Consumer].Trim != NULL)
{
MiMemoryConsumers[Consumer].Trim(Target, 0, NULL, NULL);
MiMemoryConsumers[Consumer].Trim(Target, 0, NULL);
}
}
VOID
MiRebalanceMemoryConsumers(PVOID* Page)
MiRebalanceMemoryConsumers(VOID)
{
LONG Target;
ULONG i;
PVOID* FreedPages;
ULONG NrFreedPages;
ULONG TotalFreedPages;
PVOID* OrigFreedPages;
NTSTATUS Status;
Target = MiMinimumAvailablePages - MiNrAvailablePages;
if (Target < 0)
@ -118,36 +144,22 @@ MiRebalanceMemoryConsumers(PVOID* Page)
Target = 1;
}
OrigFreedPages = FreedPages = alloca(sizeof(PVOID) * Target);
TotalFreedPages = 0;
for (i = 0; i < MC_MAXIMUM && Target > 0; i++)
{
if (MiMemoryConsumers[i].Trim != NULL)
{
MiMemoryConsumers[i].Trim(Target, 0, &NrFreedPages, FreedPages);
Status = MiMemoryConsumers[i].Trim(Target, 0, &NrFreedPages);
if (!NT_SUCCESS(Status))
{
KeBugCheck(0);
}
Target = Target - NrFreedPages;
FreedPages = FreedPages + NrFreedPages;
TotalFreedPages = TotalFreedPages + NrFreedPages;
}
}
if (Target > 0)
{
KeBugCheck(0);
}
if (Page != NULL)
{
*Page = OrigFreedPages[0];
i = 1;
}
else
{
i = 0;
}
for (; i < TotalFreedPages; i++)
{
MmDereferencePage(OrigFreedPages[i]);
}
}
NTSTATUS
@ -183,15 +195,18 @@ MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait, PVOID* AllocatedPag
InterlockedDecrement(&MiMemoryConsumers[Consumer].PagesUsed);
return(STATUS_NO_MEMORY);
}
MiRebalanceMemoryConsumers(NULL);
MiRebalanceMemoryConsumers();
}
/*
* Actually allocate the page.
*/
Page = MmAllocPage(0);
Page = MmAllocPage(Consumer, 0);
if (Page == NULL)
{
MM_ALLOCATION_REQUEST Request;
KIRQL oldIrql;
/* Still not trimmed enough. */
if (!CanWait)
{
@ -199,7 +214,19 @@ MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait, PVOID* AllocatedPag
InterlockedDecrement(&MiMemoryConsumers[Consumer].PagesUsed);
return(STATUS_NO_MEMORY);
}
MiRebalanceMemoryConsumers(&Page);
/* Insert an allocation request. */
Request.Page = NULL;
KeInitializeEvent(&Request.Event, NotificationEvent, FALSE);
KeAcquireSpinLock(&AllocationListLock, &oldIrql);
InsertTailList(&AllocationListHead, &Request.ListEntry);
KeReleaseSpinLock(&AllocationListLock, oldIrql);
MiRebalanceMemoryConsumers();
Page = Request.Page;
if (Page == NULL)
{
KeBugCheck(0);
}
}
*AllocatedPage = Page;

View file

@ -1,4 +1,4 @@
/* $Id: cont.c,v 1.14 2001/12/29 14:32:22 dwelch Exp $
/* $Id: cont.c,v 1.15 2001/12/31 01:53:45 dwelch Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -20,7 +20,8 @@
/* FUNCTIONS *****************************************************************/
VOID STATIC
MmFreeContinuousPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address, ULONG PhysAddr)
MmFreeContinuousPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address, ULONG PhysAddr,
BOOLEAN Dirty)
{
if (PhysAddr != 0)
{

View file

@ -34,14 +34,15 @@ typedef struct _PHYSICAL_PAGE
SWAPENTRY SavedSwapEntry;
ULONG LockCount;
ULONG MapCount;
struct _MM_RMAP_ENTRY* RmapListHead;
} PHYSICAL_PAGE, *PPHYSICAL_PAGE;
/* GLOBALS ****************************************************************/
static PPHYSICAL_PAGE MmPageArray;
static LIST_ENTRY UsedPageListHead;
static KSPIN_LOCK PageListLock;
static LIST_ENTRY UsedPageListHeads[MC_MAXIMUM];
static LIST_ENTRY FreeZeroedPageListHead;
static LIST_ENTRY FreeUnzeroedPageListHead;
static LIST_ENTRY BiosPageListHead;
@ -54,6 +55,58 @@ MmCreateVirtualMappingUnsafe(struct _EPROCESS* Process,
/* FUNCTIONS *************************************************************/
PVOID
MmGetLRUFirstUserPage(VOID)
{
PLIST_ENTRY NextListEntry;
ULONG Next;
PHYSICAL_PAGE* PageDescriptor;
KIRQL oldIrql;
KeAcquireSpinLock(&PageListLock, &oldIrql);
NextListEntry = UsedPageListHeads[MC_USER].Flink;
if (NextListEntry == &UsedPageListHeads[MC_USER])
{
KeReleaseSpinLock(&PageListLock, oldIrql);
return(NULL);
}
PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
Next = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
Next = (Next / sizeof(PHYSICAL_PAGE)) * PAGESIZE;
KeReleaseSpinLock(&PageListLock, oldIrql);
return((PVOID)Next);
}
PVOID
MmGetLRUNextUserPage(PVOID PreviousPhysicalAddress)
/*
 * FUNCTION: Returns the physical address of the page following
 * PreviousPhysicalAddress on the MC_USER used-page list, or NULL when the
 * end of the list is reached. If the previous page is no longer marked
 * used, iteration restarts from the head of the list.
 */
{
ULONG Start = (ULONG)PreviousPhysicalAddress / PAGESIZE;
PLIST_ENTRY NextListEntry;
ULONG Next;
PHYSICAL_PAGE* PageDescriptor;
KIRQL oldIrql;
KeAcquireSpinLock(&PageListLock, &oldIrql);
if (!(MmPageArray[Start].Flags & MM_PHYSICAL_PAGE_USED))
{
/* The previous page was freed since the last call; restart.
   NOTE(review): assumes a page with MM_PHYSICAL_PAGE_USED set is on the
   MC_USER list -- confirm pages from other consumers cannot reach here. */
NextListEntry = UsedPageListHeads[MC_USER].Flink;
}
else
{
NextListEntry = MmPageArray[Start].ListEntry.Flink;
}
if (NextListEntry == &UsedPageListHeads[MC_USER])
{
/* Reached the end of the user page list. */
KeReleaseSpinLock(&PageListLock, oldIrql);
return(NULL);
}
PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
/* Convert the descriptor's index in MmPageArray to a physical address. */
Next = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
Next = (Next / sizeof(PHYSICAL_PAGE)) * PAGESIZE;
KeReleaseSpinLock(&PageListLock, oldIrql);
return((PVOID)Next);
}
PVOID
MmGetContinuousPages(ULONG NumberOfBytes,
PHYSICAL_ADDRESS HighestAcceptableAddress,
@ -112,7 +165,7 @@ MmGetContinuousPages(ULONG NumberOfBytes,
MmPageArray[i].LockCount = 0;
MmPageArray[i].MapCount = 0;
MmPageArray[i].SavedSwapEntry = 0;
InsertTailList(&UsedPageListHead, &MmPageArray[i].ListEntry);
InsertTailList(&UsedPageListHeads[MC_NPPOOL], &MmPageArray[i].ListEntry);
}
KeReleaseSpinLock(&PageListLock, oldIrql);
return((PVOID)(start * 4096));
@ -229,7 +282,10 @@ PVOID MmInitializePageList(PVOID FirstPhysKernelAddress,
MemorySizeInPages,
LastKernelAddress);
InitializeListHead(&UsedPageListHead);
for (i = 0; i < MC_MAXIMUM; i++)
{
InitializeListHead(&UsedPageListHeads[i]);
}
KeInitializeSpinLock(&PageListLock);
InitializeListHead(&FreeUnzeroedPageListHead);
InitializeListHead(&FreeZeroedPageListHead);
@ -306,7 +362,7 @@ PVOID MmInitializePageList(PVOID FirstPhysKernelAddress,
{
MmPageArray[i].Flags = MM_PHYSICAL_PAGE_USED;
MmPageArray[i].ReferenceCount = 1;
InsertTailList(&UsedPageListHead,
InsertTailList(&UsedPageListHeads[MC_NPPOOL],
&MmPageArray[i].ListEntry);
}
MmStats.NrFreePages += ((0xa0000/PAGESIZE) - i);
@ -358,7 +414,7 @@ PVOID MmInitializePageList(PVOID FirstPhysKernelAddress,
{
MmPageArray[i].Flags = MM_PHYSICAL_PAGE_USED;
MmPageArray[i].ReferenceCount = 1;
InsertTailList(&UsedPageListHead,
InsertTailList(&UsedPageListHeads[MC_NPPOOL],
&MmPageArray[i].ListEntry);
}
}
@ -397,6 +453,22 @@ VOID MmSetFlagsPage(PVOID PhysicalAddress,
KeReleaseSpinLock(&PageListLock, oldIrql);
}
VOID
MmSetRmapListHeadPage(PVOID PhysicalAddress, struct _MM_RMAP_ENTRY* ListHead)
/*
 * FUNCTION: Stores ListHead as the head of the reverse-mapping (rmap)
 * list for the physical page containing PhysicalAddress.
 * NOTE(review): the page descriptor is written without taking
 * PageListLock -- confirm callers provide their own serialization.
 */
{
ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
MmPageArray[Start].RmapListHead = ListHead;
}
struct _MM_RMAP_ENTRY*
MmGetRmapListHeadPage(PVOID PhysicalAddress)
/*
 * FUNCTION: Returns the head of the reverse-mapping (rmap) list for the
 * physical page containing PhysicalAddress.
 * NOTE(review): read without PageListLock -- the result may be stale
 * unless the caller serializes against MmSetRmapListHeadPage.
 */
{
ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
return(MmPageArray[Start].RmapListHead);
}
VOID
MmMarkPageMapped(PVOID PhysicalAddress)
{
@ -559,6 +631,11 @@ VOID MmDereferencePage(PVOID PhysicalAddress)
MmStats.NrFreePages++;
MmStats.NrSystemPages--;
RemoveEntryList(&MmPageArray[Start].ListEntry);
if (MmPageArray[Start].RmapListHead != NULL)
{
DbgPrint("Freeing page with rmap entries.\n");
KeBugCheck(0);
}
if (MmPageArray[Start].MapCount != 0)
{
DbgPrint("Freeing mapped page (0x%x count %d)\n",
@ -659,7 +736,7 @@ VOID MmUnlockPage(PVOID PhysicalAddress)
PVOID
MmAllocPage(SWAPENTRY SavedSwapEntry)
MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry)
{
ULONG offset;
PLIST_ENTRY ListEntry;
@ -703,7 +780,7 @@ MmAllocPage(SWAPENTRY SavedSwapEntry)
PageDescriptor->LockCount = 0;
PageDescriptor->MapCount = 0;
PageDescriptor->SavedSwapEntry = SavedSwapEntry;
ExInterlockedInsertTailList(&UsedPageListHead, ListEntry, &PageListLock);
ExInterlockedInsertTailList(&UsedPageListHeads[Consumer], ListEntry, &PageListLock);
MmStats.NrSystemPages++;
MmStats.NrFreePages--;
@ -717,33 +794,3 @@ MmAllocPage(SWAPENTRY SavedSwapEntry)
DPRINT("MmAllocPage() = %x\n",offset);
return((PVOID)offset);
}
PVOID
MmMustAllocPage(SWAPENTRY SavedSwapEntry)
/*
 * FUNCTION: Allocates a physical page and bugchecks if no page is
 * available; never returns NULL in practice.
 */
{
PVOID Page;
Page = MmAllocPage(SavedSwapEntry);
if (Page == NULL)
{
/* Unreachable after the bugcheck; kept for the compiler. */
KeBugCheck(0);
return(NULL);
}
return(Page);
}
PVOID
MmAllocPageMaybeSwap(SWAPENTRY SavedSwapEntry)
/*
 * FUNCTION: Allocates a physical page, blocking in MmWaitForFreePages
 * and retrying until one becomes available.
 */
{
PVOID Page;
Page = MmAllocPage(SavedSwapEntry);
while (Page == NULL)
{
MmWaitForFreePages();
Page = MmAllocPage(SavedSwapEntry);
};
return(Page);
}

View file

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: page.c,v 1.29 2001/04/17 04:11:01 dwelch Exp $
/* $Id: page.c,v 1.30 2001/12/31 01:53:46 dwelch Exp $
*
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/i386/page.c
@ -239,10 +239,11 @@ NTSTATUS MmGetPageEntry2(PVOID PAddress, PULONG* Pte)
}
else
{
npage = (ULONG)MmAllocPage(0);
if (npage == 0)
NTSTATUS Status;
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, (PVOID*)&npage);
if (!NT_SUCCESS(Status))
{
return(STATUS_NO_MEMORY);
return(Status);
}
(*Pde) = npage | 0x7;
if (Address >= KERNEL_BASE)
@ -335,6 +336,75 @@ ULONG MmGetPhysicalAddressForProcess(PEPROCESS Process,
return(PAGE_MASK(PageEntry));
}
VOID
MmDisableVirtualMapping(PEPROCESS Process, PVOID Address, BOOL* WasDirty, ULONG* PhysicalAddr)
/*
 * FUNCTION: Temporarily disables a virtual mapping by clearing its
 * present bit; the physical page remains allocated and the rest of the
 * PTE is preserved so the mapping can later be re-enabled with
 * MmEnableVirtualMapping. Bugchecks if no valid mapping exists.
 * ARGUMENTS:
 *   Process      = Process owning the mapping (NULL for the current one).
 *   Address      = Virtual address whose mapping is disabled.
 *   WasDirty     = Optionally receives the PTE's dirty bit.
 *   PhysicalAddr = Optionally receives the mapped physical address.
 */
{
ULONG Pte;
PULONG Pde;
PEPROCESS CurrentProcess = PsGetCurrentProcess();
BOOLEAN WasValid;
/*
 * If we are setting a page in another process we need to be in its
 * context.
 */
if (Process != NULL && Process != CurrentProcess)
{
KeAttachProcess(Process);
}
/*
 * Set the page directory entry, we may have to copy the entry from
 * the global page directory.
 */
Pde = ADDR_TO_PDE(Address);
if ((*Pde) == 0 &&
MmGlobalKernelPageDirectory[ADDR_TO_PDE_OFFSET(Address)] != 0)
{
(*Pde) = MmGlobalKernelPageDirectory[ADDR_TO_PDE_OFFSET(Address)];
FLUSH_TLB;
}
if ((*Pde) == 0)
{
KeBugCheck(0);
}
/*
 * Clear the present bit, keeping the rest of the entry intact.
 * NOTE(review): unlike MmDeleteVirtualMapping this is a plain
 * read-modify-write, not an InterlockedExchange -- confirm that callers
 * serialize access to this PTE.
 */
Pte = *ADDR_TO_PTE(Address);
*ADDR_TO_PTE(Address) = Pte & (~PA_PRESENT);
FLUSH_TLB;
WasValid = (PAGE_MASK(Pte) != 0);
if (!WasValid)
{
/* Disabling a mapping that was never valid is a caller bug. */
KeBugCheck(0);
}
/*
 * If necessary go back to the original context
 */
if (Process != NULL && Process != CurrentProcess)
{
KeDetachProcess();
}
/*
 * Return some information to the caller
 */
if (WasDirty != NULL)
{
*WasDirty = Pte & PA_DIRTY;
}
if (PhysicalAddr != NULL)
{
*PhysicalAddr = PAGE_MASK(Pte);
}
}
VOID
MmDeleteVirtualMapping(PEPROCESS Process, PVOID Address, BOOL FreePage,
BOOL* WasDirty, ULONG* PhysicalAddr)
@ -373,6 +443,14 @@ MmDeleteVirtualMapping(PEPROCESS Process, PVOID Address, BOOL FreePage,
{
KeDetachProcess();
}
if (WasDirty != NULL)
{
*WasDirty = FALSE;
}
if (PhysicalAddr != NULL)
{
*PhysicalAddr = 0;
}
return;
}
@ -380,6 +458,7 @@ MmDeleteVirtualMapping(PEPROCESS Process, PVOID Address, BOOL FreePage,
* Atomically set the entry to zero and get the old value.
*/
Pte = (ULONG)InterlockedExchange((PLONG)ADDR_TO_PTE(Address), 0);
FLUSH_TLB;
WasValid = (PAGE_MASK(Pte) != 0);
if (WasValid)
{
@ -402,12 +481,10 @@ MmDeleteVirtualMapping(PEPROCESS Process, PVOID Address, BOOL FreePage,
Ptrc = Process->AddressSpace.PageTableRefCountTable;
Ptrc[ADDR_TO_PAGE_TABLE(Address)]--;
#if 1
if (Ptrc[ADDR_TO_PAGE_TABLE(Address)] == 0)
{
MmFreePageTable(Process, Address);
}
#endif
}
/*
@ -481,14 +558,15 @@ NTSTATUS MmCreatePageTable(PVOID PAddress)
}
if ((*page_dir) == 0)
{
npage = (ULONG)MmAllocPage(0);
if (npage == 0)
{
return(STATUS_UNSUCCESSFUL);
}
(*page_dir) = npage | 0x7;
memset((PVOID)PAGE_ROUND_DOWN(ADDR_TO_PTE(Address)), 0, PAGESIZE);
FLUSH_TLB;
NTSTATUS Status;
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, (PVOID*)&npage);
if (!NT_SUCCESS(Status))
{
return(Status);
}
(*page_dir) = npage | 0x7;
memset((PVOID)PAGE_ROUND_DOWN(ADDR_TO_PTE(Address)), 0, PAGESIZE);
FLUSH_TLB;
}
return(STATUS_SUCCESS);
}
@ -515,14 +593,15 @@ PULONG MmGetPageEntry(PVOID PAddress)
}
if ((*page_dir) == 0)
{
npage = (ULONG)MmAllocPage(0);
if (npage == 0)
{
KeBugCheck(0);
}
(*page_dir) = npage | 0x7;
memset((PVOID)PAGE_ROUND_DOWN(ADDR_TO_PTE(Address)), 0, PAGESIZE);
FLUSH_TLB;
NTSTATUS Status;
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, (PVOID*)&npage);
if (!NT_SUCCESS(Status))
{
KeBugCheck(0);
}
(*page_dir) = npage | 0x7;
memset((PVOID)PAGE_ROUND_DOWN(ADDR_TO_PTE(Address)), 0, PAGESIZE);
FLUSH_TLB;
}
page_tlb = ADDR_TO_PTE(Address);
DPRINT("page_tlb %x\n",page_tlb);
@ -578,6 +657,24 @@ VOID MmSetCleanPage(PEPROCESS Process, PVOID Address)
}
}
VOID MmEnableVirtualMapping(PEPROCESS Process, PVOID Address)
/*
 * FUNCTION: Re-enables a previously disabled virtual mapping by setting
 * the present bit on its page table entry.
 * ARGUMENTS:
 *   Process = Process owning the mapping.
 *   Address = Virtual address whose mapping is re-enabled.
 */
{
  PEPROCESS Current = PsGetCurrentProcess();
  PULONG Pte;
  BOOLEAN Attached = (Process != Current);

  /* Switch to the target process context if it is not our own. */
  if (Attached)
    {
      KeAttachProcess(Process);
    }
  Pte = MmGetPageEntry(Address);
  (*Pte) |= PA_PRESENT;
  FLUSH_TLB;
  if (Attached)
    {
      KeDetachProcess();
    }
}
BOOLEAN MmIsPagePresent(PEPROCESS Process, PVOID Address)
{
return((MmGetPageEntryForProcess1(Process, Address)) & PA_PRESENT);

View file

@ -1,4 +1,4 @@
/* $Id: kmap.c,v 1.12 2001/12/06 00:54:54 dwelch Exp $
/* $Id: kmap.c,v 1.13 2001/12/31 01:53:45 dwelch Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -51,9 +51,10 @@ PVOID
ExAllocatePage(VOID)
{
ULONG PhysPage;
NTSTATUS Status;
PhysPage = (ULONG)MmAllocPage(0);
if (PhysPage == 0)
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, (PVOID*)&PhysPage);
if (!NT_SUCCESS(Status))
{
return(NULL);
}

View file

@ -295,7 +295,7 @@ MmFreeMemoryArea(PMADDRESS_SPACE AddressSpace,
PVOID BaseAddress,
ULONG Length,
VOID (*FreePage)(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
ULONG PhysAddr),
ULONG PhysAddr, BOOLEAN Dirty),
PVOID FreePageContext)
{
MEMORY_AREA* MemoryArea;
@ -314,17 +314,15 @@ MmFreeMemoryArea(PMADDRESS_SPACE AddressSpace,
for (i=0; i<(PAGE_ROUND_UP(MemoryArea->Length)/PAGESIZE); i++)
{
ULONG PhysAddr;
BOOL Dirty;
PhysAddr =
MmGetPhysicalAddressForProcess(AddressSpace->Process,
MemoryArea->BaseAddress + (i*PAGESIZE));
MmDeleteVirtualMapping(AddressSpace->Process,
MemoryArea->BaseAddress + (i*PAGESIZE),
FALSE, NULL, NULL);
if (FreePage != NULL)
MmDeleteVirtualMapping(AddressSpace->Process,
MemoryArea->BaseAddress + (i*PAGESIZE),
FALSE, &Dirty, &PhysAddr);
if (FreePage != NULL)
{
FreePage(FreePageContext, MemoryArea,
MemoryArea->BaseAddress + (i * PAGESIZE), PhysAddr);
MemoryArea->BaseAddress + (i * PAGESIZE), PhysAddr, Dirty);
}
}

View file

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: mm.c,v 1.51 2001/12/27 23:56:42 dwelch Exp $
/* $Id: mm.c,v 1.52 2001/12/31 01:53:45 dwelch Exp $
*
* COPYRIGHT: See COPYING in the top directory
* PROJECT: ReactOS kernel
@ -245,7 +245,14 @@ NTSTATUS MmAccessFault(KPROCESSOR_MODE Mode,
NTSTATUS MmCommitPagedPoolAddress(PVOID Address)
{
NTSTATUS Status;
PVOID AllocatedPage = MmAllocPage(0);
PVOID AllocatedPage;
Status = MmRequestPageMemoryConsumer(MC_PPOOL, FALSE, &AllocatedPage);
if (!NT_SUCCESS(Status))
{
MmUnlockAddressSpace(MmGetKernelAddressSpace());
Status = MmRequestPageMemoryConsumer(MC_PPOOL, TRUE, &AllocatedPage);
MmLockAddressSpace(MmGetKernelAddressSpace());
}
Status =
MmCreateVirtualMapping(NULL,
(PVOID)PAGE_ROUND_DOWN(Address),

View file

@ -1,4 +1,4 @@
/* $Id: mminit.c,v 1.28 2001/12/20 03:56:09 dwelch Exp $
/* $Id: mminit.c,v 1.29 2001/12/31 01:53:45 dwelch Exp $
*
* COPYRIGHT: See COPYING in the top directory
* PROJECT: ReactOS kernel
@ -180,7 +180,7 @@ VOID MmInitVirtualMemory(ULONG LastKernelAddress,
0,
&kernel_shared_data_desc,
FALSE);
MmSharedDataPagePhysicalAddress = MmAllocPage(0);
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &MmSharedDataPagePhysicalAddress);
Status = MmCreateVirtualMapping(NULL,
(PVOID)KERNEL_SHARED_DATA_BASE,
PAGE_READWRITE,
@ -191,6 +191,11 @@ VOID MmInitVirtualMemory(ULONG LastKernelAddress,
KeBugCheck(0);
}
((PKUSER_SHARED_DATA)KERNEL_SHARED_DATA_BASE)->TickCountLow = 0xdeadbeef;
/*
*
*/
MmInitializeMemoryConsumer(MC_USER, MmTrimUserMemory);
}
VOID MmInit1(ULONG FirstKrnlPhysAddr,
@ -345,6 +350,7 @@ VOID MmInit3(VOID)
{
MmInitPagerThread();
MmCreatePhysicalMemorySection();
MmInitializeRmapList();
/* FIXME: Read parameters from memory */
}

View file

@ -1,4 +1,4 @@
/* $Id: mpw.c,v 1.6 2001/08/26 17:29:09 ekohl Exp $
/* $Id: mpw.c,v 1.7 2001/12/31 01:53:45 dwelch Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -24,70 +24,10 @@
static HANDLE MpwThreadHandle;
static CLIENT_ID MpwThreadId;
static KEVENT MpwThreadEvent;
static PEPROCESS LastProcess;
static volatile BOOLEAN MpwThreadShouldTerminate;
static ULONG CountToWrite;
/* FUNCTIONS *****************************************************************/
VOID MmStartWritingPages(VOID)
{
CountToWrite = CountToWrite + MmStats.NrDirtyPages;
}
ULONG MmWritePage(PMADDRESS_SPACE AddressSpace,
PVOID Address)
{
PMEMORY_AREA MArea;
NTSTATUS Status;
MArea = MmOpenMemoryAreaByAddress(AddressSpace, Address);
switch(MArea->Type)
{
case MEMORY_AREA_SYSTEM:
return(STATUS_UNSUCCESSFUL);
case MEMORY_AREA_SECTION_VIEW_COMMIT:
Status = MmWritePageSectionView(AddressSpace,
MArea,
Address);
return(Status);
case MEMORY_AREA_VIRTUAL_MEMORY:
Status = MmWritePageVirtualMemory(AddressSpace,
MArea,
Address);
return(Status);
}
return(STATUS_UNSUCCESSFUL);
}
VOID MmWritePagesInProcess(PEPROCESS Process)
{
PVOID Address;
NTSTATUS Status;
MmLockAddressSpace(&Process->AddressSpace);
while ((Address = MmGetDirtyPagesFromWorkingSet(Process)) != NULL)
{
Status = MmWritePage(&Process->AddressSpace, Address);
if (NT_SUCCESS(Status))
{
CountToWrite = CountToWrite - 1;
if (CountToWrite == 0)
{
MmUnlockAddressSpace(&Process->AddressSpace);
return;
}
}
}
MmUnlockAddressSpace(&Process->AddressSpace);
}
NTSTATUS STDCALL
MmMpwThreadMain(PVOID Ignored)
{
@ -111,18 +51,6 @@ MmMpwThreadMain(PVOID Ignored)
DbgPrint("MpwThread: Terminating\n");
return(STATUS_SUCCESS);
}
do
{
KeAttachProcess(LastProcess);
MmWritePagesInProcess(LastProcess);
KeDetachProcess();
if (CountToWrite != 0)
{
LastProcess = PsGetNextProcess(LastProcess);
}
} while (CountToWrite > 0 &&
LastProcess != PsInitialSystemProcess);
}
}
@ -130,8 +58,6 @@ NTSTATUS MmInitMpwThread(VOID)
{
NTSTATUS Status;
CountToWrite = 0;
LastProcess = PsInitialSystemProcess;
MpwThreadShouldTerminate = FALSE;
KeInitializeEvent(&MpwThreadEvent,
SynchronizationEvent,

View file

@ -1,4 +1,4 @@
/* $Id: ncache.c,v 1.12 2001/12/29 14:32:22 dwelch Exp $
/* $Id: ncache.c,v 1.13 2001/12/31 01:53:45 dwelch Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -75,7 +75,7 @@ MmAllocateNonCachedMemory(IN ULONG NumberOfBytes)
{
PVOID NPage;
NPage = MmAllocPageMaybeSwap(0);
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &NPage);
MmCreateVirtualMapping (NULL,
Result + (i * PAGESIZE),
Attributes,
@ -85,7 +85,8 @@ MmAllocateNonCachedMemory(IN ULONG NumberOfBytes)
}
VOID STATIC
MmFreeNonCachedPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address, ULONG PhysAddr)
MmFreeNonCachedPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address, ULONG PhysAddr,
BOOLEAN Dirty)
{
if (PhysAddr != 0)
{

View file

@ -1,4 +1,4 @@
/* $Id: npool.c,v 1.51 2001/12/20 03:56:09 dwelch Exp $
/* $Id: npool.c,v 1.52 2001/12/31 01:53:45 dwelch Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -703,10 +703,16 @@ static BLOCK_HDR* grow_kernel_pool(unsigned int size, ULONG Tag, PVOID Caller)
for (i=0;i<nr_pages;i++)
{
Status = MmCreateVirtualMapping(NULL,
(PVOID)(start + (i*PAGESIZE)),
PAGE_READWRITE,
(ULONG)MmAllocPage(0));
PVOID Page;
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
if (!NT_SUCCESS(Status))
{
return(NULL);
}
Status = MmCreateVirtualMapping(NULL,
(PVOID)(start + (i*PAGESIZE)),
PAGE_READWRITE,
(ULONG)Page);
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");

View file

@ -1,4 +1,4 @@
/* $Id: pager.c,v 1.8 2001/08/26 17:29:09 ekohl Exp $
/* $Id: pager.c,v 1.9 2001/12/31 01:53:45 dwelch Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -24,44 +24,10 @@
static HANDLE PagerThreadHandle;
static CLIENT_ID PagerThreadId;
static KEVENT PagerThreadEvent;
static PEPROCESS LastProcess;
static volatile BOOLEAN PagerThreadShouldTerminate;
static volatile ULONG PageCount;
static volatile ULONG WaiterCount;
static KEVENT FreedMemEvent;
static BOOLEAN PagerThreadShouldTerminate;
/* FUNCTIONS *****************************************************************/
VOID MmWaitForFreePages(VOID)
{
InterlockedIncrement((PULONG)&PageCount);
KeClearEvent(&FreedMemEvent);
KeSetEvent(&PagerThreadEvent,
IO_NO_INCREMENT,
FALSE);
InterlockedIncrement((PULONG)&WaiterCount);
KeWaitForSingleObject(&FreedMemEvent,
0,
KernelMode,
FALSE,
NULL);
InterlockedDecrement((PULONG)&WaiterCount);
}
static VOID MmTryPageOutFromProcess(PEPROCESS Process)
{
ULONG P;
MmLockAddressSpace(&Process->AddressSpace);
P = MmTrimWorkingSet(Process, PageCount);
if (P > 0)
{
InterlockedExchangeAdd((PULONG)&PageCount, -P);
KeSetEvent(&FreedMemEvent, IO_NO_INCREMENT, FALSE);
}
MmUnlockAddressSpace(&Process->AddressSpace);
}
static NTSTATUS STDCALL
MmPagerThreadMain(PVOID Ignored)
{
@ -84,22 +50,6 @@ MmPagerThreadMain(PVOID Ignored)
DbgPrint("PagerThread: Terminating\n");
return(STATUS_SUCCESS);
}
while (WaiterCount > 0)
{
while (PageCount > 0)
{
KeAttachProcess(LastProcess);
MmTryPageOutFromProcess(LastProcess);
KeDetachProcess();
if (PageCount != 0)
{
LastProcess = PsGetNextProcess(LastProcess);
}
}
DbgPrint("Out of memory\n");
KeSetEvent(&FreedMemEvent, IO_NO_INCREMENT, FALSE);
}
}
}
@ -107,16 +57,10 @@ NTSTATUS MmInitPagerThread(VOID)
{
NTSTATUS Status;
PageCount = 0;
WaiterCount = 0;
LastProcess = PsInitialSystemProcess;
PagerThreadShouldTerminate = FALSE;
KeInitializeEvent(&PagerThreadEvent,
SynchronizationEvent,
FALSE);
KeInitializeEvent(&FreedMemEvent,
NotificationEvent,
FALSE);
Status = PsCreateSystemThread(&PagerThreadHandle,
THREAD_ALL_ACCESS,

View file

@ -1,4 +1,4 @@
/* $Id: ppool.c,v 1.5 2001/12/26 23:34:07 dwelch Exp $
/* $Id: ppool.c,v 1.6 2001/12/31 01:53:45 dwelch Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -122,7 +122,7 @@ ExAllocatePagedPoolWithTag (IN POOL_TYPE PoolType,
/*
* Is there enough space to create a second block from the unused portion.
*/
if ((BestBlock->Size - BlockSize) > sizeof(PMM_PPOOL_USED_BLOCK_HEADER))
if ((BestBlock->Size - BlockSize) > sizeof(MM_PPOOL_FREE_BLOCK_HEADER))
{
ULONG NewSize = BestBlock->Size - BlockSize;

233
reactos/ntoskrnl/mm/rmap.c Normal file
View file

@ -0,0 +1,233 @@
/*
* ReactOS kernel
* Copyright (C) 1998, 1999, 2000, 2001 ReactOS Team
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: rmap.c,v 1.1 2001/12/31 01:53:45 dwelch Exp $
*
* COPYRIGHT: See COPYING in the top directory
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/rmap.c
* PURPOSE: kernel memory management functions
* PROGRAMMER: David Welch (welch@cwcom.net)
* UPDATE HISTORY:
* Created 27/12/01
*/
/* INCLUDES *****************************************************************/
#include <ddk/ntddk.h>
#include <internal/mm.h>
#include <internal/ps.h>
#define NDEBUG
#include <internal/debug.h>
/* TYPES ********************************************************************/
typedef struct _MM_RMAP_ENTRY
{
struct _MM_RMAP_ENTRY* Next;
PEPROCESS Process;
PVOID Address;
} MM_RMAP_ENTRY, *PMM_RMAP_ENTRY;
/* GLOBALS ******************************************************************/
static FAST_MUTEX RmapListLock;
/* FUNCTIONS ****************************************************************/
/*
 * FUNCTION: One-time initialisation of the reverse-mapping (rmap) module.
 * Sets up the fast mutex that serialises all access to the per-page
 * rmap lists; must be called before any other Mm*Rmap* routine.
 */
VOID
MmInitializeRmapList(VOID)
{
ExInitializeFastMutex(&RmapListLock);
}
/*
 * FUNCTION: Attempt to page out the physical page with the given address.
 * ARGUMENTS:
 *     PhysicalAddress = Physical address of the page to page out.
 * RETURNS: STATUS_SUCCESS if the page was paged out,
 *          STATUS_UNSUCCESSFUL if the page has no mappings, another
 *          thread owns the page operation, or the page-out itself failed.
 * NOTES: Only the first entry on the page's rmap list is examined here;
 * the per-area page-out routines are responsible for removing any
 * remaining mappings of the page.
 */
NTSTATUS
MmPageOutPhysicalAddress(PVOID PhysicalAddress)
{
  PMM_RMAP_ENTRY entry;
  PMEMORY_AREA MemoryArea;
  ULONG Type;
  PVOID Address;
  PEPROCESS Process;
  PMM_PAGEOP PageOp;
  LARGE_INTEGER Offset;
  NTSTATUS Status;

  /*
   * Find one process/address pair that maps this page; if there is none
   * then there is nothing we can page out.
   */
  ExAcquireFastMutex(&RmapListLock);
  entry = MmGetRmapListHeadPage(PhysicalAddress);
  if (entry == NULL)
    {
      ExReleaseFastMutex(&RmapListLock);
      return(STATUS_UNSUCCESSFUL);
    }
  Process = entry->Process;
  Address = entry->Address;
  /* Rmap entries must always record page-aligned addresses. */
  if ((((ULONG)Address) & 0xFFF) != 0)
    {
      KeBugCheck(0);
    }

  MmLockAddressSpace(&Process->AddressSpace);
  MemoryArea = MmOpenMemoryAreaByAddress(&Process->AddressSpace, Address);
  Type = MemoryArea->Type;
  if (Type == MEMORY_AREA_SECTION_VIEW_COMMIT)
    {
      Offset.QuadPart = (ULONG)((Address - (ULONG)MemoryArea->BaseAddress) +
				MemoryArea->Data.SectionData.ViewOffset);

      /*
       * Get or create a pageop
       */
      PageOp = MmGetPageOp(MemoryArea, 0, 0,
			   MemoryArea->Data.SectionData.Segment,
			   Offset.u.LowPart, MM_PAGEOP_PAGEOUT);
      if (PageOp == NULL)
	{
	  DPRINT1("MmGetPageOp failed\n");
	  KeBugCheck(0);
	}
      if (PageOp->Thread != PsGetCurrentThread())
	{
	  /* Another thread already owns this page operation; back off. */
	  MmReleasePageOp(PageOp);
	  MmUnlockAddressSpace(&Process->AddressSpace);
	  ExReleaseFastMutex(&RmapListLock);
	  return(STATUS_UNSUCCESSFUL);
	}

      /*
       * Release locks now we have a page op.
       */
      MmUnlockAddressSpace(&Process->AddressSpace);
      ExReleaseFastMutex(&RmapListLock);

      /*
       * Do the actual page out work.
       */
      Status = MmPageOutSectionView(&Process->AddressSpace, MemoryArea,
				    Address, PageOp);
    }
  else if (Type == MEMORY_AREA_VIRTUAL_MEMORY)
    {
      PageOp = MmGetPageOp(MemoryArea, Process->UniqueProcessId,
			   Address, NULL, 0, MM_PAGEOP_PAGEOUT);
      /*
       * FIX: check for allocation failure here too, exactly as the
       * section-view path above does; previously a NULL return from
       * MmGetPageOp was dereferenced below.
       */
      if (PageOp == NULL)
	{
	  DPRINT1("MmGetPageOp failed\n");
	  KeBugCheck(0);
	}
      if (PageOp->Thread != PsGetCurrentThread())
	{
	  MmReleasePageOp(PageOp);
	  MmUnlockAddressSpace(&Process->AddressSpace);
	  ExReleaseFastMutex(&RmapListLock);
	  return(STATUS_UNSUCCESSFUL);
	}

      /*
       * Release locks now we have a page op.
       */
      MmUnlockAddressSpace(&Process->AddressSpace);
      ExReleaseFastMutex(&RmapListLock);

      /*
       * Do the actual page out work.
       */
      Status = MmPageOutVirtualMemory(&Process->AddressSpace, MemoryArea,
				      Address, PageOp);
    }
  else
    {
      /* No other memory area type supports page-out. */
      KeBugCheck(0);
    }
  return(Status);
}
/*
 * FUNCTION: Record that (Process, Address) currently maps the given
 * physical page, by pushing a new entry onto the page's rmap list.
 */
VOID
MmInsertRmap(PVOID PhysicalAddress, PEPROCESS Process, PVOID Address)
{
  PMM_RMAP_ENTRY entry;

  entry = ExAllocatePool(NonPagedPool, sizeof(MM_RMAP_ENTRY));
  if (entry == NULL)
    {
      /* Without an rmap entry this page could never be paged out. */
      KeBugCheck(0);
    }
  entry->Address = Address;
  entry->Process = Process;

  /* Sanity check: the mapping being recorded must actually exist. */
  if (MmGetPhysicalAddressForProcess(Process, Address) !=
      (ULONG)PhysicalAddress)
    {
      KeBugCheck(0);
    }

  /* Link the new entry in at the head of the page's list. */
  ExAcquireFastMutex(&RmapListLock);
  entry->Next = MmGetRmapListHeadPage(PhysicalAddress);
  MmSetRmapListHeadPage(PhysicalAddress, entry);
  ExReleaseFastMutex(&RmapListLock);
}
/*
 * FUNCTION: Remove every rmap entry for the given physical page,
 * invoking DeleteMapping (if supplied) for each (process, address)
 * mapping before its entry is freed.
 */
VOID
MmDeleteAllRmaps(PVOID PhysicalAddress, PVOID Context,
		 VOID (*DeleteMapping)(PVOID Context, PEPROCESS Process,
				       PVOID Address))
{
  PMM_RMAP_ENTRY entry;
  PMM_RMAP_ENTRY next;

  ExAcquireFastMutex(&RmapListLock);
  for (entry = MmGetRmapListHeadPage(PhysicalAddress); entry != NULL;
       entry = next)
    {
      /* Save the link before the callback/free invalidates the entry. */
      next = entry->Next;
      if (DeleteMapping)
	{
	  DeleteMapping(Context, entry->Process, entry->Address);
	}
      ExFreePool(entry);
    }
  MmSetRmapListHeadPage(PhysicalAddress, NULL);
  ExReleaseFastMutex(&RmapListLock);
}
/*
 * FUNCTION: Remove the single rmap entry recording that
 * (Process, Address) maps the given physical page.
 * Bugchecks if no such entry is on the page's list.
 */
VOID
MmDeleteRmap(PVOID PhysicalAddress, PEPROCESS Process, PVOID Address)
{
  PMM_RMAP_ENTRY entry;
  PMM_RMAP_ENTRY prev;

  ExAcquireFastMutex(&RmapListLock);
  prev = NULL;
  for (entry = MmGetRmapListHeadPage(PhysicalAddress); entry != NULL;
       entry = entry->Next)
    {
      if (entry->Process == Process && entry->Address == Address)
	{
	  /* Unlink the entry, drop the lock, then free it. */
	  if (prev == NULL)
	    {
	      MmSetRmapListHeadPage(PhysicalAddress, entry->Next);
	    }
	  else
	    {
	      prev->Next = entry->Next;
	    }
	  ExReleaseFastMutex(&RmapListLock);
	  ExFreePool(entry);
	  return;
	}
      prev = entry;
    }
  /* The caller asked to delete a mapping that was never inserted. */
  KeBugCheck(0);
}

View file

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: section.c,v 1.69 2001/12/29 14:32:22 dwelch Exp $
/* $Id: section.c,v 1.70 2001/12/31 01:53:45 dwelch Exp $
*
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/section.c
@ -41,6 +41,17 @@
#define NDEBUG
#include <internal/debug.h>
/* TYPES *********************************************************************/
typedef struct
{
PSECTION_OBJECT Section;
PMM_SECTION_SEGMENT Segment;
LARGE_INTEGER Offset;
BOOLEAN WasDirty;
BOOLEAN Private;
} MM_SECTION_PAGEOUT_CONTEXT;
/* GLOBALS *******************************************************************/
POBJECT_TYPE EXPORTED MmSectionObjectType = NULL;
@ -205,7 +216,7 @@ MmGetPageEntrySectionSegment(PMM_SECTION_SEGMENT Segment,
return(Entry);
}
VOID
VOID
MmSharePageEntrySectionSegment(PMM_SECTION_SEGMENT Segment,
ULONG Offset)
{
@ -222,13 +233,15 @@ MmSharePageEntrySectionSegment(PMM_SECTION_SEGMENT Segment,
DPRINT1("Maximum share count reached\n");
KeBugCheck(0);
}
Entry++;
Entry = (Entry & 0xFFFFF000) | (SHARE_COUNT(Entry) + 1);
MmSetPageEntrySectionSegment(Segment, Offset, Entry);
}
VOID
MmUnsharePageEntrySectionSegment(PMM_SECTION_SEGMENT Segment,
ULONG Offset)
BOOLEAN
MmUnsharePageEntrySectionSegment(PSECTION_OBJECT Section,
PMM_SECTION_SEGMENT Segment,
ULONG Offset,
BOOLEAN Dirty)
{
ULONG Entry;
@ -243,8 +256,39 @@ MmUnsharePageEntrySectionSegment(PMM_SECTION_SEGMENT Segment,
DPRINT1("Zero share count for unshare\n");
KeBugCheck(0);
}
Entry--;
MmSetPageEntrySectionSegment(Segment, Offset, Entry);
Entry = (Entry & 0xFFFFF000) | (SHARE_COUNT(Entry) - 1);
/*
* If we reducing the share count of this entry to zero then set the entry to zero and
* tell the cache the page is no longer mapped.
*/
if (SHARE_COUNT(Entry) == 0)
{
PFILE_OBJECT FileObject;
PREACTOS_COMMON_FCB_HEADER Fcb;
MmSetPageEntrySectionSegment(Segment, Offset, 0);
FileObject = Section->FileObject;
if (FileObject != NULL)
{
Fcb = (PREACTOS_COMMON_FCB_HEADER)FileObject->FsContext;
if (FileObject->Flags & FO_DIRECT_CACHE_PAGING_READ &&
(Offset % PAGESIZE) == 0)
{
NTSTATUS Status;
Status = CcRosUnmapCacheSegment(Fcb->Bcb, Offset, Dirty);
if (!NT_SUCCESS(Status))
{
KeBugCheck(0);
}
}
}
}
else
{
MmSetPageEntrySectionSegment(Segment, Offset, Entry);
}
return(SHARE_COUNT(Entry) > 1);
}
NTSTATUS
@ -336,12 +380,7 @@ MiReadPage(PMEMORY_AREA MemoryArea,
* Allocate a page, this is rather complicated by the possibility
* we might have to move other things out of memory
*/
(*Page) = MmAllocPage(0);
while ((*Page) == NULL)
{
MmWaitForFreePages();
(*Page) = MmAllocPage(0);
}
Status = MmRequestPageMemoryConsumer(MC_USER, TRUE, Page);
/*
* Create an mdl to hold the page we are going to read data into.
@ -471,7 +510,7 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
Page = (PVOID)(Entry & 0xFFFFF000);
MmReferencePage(Page);
// MmSharePageEntrySectionSegment(Segment, Offset.u.LowPart);
MmSharePageEntrySectionSegment(Segment, Offset.u.LowPart);
Status = MmCreateVirtualMapping(PsGetCurrentProcess(),
Address,
@ -482,6 +521,7 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
DbgPrint("Unable to create virtual mapping\n");
KeBugCheck(0);
}
MmInsertRmap(Page, PsGetCurrentProcess(), (PVOID)PAGE_ROUND_DOWN(Address));
}
if (Locked)
{
@ -505,6 +545,7 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
Address,
MemoryArea->Attributes,
Offset.QuadPart);
/* Don't add an rmap entry since the page mapped could be for anything. */
if (Locked)
{
MmLockPage((PVOID)MmGetPhysicalAddressForProcess(NULL, Address));
@ -524,26 +565,25 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
/*
* Map anonymous memory for BSS sections
*/
if (Segment->Characteristics & IMAGE_SECTION_CHAR_BSS ||
Segment->Flags & MM_PAGEFILE_SECTION)
if (Segment->Characteristics & IMAGE_SECTION_CHAR_BSS)
{
Page = MmAllocPage(0);
while (Page == NULL)
Status = MmRequestPageMemoryConsumer(MC_USER, FALSE, &Page);
if (!NT_SUCCESS(Status))
{
MmUnlockSectionSegment(Segment);
MmUnlockSection(Section);
MmUnlockAddressSpace(AddressSpace);
MmWaitForFreePages();
MmRequestPageMemoryConsumer(MC_USER, TRUE, &Page);
MmLockAddressSpace(AddressSpace);
MmLockSection(Section);
MmLockSectionSegment(Segment);
Page = MmAllocPage(0);
}
Status = MmCreateVirtualMapping(PsGetCurrentProcess(),
Address,
MemoryArea->Attributes,
(ULONG)Page);
MmInsertRmap(Page, PsGetCurrentProcess(), (PVOID)PAGE_ROUND_DOWN(Address));
if (Locked)
{
MmLockPage((PVOID)MmGetPhysicalAddressForProcess(NULL, Address));
@ -575,7 +615,7 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
/*
* Get the entry corresponding to the offset within the section
*/
Entry = MmGetPageEntrySectionSegment(Segment, Offset.u.LowPart);
Entry = MmGetPageEntrySectionSegment(Segment, Offset.u.LowPart) & 0xFFFFF000;
if (Entry == 0)
{
@ -591,7 +631,14 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
MmUnlockSection(Section);
MmUnlockAddressSpace(AddressSpace);
Status = MiReadPage(MemoryArea, &Offset, &Page);
if (Segment->Flags & MM_PAGEFILE_SECTION)
{
Status = MmRequestPageMemoryConsumer(MC_USER, TRUE, &Page);
}
else
{
Status = MiReadPage(MemoryArea, &Offset, &Page);
}
if (!NT_SUCCESS(Status) && Status != STATUS_END_OF_FILE)
{
/*
@ -620,7 +667,7 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
* Check the entry. No one should change the status of a page
* that has a pending page-in.
*/
Entry1 = MmGetPageEntrySectionSegment(Segment, Offset.QuadPart);
Entry1 = MmGetPageEntrySectionSegment(Segment, Offset.QuadPart) & 0xFFFFF000;
if (Entry != Entry1)
{
DbgPrint("Someone changed ppte entry while we slept\n");
@ -633,12 +680,13 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
*/
Entry = (ULONG)Page;
MmSetPageEntrySectionSegment(Segment, Offset.QuadPart, Entry);
// MmSharePageEntrySectionSegment(Segment, Offset.QuadPart);
MmSharePageEntrySectionSegment(Segment, Offset.QuadPart);
Status = MmCreateVirtualMapping(PsGetCurrentProcess(),
Address,
Attributes,
(ULONG)Page);
MmInsertRmap(Page, PsGetCurrentProcess(), (PVOID)PAGE_ROUND_DOWN(Address));
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");
@ -665,12 +713,13 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
Page = (PVOID)Entry;
MmReferencePage(Page);
// MmSharePageEntrySectionSegment(Segment, Offset.QuadPart);
MmSharePageEntrySectionSegment(Segment, Offset.QuadPart);
Status = MmCreateVirtualMapping(PsGetCurrentProcess(),
Address,
Attributes,
(ULONG)Page);
MmInsertRmap(Page, PsGetCurrentProcess(), (PVOID)PAGE_ROUND_DOWN(Address));
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");
@ -794,12 +843,7 @@ MmAccessFaultSectionView(PMADDRESS_SPACE AddressSpace,
/*
* Allocate a page
*/
NewPage = MmAllocPage(0);
while (NewPage == NULL)
{
MmWaitForFreePages();
NewPage = MmAllocPage(0);
}
Status = MmRequestPageMemoryConsumer(MC_USER, TRUE, &NewPage);
/*
* Copy the old page
@ -818,6 +862,7 @@ MmAccessFaultSectionView(PMADDRESS_SPACE AddressSpace,
Address,
MemoryArea->Attributes,
(ULONG)NewPage);
MmInsertRmap(NewPage, PsGetCurrentProcess(), (PVOID)PAGE_ROUND_DOWN(Address));
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");
@ -827,20 +872,288 @@ MmAccessFaultSectionView(PMADDRESS_SPACE AddressSpace,
{
MmLockPage((PVOID)MmGetPhysicalAddressForProcess(NULL, Address));
}
/*
* Unshare the old page.
*/
MmUnsharePageEntrySectionSegment(Section, Segment, Offset.QuadPart, FALSE);
MmDeleteRmap((PVOID)OldPage, PsGetCurrentProcess(), (PVOID)PAGE_ROUND_DOWN(Address));
MmDereferencePage((PVOID)OldPage);
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmReleasePageOp(PageOp);
return(STATUS_SUCCESS);
}
ULONG
VOID
MmPageOutDeleteMapping(PVOID Context, PEPROCESS Process, PVOID Address)
{
MM_SECTION_PAGEOUT_CONTEXT* PageOutContext;
BOOL WasDirty;
PageOutContext = (MM_SECTION_PAGEOUT_CONTEXT*)Context;
MmDeleteVirtualMapping(Process,
Address,
FALSE,
&WasDirty,
NULL);
PageOutContext->WasDirty = PageOutContext->WasDirty || WasDirty;
if (!PageOutContext->Private)
{
MmUnsharePageEntrySectionSegment(PageOutContext->Section,
PageOutContext->Segment,
PageOutContext->Offset.u.LowPart,
PageOutContext->WasDirty);
}
}
NTSTATUS
MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
MEMORY_AREA* MemoryArea,
PVOID Address,
PBOOLEAN Ul)
PMM_PAGEOP PageOp)
{
(*Ul) = FALSE;
return(0);
LARGE_INTEGER Offset;
PSECTION_OBJECT Section;
PMM_SECTION_SEGMENT Segment;
PVOID PhysicalAddress;
MM_SECTION_PAGEOUT_CONTEXT Context;
SWAPENTRY SwapEntry;
PMDL Mdl;
ULONG Entry;
BOOLEAN Private;
NTSTATUS Status;
PFILE_OBJECT FileObject;
PREACTOS_COMMON_FCB_HEADER Fcb;
BOOLEAN DirectMapped;
DPRINT("MmPageOutSection(Process %d, Address 0x%.8X)\n",
AddressSpace->Process->UniqueProcessId,
Address);
Address = (PVOID)PAGE_ROUND_DOWN(Address);
Offset.QuadPart = (ULONG)(Address - (ULONG)MemoryArea->BaseAddress) +
MemoryArea->Data.SectionData.ViewOffset;
FileObject = MemoryArea->Data.SectionData.Section->FileObject;
DirectMapped = FALSE;
if (FileObject != NULL)
{
Fcb = (PREACTOS_COMMON_FCB_HEADER)FileObject->FsContext;
/*
* If the file system is letting us go directly to the cache and the
* memory area was mapped at an offset in the file which is page aligned
* then note this is a direct mapped page.
*/
if (FileObject->Flags & FO_DIRECT_CACHE_PAGING_READ &&
(Offset.QuadPart % PAGESIZE) == 0)
{
DirectMapped = TRUE;
}
}
/*
* Get the segment and section.
*/
Segment = MemoryArea->Data.SectionData.Segment;
Section = MemoryArea->Data.SectionData.Section;
/*
* This should never happen, since mappings of physical memory are never
* placed in the rmap lists.
*/
if (Segment->Flags & SO_PHYSICAL_MEMORY)
{
KeBugCheck(0);
}
/*
* Get the section segment entry and the physical address.
*/
Entry = MmGetPageEntrySectionSegment(Segment, Offset.QuadPart);
PhysicalAddress = (PVOID)MmGetPhysicalAddressForProcess(AddressSpace->Process,
Address);
/*
* Prepare the context structure for the rmap delete call.
*/
Context.Section = Section;
Context.Segment = Segment;
Context.Offset = Offset;
Context.WasDirty = FALSE;
Context.Private = Private = ((PVOID)(Entry & 0xFFFFF000) != PhysicalAddress);
/*
* Paging only data mapped read-only is easy.
*/
if (MemoryArea->Attributes & PAGE_READONLY ||
MemoryArea->Attributes & PAGE_EXECUTE_READ)
{
/*
* Delete all mappings of this page.
*/
MmDeleteAllRmaps(PhysicalAddress, (PVOID)&Context, MmPageOutDeleteMapping);
if (Context.WasDirty)
{
KeBugCheck(0);
}
/*
* If this page wasn't direct mapped then we have a private copy so release
* back to the system; otherwise the cache manager will have handled freeing
* the cache segment which we mapped from.
*/
if (!DirectMapped)
{
MmReleasePageMemoryConsumer(MC_USER, PhysicalAddress);
}
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmReleasePageOp(PageOp);
return(STATUS_SUCCESS);
}
/*
* Otherwise we have read-write data.
*/
MmDeleteAllRmaps(PhysicalAddress, (PVOID)&Context, MmPageOutDeleteMapping);
/*
* If this wasn't a private page then we should have reduced the entry to
* zero by deleting all the rmaps.
*/
if (!Private && MmGetPageEntrySectionSegment(Segment, Offset.QuadPart) != 0)
{
KeBugCheck(0);
}
/*
* If the page wasn't dirty then we can just free it as for a readonly page.
* Since we unmapped all the mappings above we know it will not suddenly
* become dirty.
*/
if (!Context.WasDirty)
{
if (!DirectMapped)
{
MmReleasePageMemoryConsumer(MC_USER, PhysicalAddress);
}
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmReleasePageOp(PageOp);
return(STATUS_SUCCESS);
}
/*
* If this page was direct mapped from the cache then the cache manager
* will already have taken care of writing it back.
*/
if (DirectMapped)
{
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmReleasePageOp(PageOp);
return(STATUS_SUCCESS);
}
/*
* If necessary, allocate an entry in the paging file for this page
*/
SwapEntry = MmGetSavedSwapEntryPage((PVOID)PhysicalAddress);
if (SwapEntry == 0)
{
SwapEntry = MmAllocSwapPage();
if (SwapEntry == 0)
{
/*
* For private pages restore the old mappings.
*/
if (Private)
{
Status = MmCreateVirtualMapping(MemoryArea->Process,
Address,
MemoryArea->Attributes,
(ULONG)PhysicalAddress);
MmInsertRmap(PhysicalAddress,
MemoryArea->Process,
Address);
}
else
{
/*
* For non-private pages if the page wasn't direct mapped then
* set it back into section segment entry so we don't lose our
* copy. Otherwise it will be handled by the cache manager.
*/
if (!DirectMapped)
{
MmSetPageEntrySectionSegment(Segment, Offset.QuadPart, (ULONG)PhysicalAddress);
MmSharePageEntrySectionSegment(Segment, Offset.QuadPart);
}
}
PageOp->Status = STATUS_UNSUCCESSFUL;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmReleasePageOp(PageOp);
return(STATUS_UNSUCCESSFUL);
}
}
/*
* Write the page to the pagefile
*/
Mdl = MmCreateMdl(NULL, NULL, PAGESIZE);
MmBuildMdlFromPages(Mdl, (PULONG)&PhysicalAddress);
Status = MmWriteToSwapPage(SwapEntry, Mdl);
if (!NT_SUCCESS(Status))
{
DPRINT1("MM: Failed to write to swap page\n");
/*
* As above: undo our actions.
* FIXME: Also free the swap page.
*/
if (Private)
{
Status = MmCreateVirtualMapping(MemoryArea->Process,
Address,
MemoryArea->Attributes,
(ULONG)PhysicalAddress);
MmInsertRmap(PhysicalAddress,
MemoryArea->Process,
Address);
}
else
{
if (!DirectMapped)
{
MmSetPageEntrySectionSegment(Segment, Offset.QuadPart, (ULONG)PhysicalAddress);
MmSharePageEntrySectionSegment(Segment, Offset.QuadPart);
}
}
PageOp->Status = STATUS_UNSUCCESSFUL;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmReleasePageOp(PageOp);
return(STATUS_UNSUCCESSFUL);
}
/*
* Otherwise we have succeeded.
*/
if (!DirectMapped)
{
MmReleasePageMemoryConsumer(MC_USER, PhysicalAddress);
}
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmReleasePageOp(PageOp);
return(STATUS_SUCCESS);
}
VOID STDCALL
@ -1852,7 +2165,8 @@ NtMapViewOfSection(HANDLE SectionHandle,
}
VOID STATIC
MmFreeSectionPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address, ULONG PhysAddr)
MmFreeSectionPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address, ULONG PhysAddr,
BOOLEAN Dirty)
{
PMEMORY_AREA MArea;
ULONG Entry;
@ -1873,16 +2187,18 @@ MmFreeSectionPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address, ULONG P
* Just dereference private pages
*/
if (PhysAddr != (Entry & 0xFFFFF000))
{
{
MmDeleteRmap((PVOID)PhysAddr, MArea->Process, Address);
MmDereferencePage((PVOID)PhysAddr);
}
else
{
#if 0
MmUnsharePageEntrySectionSegment(MArea->Data.SectionData.Segment,
Offset);
MmUnsharePageEntrySectionSegment(MArea->Data.SectionData.Section,
MArea->Data.SectionData.Segment,
Offset,
Dirty);
MmDeleteRmap((PVOID)PhysAddr, MArea->Process, Address);
MmDereferencePage((PVOID)PhysAddr);
#endif
}
}
}

View file

@ -1,4 +1,4 @@
/* $Id: virtual.c,v 1.50 2001/12/29 14:32:22 dwelch Exp $
/* $Id: virtual.c,v 1.51 2001/12/31 01:53:45 dwelch Exp $
*
* COPYRIGHT: See COPYING in the top directory
* PROJECT: ReactOS kernel
@ -95,67 +95,56 @@ MmWritePageVirtualMemory(PMADDRESS_SPACE AddressSpace,
}
ULONG MmPageOutVirtualMemory(PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA MemoryArea,
PVOID Address,
PBOOLEAN Ul)
NTSTATUS
MmPageOutVirtualMemory(PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA MemoryArea,
PVOID Address,
PMM_PAGEOP PageOp)
{
ULONG PhysicalAddress;
PVOID PhysicalAddress;
BOOL WasDirty;
SWAPENTRY SwapEntry;
NTSTATUS Status;
PMDL Mdl;
PMM_PAGEOP PageOp;
/*
* Get or create a pageop
*/
PageOp = MmGetPageOp(MemoryArea, AddressSpace->Process->UniqueProcessId,
(PVOID)PAGE_ROUND_DOWN(Address), NULL, 0,
MM_PAGEOP_PAGEOUT);
if (PageOp->Thread != PsGetCurrentThread())
{
/*
* On the assumption that handling pageouts speedly rather than
* in strict order is better abandon this one.
*/
(*Ul) = FALSE;
MmReleasePageOp(PageOp);
return(STATUS_UNSUCCESSFUL);
}
DPRINT("MmPageOutVirtualMemory(Address 0x%.8X) PID %d\n",
Address, MemoryArea->Process->UniqueProcessId);
/*
* Paging out code or readonly data is easy.
*/
if ((MemoryArea->Attributes & PAGE_READONLY) ||
(MemoryArea->Attributes & PAGE_EXECUTE_READ))
{
MmRemovePageFromWorkingSet(AddressSpace->Process, Address);
MmDeleteVirtualMapping(PsGetCurrentProcess(), Address, FALSE,
NULL, &PhysicalAddress);
MmDereferencePage((PVOID)PhysicalAddress);
{
MmDeleteVirtualMapping(MemoryArea->Process, Address, FALSE,
NULL, (PULONG)&PhysicalAddress);
MmDeleteAllRmaps(PhysicalAddress, NULL, NULL);
MmReleasePageMemoryConsumer(MC_USER, PhysicalAddress);
*Ul = TRUE;
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmReleasePageOp(PageOp);
return(1);
return(STATUS_SUCCESS);
}
/*
* Otherwise this is read-write data
*/
MmDeleteVirtualMapping(PsGetCurrentProcess(), Address, FALSE,
&WasDirty, &PhysicalAddress);
MmDisableVirtualMapping(MemoryArea->Process, Address,
&WasDirty, (PULONG)&PhysicalAddress);
if (PhysicalAddress == 0)
{
KeBugCheck(0);
}
if (!WasDirty)
{
MmRemovePageFromWorkingSet(AddressSpace->Process, Address);
MmDereferencePage((PVOID)PhysicalAddress);
*Ul = TRUE;
MmDeleteVirtualMapping(MemoryArea->Process, Address, FALSE, NULL, NULL);
MmDeleteAllRmaps(PhysicalAddress, NULL, NULL);
MmReleasePageMemoryConsumer(MC_USER, PhysicalAddress);
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmReleasePageOp(PageOp);
return(1);
return(STATUS_SUCCESS);
}
/*
@ -167,15 +156,12 @@ ULONG MmPageOutVirtualMemory(PMADDRESS_SPACE AddressSpace,
SwapEntry = MmAllocSwapPage();
if (SwapEntry == 0)
{
Status = MmCreateVirtualMapping(PsGetCurrentProcess(),
Address,
MemoryArea->Attributes,
PhysicalAddress);
*Ul = FALSE;
DPRINT("MM: Out of swap space.\n");
MmEnableVirtualMapping(MemoryArea->Process, Address);
PageOp->Status = STATUS_UNSUCCESSFUL;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmReleasePageOp(PageOp);
return(0);
return(STATUS_UNSUCCESSFUL);
}
}
@ -183,32 +169,28 @@ ULONG MmPageOutVirtualMemory(PMADDRESS_SPACE AddressSpace,
* Write the page to the pagefile
*/
Mdl = MmCreateMdl(NULL, NULL, PAGESIZE);
MmBuildMdlFromPages(Mdl, &PhysicalAddress);
MmBuildMdlFromPages(Mdl, (PULONG)&PhysicalAddress);
Status = MmWriteToSwapPage(SwapEntry, Mdl);
if (!NT_SUCCESS(Status))
{
DPRINT1("MM: Failed to write to swap page\n");
Status = MmCreateVirtualMapping(PsGetCurrentProcess(),
Address,
MemoryArea->Attributes,
PhysicalAddress);
*Ul = FALSE;
MmEnableVirtualMapping(MemoryArea->Process, Address);
PageOp->Status = STATUS_UNSUCCESSFUL;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmReleasePageOp(PageOp);
return(0);
return(STATUS_UNSUCCESSFUL);
}
/*
* Otherwise we have succeeded, free the page
*/
MmRemovePageFromWorkingSet(AddressSpace->Process, Address);
MmDereferencePage((PVOID)PhysicalAddress);
*Ul = TRUE;
MmDeleteVirtualMapping(MemoryArea->Process, Address, FALSE, NULL, NULL);
MmDeleteAllRmaps(PhysicalAddress, NULL, NULL);
MmReleasePageMemoryConsumer(MC_USER, PhysicalAddress);
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmReleasePageOp(PageOp);
return(1);
return(STATUS_SUCCESS);
}
NTSTATUS
@ -324,20 +306,13 @@ MmNotPresentFaultVirtualMemory(PMADDRESS_SPACE AddressSpace,
/*
* Try to allocate a page
*/
Page = MmAllocPage(0);
while (Page == NULL)
Status = MmRequestPageMemoryConsumer(MC_USER, FALSE, &Page);
if (Status == STATUS_NO_MEMORY)
{
MmUnlockAddressSpace(AddressSpace);
MmWaitForFreePages();
MmLockAddressSpace(AddressSpace);
Page = MmAllocPage(0);
MmUnlockAddressSpace(AddressSpace);
Status = MmRequestPageMemoryConsumer(MC_USER, TRUE, &Page);
MmLockAddressSpace(AddressSpace);
}
/*
* Add the page to the process's working set
*/
MmAddPageToWorkingSet(PsGetCurrentProcess(),
(PVOID)PAGE_ROUND_DOWN(Address));
/*
* Set the page. If we fail because we are out of memory then
@ -350,7 +325,7 @@ MmNotPresentFaultVirtualMemory(PMADDRESS_SPACE AddressSpace,
while (Status == STATUS_NO_MEMORY)
{
MmUnlockAddressSpace(AddressSpace);
MmWaitForFreePages();
KeBugCheck(0);
MmLockAddressSpace(AddressSpace);
Status = MmCreateVirtualMapping(PsGetCurrentProcess(),
Address,
@ -364,6 +339,11 @@ MmNotPresentFaultVirtualMemory(PMADDRESS_SPACE AddressSpace,
return(Status);
}
/*
* Add the page to the process's working set
*/
MmInsertRmap(Page, PsGetCurrentProcess(), (PVOID)PAGE_ROUND_DOWN(Address));
/*
* Finish the operation
*/
@ -407,8 +387,8 @@ MmModifyAttributes(PMADDRESS_SPACE AddressSpace,
FALSE, NULL, NULL);
if (PhysicalAddr.u.LowPart != 0)
{
MmRemovePageFromWorkingSet(AddressSpace->Process,
BaseAddress + (i*PAGESIZE));
MmDeleteRmap((PVOID)PhysicalAddr.u.LowPart, AddressSpace->Process,
BaseAddress + (i * PAGESIZE));
MmDereferencePage((PVOID)(ULONG)(PhysicalAddr.u.LowPart));
}
}
@ -1025,13 +1005,14 @@ VOID STATIC
MmFreeVirtualMemoryPage(PVOID Context,
MEMORY_AREA* MemoryArea,
PVOID Address,
ULONG PhysicalAddr)
ULONG PhysicalAddr,
BOOLEAN Dirty)
{
PEPROCESS Process = (PEPROCESS)Context;
if (PhysicalAddr != 0)
{
MmRemovePageFromWorkingSet(Process, Address);
MmDeleteRmap((PVOID)PhysicalAddr, Process, Address);
MmDereferencePage((PVOID)PhysicalAddr);
}
}

View file

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: wset.c,v 1.10 2001/08/03 09:36:19 ei Exp $
/* $Id: wset.c,v 1.11 2001/12/31 01:53:45 dwelch Exp $
*
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/wset.c
@ -33,338 +33,34 @@
#include <internal/ps.h>
#include <ntos/minmax.h>
#define NDEBUG
#include <internal/debug.h>
/* FUNCTIONS *****************************************************************/
VOID
KiInitializeCircularQueue(PKCIRCULAR_QUEUE Queue, ULONG MaximumSize,
PVOID* Mem)
NTSTATUS
MmTrimUserMemory(ULONG Target, ULONG Priority, PULONG NrFreedPages)
{
Queue->MaximumSize = MaximumSize;
Queue->CurrentSize = 0;
Queue->First = Queue->Last = 0;
Queue->Mem = Mem;
}
VOID
KiInsertItemCircularQueue(PKCIRCULAR_QUEUE Queue, PVOID Item)
{
Queue->Mem[Queue->Last] = Item;
Queue->Last = (Queue->Last + 1) % Queue->MaximumSize;
Queue->CurrentSize++;
}
VOID
KiRemoveItemCircularQueue(PKCIRCULAR_QUEUE Queue, PVOID Item)
{
ULONG i, j;
j = Queue->First;
for (i = 0; i < Queue->CurrentSize; i++)
{
if (Queue->Mem[j] == Item)
{
if (j != Queue->First)
{
if (j > 0 && Queue->First <= j)
{
memmove(&Queue->Mem[Queue->First + 1],
&Queue->Mem[Queue->First],
sizeof(PVOID) * (j - Queue->First));
}
else if (j > 0 && Queue->First > j)
{
memmove(&Queue->Mem[1], &Queue->Mem[0],
sizeof(PVOID) * j);
Queue->Mem[0] = Queue->Mem[Queue->MaximumSize - 1];
memmove(&Queue[Queue->First + 1], &Queue[Queue->First],
sizeof(PVOID) *
((Queue->MaximumSize - 1) - Queue->First));
}
else if (j == 0)
{
Queue->Mem[0] = Queue->Mem[Queue->MaximumSize];
memmove(&Queue[Queue->First + 1], &Queue[Queue->First],
sizeof(PVOID) *
((Queue->MaximumSize - 1) - Queue->First));
}
}
Queue->First = (Queue->First + 1) % Queue->MaximumSize;
Queue->CurrentSize--;
return;
}
j = (j + 1) % Queue->MaximumSize;
}
KeBugCheck(0);
}
PVOID
MmGetDirtyPagesFromWorkingSet(struct _EPROCESS* Process)
{
return(NULL);
}
VOID
MmLockWorkingSet(PEPROCESS Process)
{
(VOID)KeWaitForMutexObject(&Process->WorkingSetLock,
0,
KernelMode,
FALSE,
NULL);
}
VOID MmUnlockWorkingSet(PEPROCESS Process)
{
KeReleaseMutex(&Process->WorkingSetLock, FALSE);
}
VOID
MmInitializeWorkingSet(PEPROCESS Process, PMADDRESS_SPACE AddressSpace)
{
PVOID BaseAddress;
ULONG MaximumLength;
PVOID FirstPage;
PVOID CurrentPhysicalAddress;
PVOID NextPhysicalAddress;
NTSTATUS Status;
/*
* The maximum number of pages in the working set is the maximum
* of the size of physical memory and the size of the user address space.
* In either case the maximum size is 3Mb.
*/
MaximumLength = MmStats.NrTotalPages - MmStats.NrReservedPages;
MaximumLength = min(MaximumLength, KERNEL_BASE / PAGESIZE);
MaximumLength = PAGE_ROUND_UP(MaximumLength * sizeof(ULONG));
FirstPage = MmAllocPageMaybeSwap(0);
if (FirstPage == NULL)
{
KeBugCheck(0);
}
MmLockAddressSpace(MmGetKernelAddressSpace());
BaseAddress = NULL;
Status = MmCreateMemoryArea(NULL,
MmGetKernelAddressSpace(),
MEMORY_AREA_WORKING_SET,
&BaseAddress,
MaximumLength,
0,
&AddressSpace->WorkingSetArea,
FALSE);
MmUnlockAddressSpace(MmGetKernelAddressSpace());
if (!NT_SUCCESS(Status))
{
KeBugCheck(0);
}
KiInitializeCircularQueue(&Process->AddressSpace.WSQueue,
MaximumLength,
(PVOID*)BaseAddress);
KeInitializeMutex(&Process->WorkingSetLock, 1);
Process->WorkingSetPage = BaseAddress;
Status = MmCreateVirtualMapping(NULL,
Process->WorkingSetPage,
PAGE_READWRITE,
(ULONG)FirstPage);
if (!NT_SUCCESS(Status))
{
KeBugCheck(0);
}
memset(Process->WorkingSetPage, 0, 4096);
}
(*NrFreedPages) = 0;
ULONG
MmPageOutPage(PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA MArea,
PVOID Address,
PBOOLEAN Ul)
{
ULONG Count;
switch(MArea->Type)
{
case MEMORY_AREA_SYSTEM:
*Ul = FALSE;
return(0);
case MEMORY_AREA_SECTION_VIEW_COMMIT:
Count = MmPageOutSectionView(AddressSpace,
MArea,
Address,
Ul);
return(Count);
case MEMORY_AREA_VIRTUAL_MEMORY:
Count = MmPageOutVirtualMemory(AddressSpace,
MArea,
Address,
Ul);
return(Count);
}
*Ul = FALSE;
return(0);
}
VOID
MmLruAdjustWorkingSet(PEPROCESS Process)
{
ULONG i, j;
PVOID CurrentAddress;
MmLockWorkingSet(Process);
j = Process->AddressSpace.WSQueue.First;
for (i = 0; i < Process->AddressSpace.WSQueue.CurrentSize; i++)
CurrentPhysicalAddress = MmGetLRUFirstUserPage();
while (CurrentPhysicalAddress != NULL && Target > 0)
{
CurrentAddress = Process->AddressSpace.WSQueue.Mem[j];
if (MmIsAccessedAndResetAccessPage(Process, CurrentAddress))
NextPhysicalAddress = MmGetLRUNextUserPage(CurrentPhysicalAddress);
Status = MmPageOutPhysicalAddress(CurrentPhysicalAddress);
if (NT_SUCCESS(Status))
{
DbgPrint("L");
KiRemoveItemCircularQueue(&Process->AddressSpace.WSQueue,
CurrentAddress);
KiInsertItemCircularQueue(&Process->AddressSpace.WSQueue,
CurrentAddress);
DPRINT("Succeeded\n");
Target--;
(*NrFreedPages)++;
}
j = (j + 1) % Process->AddressSpace.WSQueue.MaximumSize;
CurrentPhysicalAddress = NextPhysicalAddress;
}
MmUnlockWorkingSet(Process);
return(STATUS_SUCCESS);
}
ULONG
MmTrimWorkingSet(PEPROCESS Process, ULONG ReduceHint)
/*
* Reduce the size of the working set of a process
*/
{
ULONG i, j;
PMADDRESS_SPACE AddressSpace;
ULONG Count;
BOOLEAN Ul;
MmLockWorkingSet(Process);
AddressSpace = &Process->AddressSpace;
Count = 0;
j = AddressSpace->WSQueue.First;
for (i = 0; i < AddressSpace->WSQueue.CurrentSize; )
{
PVOID Address;
PMEMORY_AREA MArea;
Address = AddressSpace->WSQueue.Mem[j];
MArea = MmOpenMemoryAreaByAddress(AddressSpace, Address);
if (MArea == NULL)
{
KeBugCheck(0);
}
Count = Count + MmPageOutPage(AddressSpace, MArea, Address, &Ul);
if (Ul)
{
MmLockWorkingSet(Process);
j = AddressSpace->WSQueue.First;
i = 0;
}
else
{
j = (j + 1) % AddressSpace->WSQueue.MaximumSize;
i++;
}
if (Count == ReduceHint)
{
MmUnlockWorkingSet(Process);
return(Count);
}
}
MmUnlockWorkingSet(Process);
return(Count);
}
VOID
MmRemovePageFromWorkingSet(PEPROCESS Process, PVOID Address)
/*
* Remove a page from a process's working set.
*/
{
MmLockWorkingSet(Process);
KiRemoveItemCircularQueue(&Process->AddressSpace.WSQueue, Address);
MmUnlockWorkingSet(Process);
}
VOID
MmAddPageToWorkingSet(PEPROCESS Process, PVOID Address)
/*
* insert a page into a process's working set
*/
{
PMADDRESS_SPACE AddressSpace;
PVOID NextAddress;
AddressSpace = &Process->AddressSpace;
/*
* This can't happen unless there is a bug.
*/
if (AddressSpace->WSQueue.CurrentSize == AddressSpace->WSQueue.MaximumSize)
{
KeBugCheck(0);
}
/*
* lock the working set
*/
MmLockWorkingSet(Process);
/*
* if we are growing the working set then check to see if we need
* to allocate a page
*/
NextAddress =
(PVOID)PAGE_ROUND_DOWN((PVOID)&
AddressSpace->WSQueue.Mem[AddressSpace->WSQueue.Last]);
if (!MmIsPagePresent(NULL, NextAddress))
{
PVOID Page;
NTSTATUS Status;
/* FIXME: This isn't correct */
Page = MmAllocPageMaybeSwap(0);
if (Page == 0)
{
KeBugCheck(0);
}
Status = MmCreateVirtualMapping(NULL,
NextAddress,
PAGE_READWRITE,
(ULONG)Page);
if (!NT_SUCCESS(Status))
{
KeBugCheck(0);
}
}
/*
* Insert the page in the working set
*/
KiInsertItemCircularQueue(&AddressSpace->WSQueue, Address);
/*
* And unlock
*/
MmUnlockWorkingSet(Process);
}