- Changed all internal memory functions to use the page frame number instead of the physical address.

- Allowed MmCreateVirtualMapping to create mappings for more than one page (see the sketch below the commit summary).

svn path=/trunk/; revision=10331
Hartmut Birr 2004-08-01 07:24:59 +00:00
parent 5f1aa6ff61
commit fd411381da
26 changed files with 1029 additions and 1205 deletions
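
As a rough illustration of the new calling pattern (not part of the commit; the helper name, the fixed-size buffer, and the variable names are invented for this example), a caller that used to request pages one PHYSICAL_ADDRESS at a time and map each page with its own MmCreateVirtualMapping call now collects PFN_TYPE frame numbers and maps the whole range in a single call, matching the prototypes added to internal/mm.h:

/* Illustrative sketch only -- assumes the new prototypes from internal/mm.h:
 *   MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait, PPFN_TYPE AllocatedPage)
 *   MmCreateVirtualMapping(struct _EPROCESS* Process, PVOID Address, ULONG flProtect,
 *                          PPFN_TYPE Pages, ULONG PageCount)
 * Error cleanup is omitted for brevity. */
static NTSTATUS
MapKernelPages(PVOID BaseAddress, ULONG PageCount)
{
   PFN_TYPE Pfn[16];   /* one frame number per page; assumes PageCount <= 16 */
   ULONG i;
   NTSTATUS Status;

   for (i = 0; i < PageCount; i++)
   {
      /* Pages are now identified by frame number, not by PHYSICAL_ADDRESS. */
      Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Pfn[i]);
      if (!NT_SUCCESS(Status))
      {
         return Status;
      }
   }

   /* A single call maps the whole run of pages. */
   return MmCreateVirtualMapping(NULL,
                                 BaseAddress,
                                 PAGE_READWRITE,
                                 Pfn,
                                 PageCount);
}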

View file

@ -1,4 +1,4 @@
/* $Id: copy.c,v 1.27 2004/06/21 04:11:44 ion Exp $
/* $Id: copy.c,v 1.28 2004/08/01 07:24:57 hbirr Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -26,11 +26,7 @@
#define ROUND_DOWN(N, S) ((N) - ((N) % (S)))
#if defined(__GNUC__)
static PHYSICAL_ADDRESS CcZeroPage = (PHYSICAL_ADDRESS)0LL;
#else
static PHYSICAL_ADDRESS CcZeroPage = { 0 };
#endif
static PFN_TYPE CcZeroPage = 0;
#define MAX_ZERO_LENGTH (256 * 1024)
#define MAX_RW_LENGTH (64 * 1024)
@ -54,7 +50,7 @@ CcInitCacheZeroPage(VOID)
{
NTSTATUS Status;
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &CcZeroPage);
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &CcZeroPage);
if (!NT_SUCCESS(Status))
{
DbgPrint("Can't allocate CcZeroPage.\n");
@ -122,7 +118,7 @@ ReadCacheSegmentChain(PBCB Bcb, ULONG ReadOffset, ULONG Length,
PCACHE_SEGMENT current2;
ULONG current_size;
ULONG i;
ULONG offset;
PPFN_TYPE MdlPages;
/*
* Count the maximum number of bytes we could read starting
@ -142,17 +138,13 @@ ReadCacheSegmentChain(PBCB Bcb, ULONG ReadOffset, ULONG Length,
MmInitializeMdl(Mdl, NULL, current_size);
Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
current2 = current;
offset = 0;
MdlPages = (PPFN_TYPE)(Mdl + 1);
while (current2 != NULL && !current2->Valid)
{
for (i = 0; i < (Bcb->CacheSegmentSize / PAGE_SIZE); i++)
PVOID address = current2->BaseAddress;
for (i = 0; i < (Bcb->CacheSegmentSize / PAGE_SIZE); i++, address += PAGE_SIZE)
{
PVOID address;
PHYSICAL_ADDRESS page;
address = (char*)current2->BaseAddress + (i * PAGE_SIZE);
page = MmGetPhysicalAddressForProcess(NULL, address);
((PULONG)(Mdl + 1))[offset] = page.QuadPart >> PAGE_SHIFT;
offset++;
*MdlPages++ = MmGetPfnForProcess(NULL, address);
}
current2 = current2->NextInChain;
}
@ -649,7 +641,7 @@ CcZeroData (IN PFILE_OBJECT FileObject,
Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
{
((PULONG)(Mdl + 1))[i] = CcZeroPage.QuadPart >> PAGE_SHIFT;
((PPFN_TYPE)(Mdl + 1))[i] = CcZeroPage;
}
KeInitializeEvent(&Event, NotificationEvent, FALSE);
Status = IoPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);

View file

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: view.c,v 1.72 2004/02/26 19:29:55 hbirr Exp $
/* $Id: view.c,v 1.73 2004/08/01 07:24:57 hbirr Exp $
*
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/cc/view.c
@ -282,8 +282,8 @@ CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
ExReleaseFastMutex(&ViewLock);
for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
{
PHYSICAL_ADDRESS Page;
Page = MmGetPhysicalAddress((char*)current->BaseAddress + i * PAGE_SIZE);
PFN_TYPE Page;
Page = MmGetPhysicalAddress((char*)current->BaseAddress + i * PAGE_SIZE).QuadPart >> PAGE_SHIFT;
Status = MmPageOutPhysicalAddress(Page);
if (!NT_SUCCESS(Status))
{
@ -488,8 +488,10 @@ CcRosCreateCacheSegment(PBCB Bcb,
PLIST_ENTRY current_entry;
NTSTATUS Status;
KIRQL oldIrql;
PPFN_TYPE Pfn;
#ifdef CACHE_BITMAP
ULONG StartingOffset;
#else
#endif
PHYSICAL_ADDRESS BoundaryAddressMultiple;
@ -611,26 +613,24 @@ CcRosCreateCacheSegment(PBCB Bcb,
KEBUGCHECK(0);
}
#endif
Pfn = alloca(sizeof(PFN_TYPE) * (Bcb->CacheSegmentSize / PAGE_SIZE));
for (i = 0; i < (Bcb->CacheSegmentSize / PAGE_SIZE); i++)
{
PHYSICAL_ADDRESS Page;
Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &Page);
if (!NT_SUCCESS(Status))
{
KEBUGCHECK(0);
}
Status = MmCreateVirtualMapping(NULL,
(char*)current->BaseAddress + (i * PAGE_SIZE),
PAGE_READWRITE,
Page,
TRUE);
Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &Pfn[i]);
if (!NT_SUCCESS(Status))
{
KEBUGCHECK(0);
}
}
Status = MmCreateVirtualMapping(NULL,
current->BaseAddress,
PAGE_READWRITE,
Pfn,
Bcb->CacheSegmentSize / PAGE_SIZE);
if (!NT_SUCCESS(Status))
{
KEBUGCHECK(0);
}
return(STATUS_SUCCESS);
}
@ -770,12 +770,12 @@ CcRosRequestCacheSegment(PBCB Bcb,
#else
STATIC VOID
CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
PHYSICAL_ADDRESS PhysAddr, SWAPENTRY SwapEntry, BOOLEAN Dirty)
PFN_TYPE Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
{
assert(SwapEntry == 0);
if (PhysAddr.QuadPart != 0)
if (Page != 0)
{
MmReleasePageMemoryConsumer(MC_CACHE, PhysAddr);
MmReleasePageMemoryConsumer(MC_CACHE, Page);
}
}
#endif
@ -789,7 +789,7 @@ CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
ULONG i;
ULONG RegionSize;
ULONG Base;
PHYSICAL_ADDRESS PhysicalAddr;
PFN_TYPE Page;
KIRQL oldIrql;
#endif
DPRINT("Freeing cache segment %x\n", CacheSeg);
@ -803,8 +803,8 @@ CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
CacheSeg->BaseAddress + (i * PAGE_SIZE),
FALSE,
NULL,
&PhysicalAddr);
MmReleasePageMemoryConsumer(MC_CACHE, PhysicalAddr);
&Page);
MmReleasePageMemoryConsumer(MC_CACHE, Page);
}
KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);

View file

@ -20,6 +20,8 @@ struct _MM_RMAP_ENTRY;
struct _MM_PAGEOP;
typedef ULONG SWAPENTRY;
typedef ULONG PFN_TYPE, *PPFN_TYPE;
#define MEMORY_AREA_INVALID (0)
#define MEMORY_AREA_SECTION_VIEW (1)
#define MEMORY_AREA_CONTINUOUS_MEMORY (2)
@ -42,6 +44,11 @@ typedef ULONG SWAPENTRY;
#define NR_SECTION_PAGE_TABLES (1024)
#define NR_SECTION_PAGE_ENTRIES (1024)
#ifndef __USE_W32API
#define MM_LOWEST_USER_ADDRESS (4096)
#endif
#define STATUS_MM_RESTART_OPERATION ((NTSTATUS)0xD0000001)
/*
* Additional flags for protection attributes
@ -178,178 +185,6 @@ extern PVOID MmSystemRangeStart;
#endif /* __USE_W32API */
/* FUNCTIONS */
VOID MmLockAddressSpace(PMADDRESS_SPACE AddressSpace);
VOID MmUnlockAddressSpace(PMADDRESS_SPACE AddressSpace);
VOID MmInitializeKernelAddressSpace(VOID);
PMADDRESS_SPACE MmGetCurrentAddressSpace(VOID);
PMADDRESS_SPACE MmGetKernelAddressSpace(VOID);
NTSTATUS MmInitializeAddressSpace(struct _EPROCESS* Process,
PMADDRESS_SPACE AddressSpace);
NTSTATUS MmDestroyAddressSpace(PMADDRESS_SPACE AddressSpace);
PVOID STDCALL MmAllocateSection (IN ULONG Length);
NTSTATUS MmCreateMemoryArea(struct _EPROCESS* Process,
PMADDRESS_SPACE AddressSpace,
ULONG Type,
PVOID* BaseAddress,
ULONG Length,
ULONG Attributes,
MEMORY_AREA** Result,
BOOL FixedAddress,
BOOL TopDown,
PHYSICAL_ADDRESS BoundaryAddressMultiple OPTIONAL);
MEMORY_AREA* MmOpenMemoryAreaByAddress(PMADDRESS_SPACE AddressSpace,
PVOID Address);
ULONG MmFindGapAtAddress(PMADDRESS_SPACE AddressSpace,
PVOID Address);
NTSTATUS MmInitMemoryAreas(VOID);
VOID MiInitializeNonPagedPool(VOID);
NTSTATUS MmFreeMemoryArea(PMADDRESS_SPACE AddressSpace,
PVOID BaseAddress,
ULONG Length,
VOID (*FreePage)(PVOID Context, MEMORY_AREA* MemoryArea,
PVOID Address, PHYSICAL_ADDRESS PhysAddr, SWAPENTRY SwapEntry,
BOOLEAN Dirty),
PVOID FreePageContext);
VOID MmDumpMemoryAreas(PLIST_ENTRY ListHead);
NTSTATUS MmLockMemoryArea(MEMORY_AREA* MemoryArea);
NTSTATUS MmUnlockMemoryArea(MEMORY_AREA* MemoryArea);
NTSTATUS MmInitSectionImplementation(VOID);
#ifndef __USE_W32API
#define MM_LOWEST_USER_ADDRESS (4096)
#endif
PMEMORY_AREA MmSplitMemoryArea(struct _EPROCESS* Process,
PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA OriginalMemoryArea,
PVOID BaseAddress,
ULONG Length,
ULONG NewType,
ULONG NewAttributes);
PVOID
MmInitializePageList(PVOID FirstPhysKernelAddress,
PVOID LastPhysKernelAddress,
ULONG MemorySizeInPages,
ULONG LastKernelBase,
PADDRESS_RANGE BIOSMemoryMap,
ULONG AddressRangeCount);
PHYSICAL_ADDRESS
MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry);
VOID MmDereferencePage(PHYSICAL_ADDRESS PhysicalAddress);
VOID MmReferencePage(PHYSICAL_ADDRESS PhysicalAddress);
VOID MmDeletePageTable(struct _EPROCESS* Process,
PVOID Address);
NTSTATUS MmCopyMmInfo(struct _EPROCESS* Src,
struct _EPROCESS* Dest);
NTSTATUS MmReleaseMmInfo(struct _EPROCESS* Process);
NTSTATUS Mmi386ReleaseMmInfo(struct _EPROCESS* Process);
VOID
MmDeleteVirtualMapping(struct _EPROCESS* Process,
PVOID Address,
BOOL FreePage,
BOOL* WasDirty,
PHYSICAL_ADDRESS* PhysicalPage);
VOID MmUpdateStackPageDir(PULONG LocalPageDir, struct _KTHREAD* KThread);
#define MM_PAGE_CLEAN (0)
#define MM_PAGE_DIRTY (1)
VOID MmBuildMdlFromPages(PMDL Mdl, PULONG Pages);
PVOID MmGetMdlPageAddress(PMDL Mdl, PVOID Offset);
VOID MiShutdownMemoryManager(VOID);
PHYSICAL_ADDRESS
MmGetPhysicalAddressForProcess(struct _EPROCESS* Process,
PVOID Address);
NTSTATUS STDCALL
MmUnmapViewOfSection(struct _EPROCESS* Process, PVOID BaseAddress);
VOID MmInitPagingFile(VOID);
/* FIXME: it should be in ddk/mmfuncs.h */
NTSTATUS
STDCALL
MmCreateSection (
OUT PSECTION_OBJECT * SectionObject,
IN ACCESS_MASK DesiredAccess,
IN POBJECT_ATTRIBUTES ObjectAttributes OPTIONAL,
IN PLARGE_INTEGER MaximumSize,
IN ULONG SectionPageProtection,
IN ULONG AllocationAttributes,
IN HANDLE FileHandle OPTIONAL,
IN PFILE_OBJECT File OPTIONAL
);
NTSTATUS MmPageFault(ULONG Cs,
PULONG Eip,
PULONG Eax,
ULONG Cr2,
ULONG ErrorCode);
NTSTATUS
MmAccessFault(KPROCESSOR_MODE Mode,
ULONG Address,
BOOLEAN FromMdl);
NTSTATUS
MmNotPresentFault(KPROCESSOR_MODE Mode,
ULONG Address,
BOOLEAN FromMdl);
NTSTATUS
MmNotPresentFaultVirtualMemory(PMADDRESS_SPACE AddressSpace,
MEMORY_AREA* MemoryArea,
PVOID Address,
BOOLEAN Locked);
NTSTATUS
MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
MEMORY_AREA* MemoryArea,
PVOID Address,
BOOLEAN Locked);
NTSTATUS MmWaitForPage(PVOID Page);
VOID MmClearWaitPage(PVOID Page);
VOID MmSetWaitPage(PVOID Page);
BOOLEAN MmIsDirtyPage(struct _EPROCESS* Process, PVOID Address);
BOOLEAN MmIsPageTablePresent(PVOID PAddress);
NTSTATUS
MmPageOutVirtualMemory(PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA MemoryArea,
PVOID Address,
struct _MM_PAGEOP* PageOp);
NTSTATUS
MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA MemoryArea,
PVOID Address,
struct _MM_PAGEOP* PageOp);
MEMORY_AREA* MmOpenMemoryAreaByRegion(PMADDRESS_SPACE AddressSpace,
PVOID Address,
ULONG Length);
PVOID MmFindGap(PMADDRESS_SPACE AddressSpace, ULONG Length, BOOL TopDown);
VOID ExUnmapPage(PVOID Addr);
PVOID ExAllocatePage(VOID);
BOOLEAN MmReserveSwapPages(ULONG Nr);
VOID MmDereserveSwapPages(ULONG Nr);
SWAPENTRY MmAllocSwapPage(VOID);
VOID MmFreeSwapPage(SWAPENTRY Entry);
VOID MmInit1(ULONG FirstKernelPhysAddress,
ULONG LastKernelPhysAddress,
ULONG LastKernelAddress,
PADDRESS_RANGE BIOSMemoryMap,
ULONG AddressRangeCount,
ULONG MaxMemInMeg);
VOID MmInit2(VOID);
VOID MmInit3(VOID);
VOID MiFreeInitMemory(VOID);
#if 0
NTSTATUS MmInitPagerThread(VOID);
#endif
VOID MiInitBalancerThread(VOID);
NTSTATUS MmInitZeroPageThread(VOID);
VOID MiInitKernelMap(VOID);
NTSTATUS MmCreatePageTable(PVOID PAddress);
typedef struct
{
ULONG NrTotalPages;
@ -366,50 +201,8 @@ typedef struct
extern MM_STATS MmStats;
PVOID
MmGetDirtyPagesFromWorkingSet(struct _EPROCESS* Process);
NTSTATUS
MmWriteToSwapPage(SWAPENTRY SwapEntry, PHYSICAL_ADDRESS* Page);
NTSTATUS
MmReadFromSwapPage(SWAPENTRY SwapEntry, PHYSICAL_ADDRESS* Page);
VOID
MmSetFlagsPage(PHYSICAL_ADDRESS PhysicalAddress, ULONG Flags);
ULONG
MmGetFlagsPage(PHYSICAL_ADDRESS PhysicalAddress);
VOID MmSetSavedSwapEntryPage(PHYSICAL_ADDRESS PhysicalAddress,
SWAPENTRY SavedSwapEntry);
SWAPENTRY MmGetSavedSwapEntryPage(PHYSICAL_ADDRESS PhysicalAddress);
VOID MmSetCleanPage(struct _EPROCESS* Process, PVOID Address);
VOID MmLockPage(PHYSICAL_ADDRESS PhysicalPage);
VOID MmUnlockPage(PHYSICAL_ADDRESS PhysicalPage);
ULONG MmGetLockCountPage(PHYSICAL_ADDRESS PhysicalPage);
NTSTATUS MmSafeCopyFromUser(PVOID Dest, const VOID *Src, ULONG Count);
NTSTATUS MmSafeCopyToUser(PVOID Dest, const VOID *Src, ULONG Count);
NTSTATUS
MmCreatePhysicalMemorySection(VOID);
PHYSICAL_ADDRESS
MmGetContinuousPages(ULONG NumberOfBytes,
PHYSICAL_ADDRESS LowestAcceptableAddress,
PHYSICAL_ADDRESS HighestAcceptableAddress,
ULONG Alignment);
#define MM_PHYSICAL_PAGE_MPW_PENDING (0x8)
NTSTATUS
MmAccessFaultSectionView(PMADDRESS_SPACE AddressSpace,
MEMORY_AREA* MemoryArea,
PVOID Address,
BOOLEAN Locked);
ULONG
MmGetPageProtect(struct _EPROCESS* Process, PVOID Address);
PVOID
ExAllocatePageWithPhysPage(PHYSICAL_ADDRESS PhysPage);
ULONG
MmGetReferenceCountPage(PHYSICAL_ADDRESS PhysicalAddress);
BOOLEAN
MmIsUsablePage(PHYSICAL_ADDRESS PhysicalAddress);
#define MM_PAGEOP_PAGEIN (1)
#define MM_PAGEOP_PAGEOUT (2)
#define MM_PAGEOP_PAGESYNCH (3)
@ -446,86 +239,12 @@ typedef struct _MM_PAGEOP
ULONG Offset;
} MM_PAGEOP, *PMM_PAGEOP;
VOID
MmReleasePageOp(PMM_PAGEOP PageOp);
PMM_PAGEOP
MmGetPageOp(PMEMORY_AREA MArea, ULONG Pid, PVOID Address,
PMM_SECTION_SEGMENT Segment, ULONG Offset, ULONG OpType, BOOL First);
PMM_PAGEOP
MmCheckForPageOp(PMEMORY_AREA MArea, ULONG Pid, PVOID Address,
PMM_SECTION_SEGMENT Segment, ULONG Offset);
VOID
MmInitializePageOp(VOID);
VOID
MiDebugDumpNonPagedPool(BOOLEAN NewOnly);
VOID
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly);
VOID
MmMarkPageMapped(PHYSICAL_ADDRESS PhysicalAddress);
VOID
MmMarkPageUnmapped(PHYSICAL_ADDRESS PhysicalAddress);
VOID
MmFreeSectionSegments(PFILE_OBJECT FileObject);
VOID
MmFreeVirtualMemory(struct _EPROCESS* Process, PMEMORY_AREA MemoryArea);
NTSTATUS
MiCopyFromUserPage(PHYSICAL_ADDRESS DestPhysPage, PVOID SourceAddress);
NTSTATUS
MiZeroPage(PHYSICAL_ADDRESS PhysPage);
BOOLEAN
MmIsAccessedAndResetAccessPage(struct _EPROCESS* Process, PVOID Address);
#define STATUS_MM_RESTART_OPERATION ((NTSTATUS)0xD0000001)
NTSTATUS
MmCreateVirtualMappingForKernel(PVOID Address,
ULONG flProtect,
PHYSICAL_ADDRESS PhysicalAddress);
NTSTATUS MmCommitPagedPoolAddress(PVOID Address, BOOLEAN Locked);
NTSTATUS MmCreateVirtualMapping(struct _EPROCESS* Process,
PVOID Address,
ULONG flProtect,
PHYSICAL_ADDRESS PhysicalAddress,
BOOLEAN MayWait);
NTSTATUS
MmCreateVirtualMappingUnsafe(struct _EPROCESS* Process,
PVOID Address,
ULONG flProtect,
PHYSICAL_ADDRESS PhysicalAddress,
BOOLEAN MayWait);
VOID MmSetPageProtect(struct _EPROCESS* Process,
PVOID Address,
ULONG flProtect);
BOOLEAN MmIsPagePresent(struct _EPROCESS* Process,
PVOID Address);
VOID MmInitGlobalKernelPageDirectory(VOID);
/* Memory balancing. */
VOID
MmInitializeMemoryConsumer(ULONG Consumer,
NTSTATUS (*Trim)(ULONG Target, ULONG Priority,
PULONG NrFreed));
VOID
MmInitializeBalancer(ULONG NrAvailablePages, ULONG NrSystemPages);
NTSTATUS
MmReleasePageMemoryConsumer(ULONG Consumer, PHYSICAL_ADDRESS Page);
NTSTATUS
MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
PHYSICAL_ADDRESS* AllocatedPage);
#define MC_CACHE (0)
#define MC_USER (1)
#define MC_PPOOL (2)
#define MC_NPPOOL (3)
#define MC_MAXIMUM (4)
typedef struct _MM_MEMORY_CONSUMER
{
ULONG PagesUsed;
@ -536,56 +255,8 @@ MM_MEMORY_CONSUMER, *PMM_MEMORY_CONSUMER;
extern MM_MEMORY_CONSUMER MiMemoryConsumers[MC_MAXIMUM];
VOID
MmSetRmapListHeadPage(PHYSICAL_ADDRESS PhysicalAddress,
struct _MM_RMAP_ENTRY* ListHead);
struct _MM_RMAP_ENTRY*
MmGetRmapListHeadPage(PHYSICAL_ADDRESS PhysicalAddress);
VOID
MmInsertRmap(PHYSICAL_ADDRESS PhysicalAddress, PEPROCESS Process,
PVOID Address);
VOID
MmDeleteAllRmaps(PHYSICAL_ADDRESS PhysicalAddress, PVOID Context,
VOID (*DeleteMapping)(PVOID Context, PEPROCESS Process, PVOID Address));
VOID
MmDeleteRmap(PHYSICAL_ADDRESS PhysicalAddress, PEPROCESS Process,
PVOID Address);
VOID
MmInitializeRmapList(VOID);
PHYSICAL_ADDRESS
MmGetLRUNextUserPage(PHYSICAL_ADDRESS PreviousPhysicalAddress);
PHYSICAL_ADDRESS
MmGetLRUFirstUserPage(VOID);
NTSTATUS
MmPageOutPhysicalAddress(PHYSICAL_ADDRESS PhysicalAddress);
NTSTATUS
MmTrimUserMemory(ULONG Target, ULONG Priority, PULONG NrFreedPages);
VOID
MmDisableVirtualMapping(PEPROCESS Process, PVOID Address, BOOL* WasDirty, PHYSICAL_ADDRESS* PhysicalAddr);
VOID MmEnableVirtualMapping(PEPROCESS Process, PVOID Address);
VOID
MmDeletePageFileMapping(PEPROCESS Process, PVOID Address,
SWAPENTRY* SwapEntry);
NTSTATUS
MmCreatePageFileMapping(PEPROCESS Process,
PVOID Address,
SWAPENTRY SwapEntry);
BOOLEAN MmIsPageSwapEntry(PEPROCESS Process, PVOID Address);
VOID
MmTransferOwnershipPage(PHYSICAL_ADDRESS PhysicalAddress, ULONG NewConsumer);
VOID MmSetDirtyPage(PEPROCESS Process, PVOID Address);
VOID
MmInitializeMdlImplementation(VOID);
extern PHYSICAL_ADDRESS MmSharedDataPagePhysicalAddress;
struct _KTRAP_FRAME;
NTSTATUS STDCALL
MmDumpToPagingFile(ULONG BugCode,
ULONG BugCodeParameter1,
ULONG BugCodeParameter2,
ULONG BugCodeParameter3,
ULONG BugCodeParameter4,
struct _KTRAP_FRAME* TrapFrame);
typedef VOID (*PMM_ALTER_REGION_FUNC)(PMADDRESS_SPACE AddressSpace,
PVOID BaseAddress, ULONG Length,
@ -600,75 +271,474 @@ typedef struct _MM_REGION
LIST_ENTRY RegionListEntry;
} MM_REGION, *PMM_REGION;
NTSTATUS
MmAlterRegion(PMADDRESS_SPACE AddressSpace, PVOID BaseAddress,
PLIST_ENTRY RegionListHead, PVOID StartAddress, ULONG Length,
ULONG NewType, ULONG NewProtect,
PMM_ALTER_REGION_FUNC AlterFunc);
VOID
MmInitialiseRegion(PLIST_ENTRY RegionListHead, ULONG Length, ULONG Type,
ULONG Protect);
PMM_REGION
MmFindRegion(PVOID BaseAddress, PLIST_ENTRY RegionListHead, PVOID Address,
PVOID* RegionBaseAddress);
/* FUNCTIONS */
/* aspace.c ******************************************************************/
VOID MmLockAddressSpace(PMADDRESS_SPACE AddressSpace);
VOID MmUnlockAddressSpace(PMADDRESS_SPACE AddressSpace);
VOID MmInitializeKernelAddressSpace(VOID);
PMADDRESS_SPACE MmGetCurrentAddressSpace(VOID);
PMADDRESS_SPACE MmGetKernelAddressSpace(VOID);
NTSTATUS MmInitializeAddressSpace(struct _EPROCESS* Process,
PMADDRESS_SPACE AddressSpace);
NTSTATUS MmDestroyAddressSpace(PMADDRESS_SPACE AddressSpace);
/* marea.c *******************************************************************/
NTSTATUS MmCreateMemoryArea(struct _EPROCESS* Process,
PMADDRESS_SPACE AddressSpace,
ULONG Type,
PVOID* BaseAddress,
ULONG Length,
ULONG Attributes,
MEMORY_AREA** Result,
BOOL FixedAddress,
BOOL TopDown,
PHYSICAL_ADDRESS BoundaryAddressMultiple OPTIONAL);
MEMORY_AREA* MmOpenMemoryAreaByAddress(PMADDRESS_SPACE AddressSpace,
PVOID Address);
ULONG MmFindGapAtAddress(PMADDRESS_SPACE AddressSpace,
PVOID Address);
NTSTATUS MmInitMemoryAreas(VOID);
NTSTATUS MmFreeMemoryArea(PMADDRESS_SPACE AddressSpace,
PVOID BaseAddress,
ULONG Length,
VOID (*FreePage)(PVOID Context, MEMORY_AREA* MemoryArea,
PVOID Address, PFN_TYPE Page, SWAPENTRY SwapEntry,
BOOLEAN Dirty),
PVOID FreePageContext);
VOID MmDumpMemoryAreas(PLIST_ENTRY ListHead);
NTSTATUS MmLockMemoryArea(MEMORY_AREA* MemoryArea);
NTSTATUS MmUnlockMemoryArea(MEMORY_AREA* MemoryArea);
PMEMORY_AREA MmSplitMemoryArea(struct _EPROCESS* Process,
PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA OriginalMemoryArea,
PVOID BaseAddress,
ULONG Length,
ULONG NewType,
ULONG NewAttributes);
MEMORY_AREA* MmOpenMemoryAreaByRegion(PMADDRESS_SPACE AddressSpace,
PVOID Address,
ULONG Length);
PVOID MmFindGap(PMADDRESS_SPACE AddressSpace, ULONG Length, BOOL TopDown);
/* npool.c *******************************************************************/
VOID MiDebugDumpNonPagedPool(BOOLEAN NewOnly);
VOID MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly);
VOID MiInitializeNonPagedPool(VOID);
PVOID MmGetMdlPageAddress(PMDL Mdl, PVOID Offset);
/* mdl.c *********************************************************************/
VOID MmBuildMdlFromPages(PMDL Mdl, PULONG Pages);
/* mminit.c ******************************************************************/
VOID MiShutdownMemoryManager(VOID);
VOID MmInit1(ULONG FirstKernelPhysAddress,
ULONG LastKernelPhysAddress,
ULONG LastKernelAddress,
PADDRESS_RANGE BIOSMemoryMap,
ULONG AddressRangeCount,
ULONG MaxMemInMeg);
VOID MmInit2(VOID);
VOID MmInit3(VOID);
VOID MiFreeInitMemory(VOID);
VOID MmInitializeMdlImplementation(VOID);
/* pagefile.c ****************************************************************/
SWAPENTRY MmAllocSwapPage(VOID);
VOID MmDereserveSwapPages(ULONG Nr);
VOID MmFreeSwapPage(SWAPENTRY Entry);
VOID MmInitPagingFile(VOID);
NTSTATUS MmReadFromSwapPage(SWAPENTRY SwapEntry, PFN_TYPE Page);
BOOLEAN MmReserveSwapPages(ULONG Nr);
NTSTATUS MmWriteToSwapPage(SWAPENTRY SwapEntry, PFN_TYPE Page);
NTSTATUS STDCALL
MmDumpToPagingFile(ULONG BugCode,
ULONG BugCodeParameter1,
ULONG BugCodeParameter2,
ULONG BugCodeParameter3,
ULONG BugCodeParameter4,
struct _KTRAP_FRAME* TrapFrame);
BOOLEAN MmIsAvailableSwapPage(VOID);
VOID MmShowOutOfSpaceMessagePagingFile(VOID);
/* i386/pfault.c *************************************************************/
NTSTATUS MmPageFault(ULONG Cs,
PULONG Eip,
PULONG Eax,
ULONG Cr2,
ULONG ErrorCode);
/* mm.c **********************************************************************/
NTSTATUS MmAccessFault(KPROCESSOR_MODE Mode,
ULONG Address,
BOOLEAN FromMdl);
NTSTATUS MmNotPresentFault(KPROCESSOR_MODE Mode,
ULONG Address,
BOOLEAN FromMdl);
/* anonmem.c *****************************************************************/
NTSTATUS MmNotPresentFaultVirtualMemory(PMADDRESS_SPACE AddressSpace,
MEMORY_AREA* MemoryArea,
PVOID Address,
BOOLEAN Locked);
NTSTATUS MmPageOutVirtualMemory(PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA MemoryArea,
PVOID Address,
struct _MM_PAGEOP* PageOp);
NTSTATUS STDCALL
MmQueryAnonMem(PMEMORY_AREA MemoryArea,
PVOID Address,
PMEMORY_BASIC_INFORMATION Info,
PULONG ResultLength);
VOID MmFreeVirtualMemory(struct _EPROCESS* Process, PMEMORY_AREA MemoryArea);
NTSTATUS MmProtectAnonMem(PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA MemoryArea,
PVOID BaseAddress,
ULONG Length,
ULONG Protect,
PULONG OldProtect);
NTSTATUS MmWritePageVirtualMemory(PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA MArea,
PVOID Address,
PMM_PAGEOP PageOp);
/* kmap.c ********************************************************************/
PVOID ExAllocatePage(VOID);
VOID ExUnmapPage(PVOID Addr);
VOID MiInitKernelMap(VOID);
PVOID ExAllocatePageWithPhysPage(PFN_TYPE Page);
NTSTATUS MiCopyFromUserPage(PFN_TYPE Page, PVOID SourceAddress);
NTSTATUS MiZeroPage(PFN_TYPE Page);
/* memsafe.s *****************************************************************/
NTSTATUS MmSafeCopyFromUser(PVOID Dest, const VOID *Src, ULONG Count);
NTSTATUS MmSafeCopyToUser(PVOID Dest, const VOID *Src, ULONG Count);
/* pageop.c ******************************************************************/
VOID
MmReleasePageOp(PMM_PAGEOP PageOp);
PMM_PAGEOP
MmGetPageOp(PMEMORY_AREA MArea, ULONG Pid, PVOID Address,
PMM_SECTION_SEGMENT Segment, ULONG Offset, ULONG OpType, BOOL First);
PMM_PAGEOP
MmCheckForPageOp(PMEMORY_AREA MArea, ULONG Pid, PVOID Address,
PMM_SECTION_SEGMENT Segment, ULONG Offset);
VOID
MmInitializePageOp(VOID);
/* balance.c *****************************************************************/
VOID MmInitializeMemoryConsumer(ULONG Consumer,
NTSTATUS (*Trim)(ULONG Target, ULONG Priority, PULONG NrFreed));
VOID MmInitializeBalancer(ULONG NrAvailablePages, ULONG NrSystemPages);
NTSTATUS MmReleasePageMemoryConsumer(ULONG Consumer, PFN_TYPE Page);
NTSTATUS MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait, PPFN_TYPE AllocatedPage);
VOID MiInitBalancerThread(VOID);
VOID MmRebalanceMemoryConsumers(VOID);
/* rmap.c **************************************************************/
VOID MmSetRmapListHeadPage(PFN_TYPE Page, struct _MM_RMAP_ENTRY* ListHead);
struct _MM_RMAP_ENTRY* MmGetRmapListHeadPage(PFN_TYPE Page);
VOID MmInsertRmap(PFN_TYPE Page, PEPROCESS Process, PVOID Address);
VOID MmDeleteAllRmaps(PFN_TYPE Page, PVOID Context,
VOID (*DeleteMapping)(PVOID Context, PEPROCESS Process, PVOID Address));
VOID MmDeleteRmap(PFN_TYPE Page, PEPROCESS Process, PVOID Address);
VOID MmInitializeRmapList(VOID);
VOID MmSetCleanAllRmaps(PFN_TYPE Page);
VOID MmSetDirtyAllRmaps(PFN_TYPE Page);
BOOL MmIsDirtyPageRmap(PFN_TYPE Page);
NTSTATUS MmWritePagePhysicalAddress(PFN_TYPE Page);
NTSTATUS MmPageOutPhysicalAddress(PFN_TYPE Page);
/* freelist.c **********************************************************/
PFN_TYPE MmGetLRUNextUserPage(PFN_TYPE PreviousPage);
PFN_TYPE MmGetLRUFirstUserPage(VOID);
VOID MmSetLRULastPage(PFN_TYPE Page);
VOID MmLockPage(PFN_TYPE Page);
VOID MmUnlockPage(PFN_TYPE Page);
ULONG MmGetLockCountPage(PFN_TYPE Page);
PVOID MmInitializePageList(PVOID FirstPhysKernelAddress,
PVOID LastPhysKernelAddress,
ULONG MemorySizeInPages,
ULONG LastKernelBase,
PADDRESS_RANGE BIOSMemoryMap,
ULONG AddressRangeCount);
PFN_TYPE MmGetContinuousPages(ULONG NumberOfBytes,
PHYSICAL_ADDRESS LowestAcceptableAddress,
PHYSICAL_ADDRESS HighestAcceptableAddress,
ULONG Alignment);
NTSTATUS MmInitZeroPageThread(VOID);
/* i386/page.c *********************************************************/
NTSTATUS MmCreateVirtualMappingForKernel(PVOID Address,
ULONG flProtect,
PPFN_TYPE Pages,
ULONG PageCount);
NTSTATUS MmCommitPagedPoolAddress(PVOID Address, BOOLEAN Locked);
NTSTATUS MmCreateVirtualMapping(struct _EPROCESS* Process,
PVOID Address,
ULONG flProtect,
PPFN_TYPE Pages,
ULONG PageCount);
NTSTATUS MmCreateVirtualMappingUnsafe(struct _EPROCESS* Process,
PVOID Address,
ULONG flProtect,
PPFN_TYPE Pages,
ULONG PageCount);
ULONG MmGetPageProtect(struct _EPROCESS* Process, PVOID Address);
VOID MmSetPageProtect(struct _EPROCESS* Process,
PVOID Address,
ULONG flProtect);
BOOLEAN MmIsPagePresent(struct _EPROCESS* Process,
PVOID Address);
VOID MmInitGlobalKernelPageDirectory(VOID);
VOID MmDisableVirtualMapping(PEPROCESS Process, PVOID Address, BOOL* WasDirty, PPFN_TYPE Page);
VOID MmEnableVirtualMapping(PEPROCESS Process, PVOID Address);
VOID MmRawDeleteVirtualMapping(PVOID Address);
VOID MmDeletePageFileMapping(PEPROCESS Process, PVOID Address, SWAPENTRY* SwapEntry);
NTSTATUS MmCreatePageFileMapping(PEPROCESS Process, PVOID Address, SWAPENTRY SwapEntry);
BOOLEAN MmIsPageSwapEntry(PEPROCESS Process, PVOID Address);
VOID MmTransferOwnershipPage(PFN_TYPE Page, ULONG NewConsumer);
VOID MmSetDirtyPage(PEPROCESS Process, PVOID Address);
PFN_TYPE MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry);
VOID MmDereferencePage(PFN_TYPE Page);
VOID MmReferencePage(PFN_TYPE Page);
BOOLEAN MmIsAccessedAndResetAccessPage(struct _EPROCESS* Process, PVOID Address);
ULONG MmGetReferenceCountPage(PFN_TYPE Page);
BOOLEAN MmIsUsablePage(PFN_TYPE Page);
VOID MmSetFlagsPage(PFN_TYPE Page, ULONG Flags);
ULONG MmGetFlagsPage(PFN_TYPE Page);
VOID MmSetSavedSwapEntryPage(PFN_TYPE Page, SWAPENTRY SavedSwapEntry);
SWAPENTRY MmGetSavedSwapEntryPage(PFN_TYPE Page);
VOID MmSetCleanPage(struct _EPROCESS* Process, PVOID Address);
NTSTATUS MmCreatePageTable(PVOID PAddress);
VOID MmDeletePageTable(struct _EPROCESS* Process, PVOID Address);
PFN_TYPE MmGetPfnForProcess(struct _EPROCESS* Process, PVOID Address);
NTSTATUS MmCopyMmInfo(struct _EPROCESS* Src, struct _EPROCESS* Dest);
NTSTATUS MmReleaseMmInfo(struct _EPROCESS* Process);
NTSTATUS Mmi386ReleaseMmInfo(struct _EPROCESS* Process);
VOID MmDeleteVirtualMapping(struct _EPROCESS* Process,
PVOID Address,
BOOL FreePage,
BOOL* WasDirty,
PPFN_TYPE Page);
VOID MmUpdateStackPageDir(PULONG LocalPageDir, struct _KTHREAD* KThread);
BOOLEAN MmIsDirtyPage(struct _EPROCESS* Process, PVOID Address);
VOID MmMarkPageMapped(PFN_TYPE Page);
VOID MmMarkPageUnmapped(PFN_TYPE Page);
/* wset.c ********************************************************************/
NTSTATUS MmTrimUserMemory(ULONG Target, ULONG Priority, PULONG NrFreedPages);
/* region.c ************************************************************/
NTSTATUS MmAlterRegion(PMADDRESS_SPACE AddressSpace, PVOID BaseAddress,
PLIST_ENTRY RegionListHead, PVOID StartAddress, ULONG Length,
ULONG NewType, ULONG NewProtect,
PMM_ALTER_REGION_FUNC AlterFunc);
VOID MmInitialiseRegion(PLIST_ENTRY RegionListHead, ULONG Length, ULONG Type,
ULONG Protect);
PMM_REGION MmFindRegion(PVOID BaseAddress, PLIST_ENTRY RegionListHead, PVOID Address,
PVOID* RegionBaseAddress);
/* section.c *****************************************************************/
PVOID STDCALL
MmAllocateSection (IN ULONG Length);
NTSTATUS STDCALL
MmQuerySectionView(PMEMORY_AREA MemoryArea,
PVOID Address,
PMEMORY_BASIC_INFORMATION Info,
PULONG ResultLength);
NTSTATUS
MmProtectAnonMem(PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA MemoryArea,
PVOID BaseAddress,
ULONG Length,
ULONG Protect,
PULONG OldProtect);
NTSTATUS
NTSTATUS
MmProtectSectionView(PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA MemoryArea,
PVOID BaseAddress,
ULONG Length,
ULONG Protect,
PULONG OldProtect);
NTSTATUS
MmWritePageSectionView(PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA MArea,
PVOID Address,
PMM_PAGEOP PageOp);
NTSTATUS MmInitSectionImplementation(VOID);
NTSTATUS STDCALL
MmUnmapViewOfSection(struct _EPROCESS* Process, PVOID BaseAddress);
/* FIXME: it should be in ddk/mmfuncs.h */
NTSTATUS STDCALL
MmCreateSection (OUT PSECTION_OBJECT * SectionObject,
IN ACCESS_MASK DesiredAccess,
IN POBJECT_ATTRIBUTES ObjectAttributes OPTIONAL,
IN PLARGE_INTEGER MaximumSize,
IN ULONG SectionPageProtection,
IN ULONG AllocationAttributes,
IN HANDLE FileHandle OPTIONAL,
IN PFILE_OBJECT File OPTIONAL);
NTSTATUS
MmWritePageVirtualMemory(PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA MArea,
MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
MEMORY_AREA* MemoryArea,
PVOID Address,
BOOLEAN Locked);
NTSTATUS
MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA MemoryArea,
PVOID Address,
struct _MM_PAGEOP* PageOp);
NTSTATUS
MmCreatePhysicalMemorySection(VOID);
NTSTATUS
MmAccessFaultSectionView(PMADDRESS_SPACE AddressSpace,
MEMORY_AREA* MemoryArea,
PVOID Address,
PMM_PAGEOP PageOp);
BOOLEAN Locked);
VOID
MmSetCleanAllRmaps(PHYSICAL_ADDRESS PhysicalAddress);
VOID
MmSetDirtyAllRmaps(PHYSICAL_ADDRESS PhysicalAddress);
NTSTATUS
MmWritePagePhysicalAddress(PHYSICAL_ADDRESS PhysicalAddress);
BOOL
MmIsDirtyPageRmap(PHYSICAL_ADDRESS PhysicalAddress);
MmFreeSectionSegments(PFILE_OBJECT FileObject);
/* mpw.c *********************************************************************/
NTSTATUS MmInitMpwThread(VOID);
BOOLEAN
MmIsAvailableSwapPage(VOID);
VOID
MmShowOutOfSpaceMessagePagingFile(VOID);
VOID
MmRebalanceMemoryConsumers(VOID);
BOOLEAN
MiIsPagerThread(VOID);
VOID
MiStartPagerThread(VOID);
VOID
MmSetLRULastPage(PHYSICAL_ADDRESS PhysicalAddress);
VOID
MmRawDeleteVirtualMapping(PVOID Address);
VOID
MiStopPagerThread(VOID);
/* pager.c *******************************************************************/
BOOLEAN MiIsPagerThread(VOID);
VOID MiStartPagerThread(VOID);
VOID MiStopPagerThread(VOID);
#endif

View file

@ -1,4 +1,4 @@
/* $Id: mdl.c,v 1.15 2004/05/15 22:51:38 hbirr Exp $
/* $Id: mdl.c,v 1.16 2004/08/01 07:24:57 hbirr Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -13,6 +13,7 @@
#include <ddk/ntddk.h>
#include <internal/pool.h>
#include <internal/mm.h>
#define NDEBUG
#include <internal/debug.h>
@ -91,8 +92,8 @@ IoBuildPartialMdl(PMDL SourceMdl,
PVOID VirtualAddress,
ULONG Length)
{
PULONG TargetPages = (PULONG)(TargetMdl + 1);
PULONG SourcePages = (PULONG)(SourceMdl + 1);
PPFN_TYPE TargetPages = (PPFN_TYPE)(TargetMdl + 1);
PPFN_TYPE SourcePages = (PPFN_TYPE)(SourceMdl + 1);
ULONG Count;
ULONG Delta;
@ -116,7 +117,7 @@ IoBuildPartialMdl(PMDL SourceMdl,
DPRINT("Delta %d, Count %d\n", Delta, Count);
memcpy(TargetPages, SourcePages, Count * sizeof(ULONG));
memcpy(TargetPages, SourcePages, Count * sizeof(PFN_TYPE));
}

View file

@ -40,7 +40,7 @@
ULONG KiPcrInitDone = 0;
static ULONG PcrsAllocated = 0;
static PHYSICAL_ADDRESS PcrPages[MAXIMUM_PROCESSORS];
static PFN_TYPE PcrPages[MAXIMUM_PROCESSORS];
ULONG Ke386CpuidFlags, Ke386CpuidFlags2, Ke386CpuidExFlags;
ULONG Ke386Cpuid = 300;
@ -105,7 +105,7 @@ Ki386GetCpuId(VOID)
VOID INIT_FUNCTION
KePrepareForApplicationProcessorInit(ULONG Id)
{
MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &PcrPages[Id]);
MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &PcrPages[Id]);
KiGdtPrepareForApplicationProcessorInit(Id);
}
@ -122,7 +122,8 @@ KeApplicationProcessorInit(VOID)
KPCR = (PKPCR)(KPCR_BASE + (Offset * PAGE_SIZE));
MmCreateVirtualMappingForKernel((PVOID)KPCR,
PAGE_READWRITE,
PcrPages[Offset]);
&PcrPages[Offset],
1);
memset(KPCR, 0, PAGE_SIZE);
KPCR->ProcessorNumber = (UCHAR)Offset;
KPCR->Self = KPCR;

View file

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: kthread.c,v 1.47 2004/06/23 22:31:51 ion Exp $
/* $Id: kthread.c,v 1.48 2004/08/01 07:24:59 hbirr Exp $
*
* FILE: ntoskrnl/ke/kthread.c
* PURPOSE: Microkernel thread support
@ -58,12 +58,12 @@ KeCapturePersistentThreadState(
VOID
KeFreeStackPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
PHYSICAL_ADDRESS PhysAddr, SWAPENTRY SwapEntry, BOOLEAN Dirty)
PFN_TYPE Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
{
assert(SwapEntry == 0);
if (PhysAddr.QuadPart != 0)
if (Page != 0)
{
MmReleasePageMemoryConsumer(MC_NPPOOL, PhysAddr);
MmReleasePageMemoryConsumer(MC_NPPOOL, Page);
}
}
@ -143,6 +143,7 @@ KeInitializeThread(PKPROCESS Process, PKTHREAD Thread, BOOLEAN First)
InitializeListHead(&Thread->MutantListHead);
if (!First)
{
PFN_TYPE Page[MM_STACK_SIZE / PAGE_SIZE];
KernelStack = NULL;
MmLockAddressSpace(MmGetKernelAddressSpace());
@ -165,17 +166,20 @@ KeInitializeThread(PKPROCESS Process, PKTHREAD Thread, BOOLEAN First)
}
for (i = 0; i < (MM_STACK_SIZE / PAGE_SIZE); i++)
{
PHYSICAL_ADDRESS Page;
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Page);
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Page[i]);
if (!NT_SUCCESS(Status))
{
KEBUGCHECK(0);
}
Status = MmCreateVirtualMapping(NULL,
(char*)KernelStack + (i * PAGE_SIZE),
PAGE_EXECUTE_READWRITE,
Page,
TRUE);
}
Status = MmCreateVirtualMapping(NULL,
KernelStack,
PAGE_READWRITE,
Page,
MM_STACK_SIZE / PAGE_SIZE);
if (!NT_SUCCESS(Status))
{
KEBUGCHECK(0);
}
Thread->InitialStack = (char*)KernelStack + MM_STACK_SIZE;
Thread->StackBase = (char*)KernelStack + MM_STACK_SIZE;

View file

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: process.c,v 1.20 2004/06/23 22:31:51 ion Exp $
/* $Id: process.c,v 1.21 2004/08/01 07:24:59 hbirr Exp $
*
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/ke/process.c
@ -73,7 +73,7 @@ KeAttachProcess (PEPROCESS Process)
To prevent this, make sure the page directory of the process we're
attaching to is up-to-date. */
AttachedProcessPageDir = ExAllocatePageWithPhysPage(Process->Pcb.DirectoryTableBase);
AttachedProcessPageDir = ExAllocatePageWithPhysPage(Process->Pcb.DirectoryTableBase.QuadPart >> PAGE_SHIFT);
MmUpdateStackPageDir(AttachedProcessPageDir, &CurrentThread->Tcb);
ExUnmapPage(AttachedProcessPageDir);

View file

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: anonmem.c,v 1.29 2004/07/10 17:01:03 hbirr Exp $
/* $Id: anonmem.c,v 1.30 2004/08/01 07:24:57 hbirr Exp $
*
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/anonmem.c
@ -45,7 +45,7 @@ MmWritePageVirtualMemory(PMADDRESS_SPACE AddressSpace,
PMM_PAGEOP PageOp)
{
SWAPENTRY SwapEntry;
LARGE_INTEGER PhysicalAddress;
PFN_TYPE Page;
NTSTATUS Status;
/*
@ -59,8 +59,7 @@ MmWritePageVirtualMemory(PMADDRESS_SPACE AddressSpace,
return(STATUS_UNSUCCESSFUL);
}
PhysicalAddress =
MmGetPhysicalAddressForProcess(AddressSpace->Process, Address);
Page = MmGetPfnForProcess(AddressSpace->Process, Address);
/*
* Check that the page actually is dirty.
@ -81,7 +80,7 @@ MmWritePageVirtualMemory(PMADDRESS_SPACE AddressSpace,
/*
* If necessary, allocate an entry in the paging file for this page
*/
SwapEntry = MmGetSavedSwapEntryPage(PhysicalAddress);
SwapEntry = MmGetSavedSwapEntryPage(Page);
if (SwapEntry == 0)
{
SwapEntry = MmAllocSwapPage();
@ -98,7 +97,7 @@ MmWritePageVirtualMemory(PMADDRESS_SPACE AddressSpace,
/*
* Write the page to the pagefile
*/
Status = MmWriteToSwapPage(SwapEntry, &PhysicalAddress);
Status = MmWriteToSwapPage(SwapEntry, Page);
if (!NT_SUCCESS(Status))
{
DPRINT1("MM: Failed to write to swap page (Status was 0x%.8X)\n",
@ -113,7 +112,7 @@ MmWritePageVirtualMemory(PMADDRESS_SPACE AddressSpace,
/*
* Otherwise we have succeeded.
*/
MmSetSavedSwapEntryPage(PhysicalAddress, SwapEntry);
MmSetSavedSwapEntryPage(Page, SwapEntry);
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmReleasePageOp(PageOp);
@ -126,7 +125,7 @@ MmPageOutVirtualMemory(PMADDRESS_SPACE AddressSpace,
PVOID Address,
PMM_PAGEOP PageOp)
{
PHYSICAL_ADDRESS PhysicalAddress;
PFN_TYPE Page;
BOOL WasDirty;
SWAPENTRY SwapEntry;
NTSTATUS Status;
@ -149,9 +148,9 @@ MmPageOutVirtualMemory(PMADDRESS_SPACE AddressSpace,
* Disable the virtual mapping.
*/
MmDisableVirtualMapping(MemoryArea->Process, Address,
&WasDirty, &PhysicalAddress);
&WasDirty, &Page);
if (PhysicalAddress.QuadPart == 0)
if (Page == 0)
{
KEBUGCHECK(0);
}
@ -162,13 +161,13 @@ MmPageOutVirtualMemory(PMADDRESS_SPACE AddressSpace,
if (!WasDirty)
{
MmDeleteVirtualMapping(MemoryArea->Process, Address, FALSE, NULL, NULL);
MmDeleteAllRmaps(PhysicalAddress, NULL, NULL);
if ((SwapEntry = MmGetSavedSwapEntryPage(PhysicalAddress)) != 0)
MmDeleteAllRmaps(Page, NULL, NULL);
if ((SwapEntry = MmGetSavedSwapEntryPage(Page)) != 0)
{
MmCreatePageFileMapping(MemoryArea->Process, Address, SwapEntry);
MmSetSavedSwapEntryPage(PhysicalAddress, 0);
MmSetSavedSwapEntryPage(Page, 0);
}
MmReleasePageMemoryConsumer(MC_USER, PhysicalAddress);
MmReleasePageMemoryConsumer(MC_USER, Page);
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmReleasePageOp(PageOp);
@ -178,7 +177,7 @@ MmPageOutVirtualMemory(PMADDRESS_SPACE AddressSpace,
/*
* If necessary, allocate an entry in the paging file for this page
*/
SwapEntry = MmGetSavedSwapEntryPage(PhysicalAddress);
SwapEntry = MmGetSavedSwapEntryPage(Page);
if (SwapEntry == 0)
{
SwapEntry = MmAllocSwapPage();
@ -196,7 +195,7 @@ MmPageOutVirtualMemory(PMADDRESS_SPACE AddressSpace,
/*
* Write the page to the pagefile
*/
Status = MmWriteToSwapPage(SwapEntry, &PhysicalAddress);
Status = MmWriteToSwapPage(SwapEntry, Page);
if (!NT_SUCCESS(Status))
{
DPRINT1("MM: Failed to write to swap page (Status was 0x%.8X)\n",
@ -211,12 +210,12 @@ MmPageOutVirtualMemory(PMADDRESS_SPACE AddressSpace,
/*
* Otherwise we have succeeded, free the page
*/
DPRINT("MM: Swapped out virtual memory page 0x%.8X!\n", PhysicalAddress);
DPRINT("MM: Swapped out virtual memory page 0x%.8X!\n", Page << PAGE_SHIFT);
MmDeleteVirtualMapping(MemoryArea->Process, Address, FALSE, NULL, NULL);
MmCreatePageFileMapping(MemoryArea->Process, Address, SwapEntry);
MmDeleteAllRmaps(PhysicalAddress, NULL, NULL);
MmSetSavedSwapEntryPage(PhysicalAddress, 0);
MmReleasePageMemoryConsumer(MC_USER, PhysicalAddress);
MmDeleteAllRmaps(Page, NULL, NULL);
MmSetSavedSwapEntryPage(Page, 0);
MmReleasePageMemoryConsumer(MC_USER, Page);
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmReleasePageOp(PageOp);
@ -238,7 +237,7 @@ MmNotPresentFaultVirtualMemory(PMADDRESS_SPACE AddressSpace,
* NOTES: This function is called with the address space lock held.
*/
{
PHYSICAL_ADDRESS Page;
PFN_TYPE Page;
NTSTATUS Status;
PMM_REGION Region;
PMM_PAGEOP PageOp;
@ -252,7 +251,7 @@ MmNotPresentFaultVirtualMemory(PMADDRESS_SPACE AddressSpace,
{
if (Locked)
{
MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
MmLockPage(MmGetPfnForProcess(NULL, Address));
}
return(STATUS_SUCCESS);
}
@ -337,7 +336,7 @@ MmNotPresentFaultVirtualMemory(PMADDRESS_SPACE AddressSpace,
MmLockAddressSpace(AddressSpace);
if (Locked)
{
MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
MmLockPage(MmGetPfnForProcess(NULL, Address));
}
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmReleasePageOp(PageOp);
@ -368,7 +367,7 @@ MmNotPresentFaultVirtualMemory(PMADDRESS_SPACE AddressSpace,
SWAPENTRY SwapEntry;
MmDeletePageFileMapping(MemoryArea->Process, Address, &SwapEntry);
Status = MmReadFromSwapPage(SwapEntry, &Page);
Status = MmReadFromSwapPage(SwapEntry, Page);
if (!NT_SUCCESS(Status))
{
KEBUGCHECK(0);
@ -383,16 +382,16 @@ MmNotPresentFaultVirtualMemory(PMADDRESS_SPACE AddressSpace,
Status = MmCreateVirtualMapping(MemoryArea->Process,
(PVOID)PAGE_ROUND_DOWN(Address),
Region->Protect,
Page,
FALSE);
&Page,
1);
while (Status == STATUS_NO_MEMORY)
{
MmUnlockAddressSpace(AddressSpace);
Status = MmCreateVirtualMapping(MemoryArea->Process,
Address,
Region->Protect,
Page,
TRUE);
&Page,
1);
MmLockAddressSpace(AddressSpace);
}
if (!NT_SUCCESS(Status))
@ -412,7 +411,7 @@ MmNotPresentFaultVirtualMemory(PMADDRESS_SPACE AddressSpace,
*/
if (Locked)
{
MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
MmLockPage(Page);
}
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
@ -442,7 +441,7 @@ MmModifyAttributes(PMADDRESS_SPACE AddressSpace,
for (i=0; i < PAGE_ROUND_UP(RegionSize)/PAGE_SIZE; i++)
{
LARGE_INTEGER PhysicalAddr;
PFN_TYPE Page;
if (MmIsPageSwapEntry(AddressSpace->Process,
(char*)BaseAddress + (i * PAGE_SIZE)))
@ -458,19 +457,19 @@ MmModifyAttributes(PMADDRESS_SPACE AddressSpace,
{
MmDeleteVirtualMapping(AddressSpace->Process,
(char*)BaseAddress + (i*PAGE_SIZE),
FALSE, NULL, &PhysicalAddr);
if (PhysicalAddr.QuadPart != 0)
FALSE, NULL, &Page);
if (Page != 0)
{
SWAPENTRY SavedSwapEntry;
SavedSwapEntry = MmGetSavedSwapEntryPage(PhysicalAddr);
SavedSwapEntry = MmGetSavedSwapEntryPage(Page);
if (SavedSwapEntry != 0)
{
MmFreeSwapPage(SavedSwapEntry);
MmSetSavedSwapEntryPage(PhysicalAddr, 0);
MmSetSavedSwapEntryPage(Page, 0);
}
MmDeleteRmap(PhysicalAddr, AddressSpace->Process,
MmDeleteRmap(Page, AddressSpace->Process,
(char*)BaseAddress + (i * PAGE_SIZE));
MmReleasePageMemoryConsumer(MC_USER, PhysicalAddr);
MmReleasePageMemoryConsumer(MC_USER, Page);
}
}
}
@ -481,7 +480,7 @@ MmModifyAttributes(PMADDRESS_SPACE AddressSpace,
* alter the attributes for any allocated pages within the region
*/
if (NewType == MEM_COMMIT && OldType == MEM_COMMIT &&
OldProtect != NewProtect)
OldProtect != NewProtect)
{
ULONG i;
@ -665,23 +664,23 @@ VOID STATIC
MmFreeVirtualMemoryPage(PVOID Context,
MEMORY_AREA* MemoryArea,
PVOID Address,
PHYSICAL_ADDRESS PhysicalAddr,
PFN_TYPE Page,
SWAPENTRY SwapEntry,
BOOLEAN Dirty)
{
PEPROCESS Process = (PEPROCESS)Context;
if (PhysicalAddr.QuadPart != 0)
if (Page != 0)
{
SWAPENTRY SavedSwapEntry;
SavedSwapEntry = MmGetSavedSwapEntryPage(PhysicalAddr);
SavedSwapEntry = MmGetSavedSwapEntryPage(Page);
if (SavedSwapEntry != 0)
{
MmFreeSwapPage(SavedSwapEntry);
MmSetSavedSwapEntryPage(PhysicalAddr, 0);
MmSetSavedSwapEntryPage(Page, 0);
}
MmDeleteRmap(PhysicalAddr, Process, Address);
MmReleasePageMemoryConsumer(MC_USER, PhysicalAddr);
MmDeleteRmap(Page, Process, Address);
MmReleasePageMemoryConsumer(MC_USER, Page);
}
else if (SwapEntry != 0)
{

View file

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: balance.c,v 1.30 2004/07/31 09:44:35 hbirr Exp $
/* $Id: balance.c,v 1.31 2004/08/01 07:24:57 hbirr Exp $
*
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/balance.c
@ -38,7 +38,7 @@
/* TYPES ********************************************************************/
typedef struct _MM_ALLOCATION_REQUEST
{
PHYSICAL_ADDRESS Page;
PFN_TYPE Page;
LIST_ENTRY ListEntry;
KEVENT Event;
}
@ -98,19 +98,13 @@ MmInitializeMemoryConsumer(ULONG Consumer,
}
NTSTATUS
MmReleasePageMemoryConsumer(ULONG Consumer, PHYSICAL_ADDRESS Page)
MmReleasePageMemoryConsumer(ULONG Consumer, PFN_TYPE Page)
{
PMM_ALLOCATION_REQUEST Request;
PLIST_ENTRY Entry;
KIRQL oldIrql;
#if defined(__GNUC__)
if (Page.QuadPart == 0LL)
#else
if (Page.QuadPart == 0)
#endif
if (Page == 0)
{
DPRINT1("Tried to release page zero.\n");
KEBUGCHECK(0);
@ -196,10 +190,10 @@ MiIsBalancerThread(VOID)
NTSTATUS
MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
PHYSICAL_ADDRESS* AllocatedPage)
PPFN_TYPE AllocatedPage)
{
ULONG OldUsed;
PHYSICAL_ADDRESS Page;
PFN_TYPE Page;
KIRQL oldIrql;
/*
@ -223,14 +217,7 @@ MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
if (Consumer == MC_NPPOOL || MiIsBalancerThread())
{
Page = MmAllocPage(Consumer, 0);
#if defined(__GNUC__)
if (Page.QuadPart == 0LL)
#else
if (Page.QuadPart == 0)
#endif
if (Page == 0)
{
KEBUGCHECK(0);
}
@ -257,12 +244,7 @@ MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
}
/* Insert an allocation request. */
#if defined(__GNUC__)
Request.Page.QuadPart = 0LL;
#else
Request.Page.QuadPart = 0;
#endif
Request.Page = 0;
KeInitializeEvent(&Request.Event, NotificationEvent, FALSE);
InterlockedIncrement((LONG *)&MiPagesRequired);
@ -283,14 +265,7 @@ MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
NULL);
Page = Request.Page;
#if defined(__GNUC__)
if (Page.QuadPart == 0LL)
#else
if (Page.QuadPart == 0)
#endif
if (Page == 0)
{
KEBUGCHECK(0);
}
@ -304,14 +279,7 @@ MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
* Actually allocate the page.
*/
Page = MmAllocPage(Consumer, 0);
#if defined(__GNUC__)
if (Page.QuadPart == 0LL)
#else
if (Page.QuadPart == 0)
#endif
if (Page == 0)
{
KEBUGCHECK(0);
}

View file

@ -1,4 +1,4 @@
/* $Id: cont.c,v 1.31 2004/04/10 22:35:25 gdalsnes Exp $
/* $Id: cont.c,v 1.32 2004/08/01 07:24:57 hbirr Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -21,13 +21,13 @@
VOID STATIC
MmFreeContinuousPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
PHYSICAL_ADDRESS PhysAddr, SWAPENTRY SwapEntry,
PFN_TYPE Page, SWAPENTRY SwapEntry,
BOOLEAN Dirty)
{
assert(SwapEntry == 0);
if (PhysAddr.QuadPart != 0)
if (Page != 0)
{
MmReleasePageMemoryConsumer(MC_NPPOOL, PhysAddr);
MmReleasePageMemoryConsumer(MC_NPPOOL, Page);
}
}
@ -42,7 +42,7 @@ MmAllocateContiguousAlignedMemory(IN ULONG NumberOfBytes,
PMEMORY_AREA MArea;
NTSTATUS Status;
PVOID BaseAddress = 0;
PHYSICAL_ADDRESS PBase;
PFN_TYPE PBase;
ULONG Attributes;
ULONG i;
@ -78,14 +78,7 @@ MmAllocateContiguousAlignedMemory(IN ULONG NumberOfBytes,
LowestAcceptableAddress,
HighestAcceptableAddress,
Alignment);
#if defined(__GNUC__)
if (PBase.QuadPart == 0LL)
#else
if (PBase.QuadPart == 0)
#endif
if (PBase == 0)
{
MmLockAddressSpace(MmGetKernelAddressSpace());
MmFreeMemoryArea(MmGetKernelAddressSpace(),
@ -96,22 +89,13 @@ MmAllocateContiguousAlignedMemory(IN ULONG NumberOfBytes,
MmUnlockAddressSpace(MmGetKernelAddressSpace());
return(NULL);
}
for (i = 0; i < (PAGE_ROUND_UP(NumberOfBytes) / 4096); i++)
for (i = 0; i < (PAGE_ROUND_UP(NumberOfBytes) / PAGE_SIZE); i++, PBase++)
{
#if !defined(__GNUC__)
LARGE_INTEGER dummyJunkNeeded;
dummyJunkNeeded.QuadPart = PBase.QuadPart + (i * 4096);
#endif
MmCreateVirtualMapping(NULL,
(char*)BaseAddress + (i * 4096),
Attributes,
#if defined(__GNUC__)
(LARGE_INTEGER)(PBase.QuadPart + (i * 4096)),
#else
dummyJunkNeeded,
#endif
TRUE);
&PBase,
1);
}
return(BaseAddress);
}

View file

@ -68,30 +68,38 @@ static ULONG UnzeroedPageCount = 0;
/* FUNCTIONS *************************************************************/
VOID
MmTransferOwnershipPage(PHYSICAL_ADDRESS PhysicalAddress, ULONG NewConsumer)
MmTransferOwnershipPage(PFN_TYPE Pfn, ULONG NewConsumer)
{
ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
KIRQL oldIrql;
KeAcquireSpinLock(&PageListLock, &oldIrql);
if (MmPageArray[Start].MapCount != 0)
if (MmPageArray[Pfn].MapCount != 0)
{
DbgPrint("Transfering mapped page.\n");
KEBUGCHECK(0);
}
RemoveEntryList(&MmPageArray[Start].ListEntry);
if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
{
DPRINT1("Type: %d\n", MmPageArray[Pfn].Flags.Type);
KEBUGCHECK(0);
}
if (MmPageArray[Pfn].ReferenceCount != 1)
{
DPRINT1("ReferenceCount: %d\n", MmPageArray[Pfn].ReferenceCount);
KEBUGCHECK(0);
}
RemoveEntryList(&MmPageArray[Pfn].ListEntry);
InsertTailList(&UsedPageListHeads[NewConsumer],
&MmPageArray[Start].ListEntry);
MmPageArray[Start].Flags.Consumer = NewConsumer;
&MmPageArray[Pfn].ListEntry);
MmPageArray[Pfn].Flags.Consumer = NewConsumer;
KeReleaseSpinLock(&PageListLock, oldIrql);
MiZeroPage(PhysicalAddress);
MiZeroPage(Pfn);
}
PHYSICAL_ADDRESS
PFN_TYPE
MmGetLRUFirstUserPage(VOID)
{
PLIST_ENTRY NextListEntry;
PHYSICAL_ADDRESS Next;
PHYSICAL_PAGE* PageDescriptor;
KIRQL oldIrql;
@ -100,90 +108,58 @@ MmGetLRUFirstUserPage(VOID)
if (NextListEntry == &UsedPageListHeads[MC_USER])
{
KeReleaseSpinLock(&PageListLock, oldIrql);
#if defined(__GNUC__)
return((PHYSICAL_ADDRESS)0LL);
#else
{
const PHYSICAL_ADDRESS dummyJunkNeeded =
{
0
};
return dummyJunkNeeded;
}
#endif
return 0;
}
PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
Next.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
Next.QuadPart = (Next.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGE_SIZE;
KeReleaseSpinLock(&PageListLock, oldIrql);
return(Next);
return PageDescriptor - MmPageArray;
}
VOID
MmSetLRULastPage(PHYSICAL_ADDRESS PhysicalAddress)
MmSetLRULastPage(PFN_TYPE Pfn)
{
ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
KIRQL oldIrql;
assert (Pfn < MmPageArraySize);
KeAcquireSpinLock(&PageListLock, &oldIrql);
if (MmPageArray[Start].Flags.Type == MM_PHYSICAL_PAGE_USED &&
MmPageArray[Start].Flags.Consumer == MC_USER)
if (MmPageArray[Pfn].Flags.Type == MM_PHYSICAL_PAGE_USED &&
MmPageArray[Pfn].Flags.Consumer == MC_USER)
{
RemoveEntryList(&MmPageArray[Start].ListEntry);
RemoveEntryList(&MmPageArray[Pfn].ListEntry);
InsertTailList(&UsedPageListHeads[MC_USER],
&MmPageArray[Start].ListEntry);
&MmPageArray[Pfn].ListEntry);
}
KeReleaseSpinLock(&PageListLock, oldIrql);
}
PHYSICAL_ADDRESS
MmGetLRUNextUserPage(PHYSICAL_ADDRESS PreviousPhysicalAddress)
PFN_TYPE
MmGetLRUNextUserPage(PFN_TYPE PreviousPfn)
{
ULONG Start = PreviousPhysicalAddress.u.LowPart / PAGE_SIZE;
PLIST_ENTRY NextListEntry;
PHYSICAL_ADDRESS Next;
PHYSICAL_PAGE* PageDescriptor;
KIRQL oldIrql;
KeAcquireSpinLock(&PageListLock, &oldIrql);
if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED ||
MmPageArray[Start].Flags.Consumer != MC_USER)
if (MmPageArray[PreviousPfn].Flags.Type != MM_PHYSICAL_PAGE_USED ||
MmPageArray[PreviousPfn].Flags.Consumer != MC_USER)
{
NextListEntry = UsedPageListHeads[MC_USER].Flink;
}
else
{
NextListEntry = MmPageArray[Start].ListEntry.Flink;
NextListEntry = MmPageArray[PreviousPfn].ListEntry.Flink;
}
if (NextListEntry == &UsedPageListHeads[MC_USER])
{
KeReleaseSpinLock(&PageListLock, oldIrql);
#if defined(__GNUC__)
return((PHYSICAL_ADDRESS)0LL);
#else
{
const PHYSICAL_ADDRESS dummyJunkNeeded =
{
0
};
return dummyJunkNeeded;
}
#endif
return 0;
}
PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
Next.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
Next.QuadPart = (Next.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGE_SIZE;
KeReleaseSpinLock(&PageListLock, oldIrql);
return(Next);
return PageDescriptor - MmPageArray;
}
PHYSICAL_ADDRESS
PFN_TYPE
MmGetContinuousPages(ULONG NumberOfBytes,
PHYSICAL_ADDRESS LowestAcceptableAddress,
PHYSICAL_ADDRESS HighestAcceptableAddress,
@ -191,7 +167,7 @@ MmGetContinuousPages(ULONG NumberOfBytes,
{
ULONG NrPages;
ULONG i;
LONG start;
ULONG start;
ULONG length;
KIRQL oldIrql;
@ -232,20 +208,7 @@ MmGetContinuousPages(ULONG NumberOfBytes,
if (start == -1 || length != NrPages)
{
KeReleaseSpinLock(&PageListLock, oldIrql);
#if defined(__GNUC__)
return((PHYSICAL_ADDRESS)(LONGLONG)0);
#else
{
const PHYSICAL_ADDRESS dummyJunkNeeded =
{
0
};
return dummyJunkNeeded;
}
#endif
return 0;
}
for (i = start; i < (start + length); i++)
{
@ -270,29 +233,15 @@ MmGetContinuousPages(ULONG NumberOfBytes,
{
if (MmPageArray[i].Flags.Zero == 0)
{
PHYSICAL_ADDRESS Page;
Page.QuadPart = i * PAGE_SIZE;
MiZeroPage(Page);
MiZeroPage(i);
}
else
{
MmPageArray[i].Flags.Zero = 0;
}
}
#if defined(__GNUC__)
return((PHYSICAL_ADDRESS)((LONGLONG)start * PAGE_SIZE));
#else
{
const PHYSICAL_ADDRESS dummyJunkNeeded =
{
start * PAGE_SIZE
};
return dummyJunkNeeded;
}
#endif
return start;
}
VOID INIT_FUNCTION
@ -445,26 +394,13 @@ MmInitializePageList(PVOID FirstPhysKernelAddress,
PVOID Address = (char*)(ULONG)MmPageArray + (i * PAGE_SIZE);
if (!MmIsPagePresent(NULL, Address))
{
#if !defined(__GNUC__)
const PHYSICAL_ADDRESS dummyJunkNeeded =
{
(ULONG)LastPhysKernelAddress -
(Reserved * PAGE_SIZE) + (i * PAGE_SIZE)
};
#endif
ULONG PhysicalAddress = (ULONG)LastPhysKernelAddress -
(Reserved * PAGE_SIZE) + (i * PAGE_SIZE);
ULONG Pfn = ((ULONG_PTR)LastPhysKernelAddress >> PAGE_SHIFT) - Reserved + i;
Status =
MmCreateVirtualMappingUnsafe(NULL,
Address,
PAGE_READWRITE,
#if defined(__GNUC__)
(PHYSICAL_ADDRESS)(LONGLONG)PhysicalAddress,
#else
dummyJunkNeeded,
#endif
FALSE);
&Pfn,
1);
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");
@ -614,85 +550,78 @@ MmInitializePageList(PVOID FirstPhysKernelAddress,
}
VOID
MmSetFlagsPage(PHYSICAL_ADDRESS PhysicalAddress, ULONG Flags)
MmSetFlagsPage(PFN_TYPE Pfn, ULONG Flags)
{
ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
KIRQL oldIrql;
assert (Pfn < MmPageArraySize);
KeAcquireSpinLock(&PageListLock, &oldIrql);
MmPageArray[Start].AllFlags = Flags;
MmPageArray[Pfn].AllFlags = Flags;
KeReleaseSpinLock(&PageListLock, oldIrql);
}
VOID
MmSetRmapListHeadPage(PHYSICAL_ADDRESS PhysicalAddress,
struct _MM_RMAP_ENTRY* ListHead)
MmSetRmapListHeadPage(PFN_TYPE Pfn, struct _MM_RMAP_ENTRY* ListHead)
{
ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
MmPageArray[Start].RmapListHead = ListHead;
MmPageArray[Pfn].RmapListHead = ListHead;
}
struct _MM_RMAP_ENTRY*
MmGetRmapListHeadPage(PHYSICAL_ADDRESS PhysicalAddress)
MmGetRmapListHeadPage(PFN_TYPE Pfn)
{
ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
return(MmPageArray[Start].RmapListHead);
return(MmPageArray[Pfn].RmapListHead);
}
VOID
MmMarkPageMapped(PHYSICAL_ADDRESS PhysicalAddress)
MmMarkPageMapped(PFN_TYPE Pfn)
{
ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
KIRQL oldIrql;
if (Start < MmPageArraySize)
if (Pfn < MmPageArraySize)
{
KeAcquireSpinLock(&PageListLock, &oldIrql);
if (MmPageArray[Start].Flags.Type == MM_PHYSICAL_PAGE_FREE)
if (MmPageArray[Pfn].Flags.Type == MM_PHYSICAL_PAGE_FREE)
{
DbgPrint("Mapping non-used page\n");
KEBUGCHECK(0);
}
MmPageArray[Start].MapCount++;
MmPageArray[Pfn].MapCount++;
KeReleaseSpinLock(&PageListLock, oldIrql);
}
}
VOID
MmMarkPageUnmapped(PHYSICAL_ADDRESS PhysicalAddress)
MmMarkPageUnmapped(PFN_TYPE Pfn)
{
ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
KIRQL oldIrql;
if (Start < MmPageArraySize)
if (Pfn < MmPageArraySize)
{
KeAcquireSpinLock(&PageListLock, &oldIrql);
if (MmPageArray[Start].Flags.Type == MM_PHYSICAL_PAGE_FREE)
if (MmPageArray[Pfn].Flags.Type == MM_PHYSICAL_PAGE_FREE)
{
DbgPrint("Unmapping non-used page\n");
KEBUGCHECK(0);
}
if (MmPageArray[Start].MapCount == 0)
if (MmPageArray[Pfn].MapCount == 0)
{
DbgPrint("Unmapping not mapped page\n");
KEBUGCHECK(0);
}
MmPageArray[Start].MapCount--;
MmPageArray[Pfn].MapCount--;
KeReleaseSpinLock(&PageListLock, oldIrql);
}
}
ULONG
MmGetFlagsPage(PHYSICAL_ADDRESS PhysicalAddress)
MmGetFlagsPage(PFN_TYPE Pfn)
{
ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
KIRQL oldIrql;
ULONG Flags;
assert (Pfn < MmPageArraySize);
KeAcquireSpinLock(&PageListLock, &oldIrql);
Flags = MmPageArray[Start].AllFlags;
Flags = MmPageArray[Pfn].AllFlags;
KeReleaseSpinLock(&PageListLock, oldIrql);
return(Flags);
@ -700,98 +629,94 @@ MmGetFlagsPage(PHYSICAL_ADDRESS PhysicalAddress)
VOID
MmSetSavedSwapEntryPage(PHYSICAL_ADDRESS PhysicalAddress,
SWAPENTRY SavedSwapEntry)
MmSetSavedSwapEntryPage(PFN_TYPE Pfn, SWAPENTRY SavedSwapEntry)
{
ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
KIRQL oldIrql;
assert (Pfn < MmPageArraySize);
KeAcquireSpinLock(&PageListLock, &oldIrql);
MmPageArray[Start].SavedSwapEntry = SavedSwapEntry;
MmPageArray[Pfn].SavedSwapEntry = SavedSwapEntry;
KeReleaseSpinLock(&PageListLock, oldIrql);
}
SWAPENTRY
MmGetSavedSwapEntryPage(PHYSICAL_ADDRESS PhysicalAddress)
MmGetSavedSwapEntryPage(PFN_TYPE Pfn)
{
ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
SWAPENTRY SavedSwapEntry;
KIRQL oldIrql;
assert (Pfn < MmPageArraySize);
KeAcquireSpinLock(&PageListLock, &oldIrql);
SavedSwapEntry = MmPageArray[Start].SavedSwapEntry;
SavedSwapEntry = MmPageArray[Pfn].SavedSwapEntry;
KeReleaseSpinLock(&PageListLock, oldIrql);
return(SavedSwapEntry);
}
VOID
MmReferencePage(PHYSICAL_ADDRESS PhysicalAddress)
MmReferencePage(PFN_TYPE Pfn)
{
ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
KIRQL oldIrql;
DPRINT("MmReferencePage(PhysicalAddress %x)\n", PhysicalAddress);
DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
if (PhysicalAddress.u.LowPart == 0)
if (Pfn == 0 || Pfn >= MmPageArraySize)
{
KEBUGCHECK(0);
}
KeAcquireSpinLock(&PageListLock, &oldIrql);
if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
{
DbgPrint("Referencing non-used page\n");
KEBUGCHECK(0);
}
MmPageArray[Start].ReferenceCount++;
MmPageArray[Pfn].ReferenceCount++;
KeReleaseSpinLock(&PageListLock, oldIrql);
}
ULONG
MmGetReferenceCountPage(PHYSICAL_ADDRESS PhysicalAddress)
MmGetReferenceCountPage(PFN_TYPE Pfn)
{
ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
KIRQL oldIrql;
ULONG RCount;
DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", PhysicalAddress);
DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
if (PhysicalAddress.u.LowPart == 0)
if (Pfn == 0 || Pfn >= MmPageArraySize)
{
KEBUGCHECK(0);
}
KeAcquireSpinLock(&PageListLock, &oldIrql);
if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
{
DbgPrint("Getting reference count for free page\n");
KEBUGCHECK(0);
}
RCount = MmPageArray[Start].ReferenceCount;
RCount = MmPageArray[Pfn].ReferenceCount;
KeReleaseSpinLock(&PageListLock, oldIrql);
return(RCount);
}
BOOLEAN
MmIsUsablePage(PHYSICAL_ADDRESS PhysicalAddress)
MmIsUsablePage(PFN_TYPE Pfn)
{
ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
DPRINT("MmIsUsablePage(PhysicalAddress %x)\n", PhysicalAddress);
DPRINT("MmIsUsablePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
if (PhysicalAddress.u.LowPart == 0)
if (Pfn == 0 || Pfn >= MmPageArraySize)
{
KEBUGCHECK(0);
}
if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED &&
MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_BIOS)
if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED &&
MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_BIOS)
{
return(FALSE);
}
@ -800,64 +725,67 @@ MmIsUsablePage(PHYSICAL_ADDRESS PhysicalAddress)
}
VOID
MmDereferencePage(PHYSICAL_ADDRESS PhysicalAddress)
MmDereferencePage(PFN_TYPE Pfn)
{
ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
KIRQL oldIrql;
DPRINT("MmDereferencePage(PhysicalAddress %I64x)\n", PhysicalAddress);
DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
if (PhysicalAddress.u.LowPart == 0)
if (Pfn == 0 || Pfn >= MmPageArraySize)
{
KEBUGCHECK(0);
}
KeAcquireSpinLock(&PageListLock, &oldIrql);
if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
{
DbgPrint("Dereferencing free page\n");
KEBUGCHECK(0);
}
if (MmPageArray[Pfn].ReferenceCount == 0)
{
DbgPrint("Derefrencing page with reference count 0\n");
KEBUGCHECK(0);
}
MmPageArray[Start].ReferenceCount--;
if (MmPageArray[Start].ReferenceCount == 0)
MmPageArray[Pfn].ReferenceCount--;
if (MmPageArray[Pfn].ReferenceCount == 0)
{
MmStats.NrFreePages++;
MmStats.NrSystemPages--;
RemoveEntryList(&MmPageArray[Start].ListEntry);
if (MmPageArray[Start].RmapListHead != NULL)
RemoveEntryList(&MmPageArray[Pfn].ListEntry);
if (MmPageArray[Pfn].RmapListHead != NULL)
{
DbgPrint("Freeing page with rmap entries.\n");
KEBUGCHECK(0);
}
if (MmPageArray[Start].MapCount != 0)
if (MmPageArray[Pfn].MapCount != 0)
{
DbgPrint("Freeing mapped page (0x%I64x count %d)\n",
PhysicalAddress, MmPageArray[Start].MapCount);
DbgPrint("Freeing mapped page (0x%x count %d)\n",
Pfn << PAGE_SHIFT, MmPageArray[Pfn].MapCount);
KEBUGCHECK(0);
}
if (MmPageArray[Start].LockCount > 0)
if (MmPageArray[Pfn].LockCount > 0)
{
DbgPrint("Freeing locked page\n");
KEBUGCHECK(0);
}
if (MmPageArray[Start].SavedSwapEntry != 0)
if (MmPageArray[Pfn].SavedSwapEntry != 0)
{
DbgPrint("Freeing page with swap entry.\n");
KEBUGCHECK(0);
}
if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
{
DbgPrint("Freeing page with flags %x\n",
MmPageArray[Start].Flags.Type);
MmPageArray[Pfn].Flags.Type);
KEBUGCHECK(0);
}
MmPageArray[Start].Flags.Type = MM_PHYSICAL_PAGE_FREE;
MmPageArray[Start].Flags.Zero = 0;
MmPageArray[Pfn].Flags.Type = MM_PHYSICAL_PAGE_FREE;
MmPageArray[Pfn].Flags.Consumer = MC_MAXIMUM;
InsertTailList(&FreeUnzeroedPageListHead,
&MmPageArray[Start].ListEntry);
&MmPageArray[Pfn].ListEntry);
UnzeroedPageCount++;
if (UnzeroedPageCount > 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent))
{
@ -868,87 +796,84 @@ MmDereferencePage(PHYSICAL_ADDRESS PhysicalAddress)
}
ULONG
MmGetLockCountPage(PHYSICAL_ADDRESS PhysicalAddress)
MmGetLockCountPage(PFN_TYPE Pfn)
{
ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
KIRQL oldIrql;
ULONG LockCount;
DPRINT("MmGetLockCountPage(PhysicalAddress %x)\n", PhysicalAddress);
DPRINT("MmGetLockCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
if (PhysicalAddress.u.LowPart == 0)
if (Pfn == 0 || Pfn >= MmPageArraySize)
{
KEBUGCHECK(0);
}
KeAcquireSpinLock(&PageListLock, &oldIrql);
if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
{
DbgPrint("Getting lock count for free page\n");
KEBUGCHECK(0);
}
LockCount = MmPageArray[Start].LockCount;
LockCount = MmPageArray[Pfn].LockCount;
KeReleaseSpinLock(&PageListLock, oldIrql);
return(LockCount);
}
VOID
MmLockPage(PHYSICAL_ADDRESS PhysicalAddress)
MmLockPage(PFN_TYPE Pfn)
{
ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
KIRQL oldIrql;
DPRINT("MmLockPage(PhysicalAddress %x)\n", PhysicalAddress);
DPRINT("MmLockPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
if (PhysicalAddress.u.LowPart == 0)
if (Pfn == 0 || Pfn >= MmPageArraySize)
{
KEBUGCHECK(0);
}
KeAcquireSpinLock(&PageListLock, &oldIrql);
if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
{
DbgPrint("Locking free page\n");
KEBUGCHECK(0);
}
MmPageArray[Start].LockCount++;
MmPageArray[Pfn].LockCount++;
KeReleaseSpinLock(&PageListLock, oldIrql);
}
VOID
MmUnlockPage(PHYSICAL_ADDRESS PhysicalAddress)
MmUnlockPage(PFN_TYPE Pfn)
{
ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
KIRQL oldIrql;
DPRINT("MmUnlockPage(PhysicalAddress %I64x)\n", PhysicalAddress);
DPRINT("MmUnlockPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
if (PhysicalAddress.u.LowPart == 0)
if (Pfn == 0 || Pfn >= MmPageArraySize)
{
KEBUGCHECK(0);
}
KeAcquireSpinLock(&PageListLock, &oldIrql);
if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
{
DbgPrint("Unlocking free page\n");
KEBUGCHECK(0);
}
MmPageArray[Start].LockCount--;
MmPageArray[Pfn].LockCount--;
KeReleaseSpinLock(&PageListLock, oldIrql);
}
PHYSICAL_ADDRESS
PFN_TYPE
MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry)
{
PHYSICAL_ADDRESS PageOffset;
PFN_TYPE PfnOffset;
PLIST_ENTRY ListEntry;
PPHYSICAL_PAGE PageDescriptor;
KIRQL oldIrql;
@ -963,20 +888,7 @@ MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry)
{
DPRINT1("MmAllocPage(): Out of memory\n");
KeReleaseSpinLock(&PageListLock, oldIrql);
#if defined(__GNUC__)
return((PHYSICAL_ADDRESS)0LL);
#else
{
const PHYSICAL_ADDRESS dummyJunkNeeded =
{
0
};
return dummyJunkNeeded;
}
#endif
return 0;
}
ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
UnzeroedPageCount--;
@ -1002,8 +914,12 @@ MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry)
DbgPrint("Got mapped page from freelist\n");
KEBUGCHECK(0);
}
if (PageDescriptor->ReferenceCount != 0)
{
DPRINT1("%d\n", PageDescriptor->ReferenceCount);
KEBUGCHECK(0);
}
PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
PageDescriptor->Flags.Zero = 0;
PageDescriptor->Flags.Consumer = Consumer;
PageDescriptor->ReferenceCount = 1;
PageDescriptor->LockCount = 0;
@ -1016,19 +932,17 @@ MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry)
KeReleaseSpinLock(&PageListLock, oldIrql);
PageOffset.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
PageOffset.QuadPart =
(PageOffset.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGE_SIZE;
PfnOffset = PageDescriptor - MmPageArray;
if (NeedClear)
{
MiZeroPage(PageOffset);
MiZeroPage(PfnOffset);
}
if (PageDescriptor->MapCount != 0)
{
DbgPrint("Returning mapped page.\n");
KEBUGCHECK(0);
}
return(PageOffset);
return PfnOffset;
}
@ -1039,7 +953,7 @@ MmZeroPageThreadMain(PVOID Ignored)
KIRQL oldIrql;
PLIST_ENTRY ListEntry;
PPHYSICAL_PAGE PageDescriptor;
PHYSICAL_ADDRESS PhysPage;
PFN_TYPE Pfn;
static PVOID Address = NULL;
ULONG Count;
@ -1068,19 +982,18 @@ MmZeroPageThreadMain(PVOID Ignored)
PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
KeReleaseSpinLock(&PageListLock, oldIrql);
Count++;
PhysPage.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
PhysPage.QuadPart = (PhysPage.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGE_SIZE;
Pfn = PageDescriptor - MmPageArray;
if (Address == NULL)
{
Address = ExAllocatePageWithPhysPage(PhysPage);
Address = ExAllocatePageWithPhysPage(Pfn);
}
else
{
Status = MmCreateVirtualMapping(NULL,
Address,
PAGE_READWRITE | PAGE_SYSTEM,
PhysPage,
FALSE);
&Pfn,
1);
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");

View file

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: iospace.c,v 1.28 2004/05/20 08:37:20 hbirr Exp $
/* $Id: iospace.c,v 1.29 2004/08/01 07:24:58 hbirr Exp $
*
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/iospace.c
@ -77,6 +77,7 @@ MmMapIoSpace (IN PHYSICAL_ADDRESS PhysicalAddress,
ULONG i;
ULONG Attributes;
PHYSICAL_ADDRESS BoundaryAddressMultiple;
PFN_TYPE Pfn;
DPRINT("MmMapIoSpace(%lx, %d, %d)\n", PhysicalAddress, NumberOfBytes, CacheEnable);
@ -116,11 +117,13 @@ MmMapIoSpace (IN PHYSICAL_ADDRESS PhysicalAddress,
{
Attributes |= (PAGE_NOCACHE | PAGE_WRITETHROUGH);
}
for (i = 0; i < PAGE_ROUND_UP(NumberOfBytes); i += PAGE_SIZE, PhysicalAddress.QuadPart += PAGE_SIZE)
Pfn = PhysicalAddress.QuadPart >> PAGE_SHIFT;
for (i = 0; i < PAGE_ROUND_UP(NumberOfBytes); i += PAGE_SIZE, Pfn++)
{
Status = MmCreateVirtualMappingForKernel((char*)Result + i,
Attributes,
PhysicalAddress);
&Pfn,
1);
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");

View file

@ -1,4 +1,4 @@
/* $Id: kmap.c,v 1.32 2004/04/10 22:35:25 gdalsnes Exp $
/* $Id: kmap.c,v 1.33 2004/08/01 07:24:58 hbirr Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -54,24 +54,24 @@ ExUnmapPage(PVOID Addr)
PVOID
ExAllocatePage(VOID)
{
PHYSICAL_ADDRESS PhysPage;
PFN_TYPE Page;
NTSTATUS Status;
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &PhysPage);
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
if (!NT_SUCCESS(Status))
{
return(NULL);
}
return(ExAllocatePageWithPhysPage(PhysPage));
return(ExAllocatePageWithPhysPage(Page));
}
NTSTATUS
MiZeroPage(PHYSICAL_ADDRESS PhysPage)
MiZeroPage(PFN_TYPE Page)
{
PVOID TempAddress;
TempAddress = ExAllocatePageWithPhysPage(PhysPage);
TempAddress = ExAllocatePageWithPhysPage(Page);
if (TempAddress == NULL)
{
return(STATUS_NO_MEMORY);
@ -82,11 +82,11 @@ MiZeroPage(PHYSICAL_ADDRESS PhysPage)
}
NTSTATUS
MiCopyFromUserPage(PHYSICAL_ADDRESS DestPhysPage, PVOID SourceAddress)
MiCopyFromUserPage(PFN_TYPE DestPage, PVOID SourceAddress)
{
PVOID TempAddress;
TempAddress = ExAllocatePageWithPhysPage(DestPhysPage);
TempAddress = ExAllocatePageWithPhysPage(DestPage);
if (TempAddress == NULL)
{
return(STATUS_NO_MEMORY);
@ -97,7 +97,7 @@ MiCopyFromUserPage(PHYSICAL_ADDRESS DestPhysPage, PVOID SourceAddress)
}
PVOID
ExAllocatePageWithPhysPage(PHYSICAL_ADDRESS PhysPage)
ExAllocatePageWithPhysPage(PFN_TYPE Page)
{
KIRQL oldlvl;
PVOID Addr;
@ -114,8 +114,8 @@ ExAllocatePageWithPhysPage(PHYSICAL_ADDRESS PhysPage)
Status = MmCreateVirtualMapping(NULL,
Addr,
PAGE_READWRITE | PAGE_SYSTEM,
PhysPage,
TRUE);
&Page,
1);
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");

View file

@ -409,12 +409,12 @@ MmFreeMemoryArea(PMADDRESS_SPACE AddressSpace,
PVOID BaseAddress,
ULONG Length,
VOID (*FreePage)(PVOID Context, MEMORY_AREA* MemoryArea,
PVOID Address, PHYSICAL_ADDRESS PhysAddr,
PVOID Address, PFN_TYPE Page,
SWAPENTRY SwapEntry, BOOLEAN Dirty),
PVOID FreePageContext)
{
MEMORY_AREA* MemoryArea;
ULONG i;
PVOID Address, EndAddress;
PEPROCESS CurrentProcess = PsGetCurrentProcess();
DPRINT("MmFreeMemoryArea(AddressSpace %x, BaseAddress %x, Length %x,"
@ -433,43 +433,33 @@ MmFreeMemoryArea(PMADDRESS_SPACE AddressSpace,
{
KeAttachProcess(AddressSpace->Process);
}
for (i=0; i<(PAGE_ROUND_UP(MemoryArea->Length)/PAGE_SIZE); i++)
EndAddress = MemoryArea->BaseAddress + PAGE_ROUND_UP(MemoryArea->Length);
for (Address = MemoryArea->BaseAddress; Address < EndAddress; Address += PAGE_SIZE)
{
#if defined(__GNUC__)
PHYSICAL_ADDRESS PhysAddr = (PHYSICAL_ADDRESS)0LL;
#else
PHYSICAL_ADDRESS PhysAddr = { 0 };
#endif
if (MemoryArea->Type == MEMORY_AREA_IO_MAPPING)
{
MmRawDeleteVirtualMapping((char*)MemoryArea->BaseAddress + (i * PAGE_SIZE));
MmRawDeleteVirtualMapping(Address);
}
else
{
BOOL Dirty = FALSE;
SWAPENTRY SwapEntry = 0;
PFN_TYPE Page = 0;
if (MmIsPageSwapEntry(AddressSpace->Process,
(char*)MemoryArea->BaseAddress + (i * PAGE_SIZE)))
if (MmIsPageSwapEntry(AddressSpace->Process, Address))
{
MmDeletePageFileMapping(AddressSpace->Process,
(char*)MemoryArea->BaseAddress + (i * PAGE_SIZE),
&SwapEntry);
MmDeletePageFileMapping(AddressSpace->Process, Address, &SwapEntry);
}
else
{
MmDeleteVirtualMapping(AddressSpace->Process,
(char*)MemoryArea->BaseAddress + (i*PAGE_SIZE),
FALSE, &Dirty, &PhysAddr);
MmDeleteVirtualMapping(AddressSpace->Process, Address, FALSE, &Dirty, &Page);
}
if (FreePage != NULL)
{
FreePage(FreePageContext, MemoryArea,
(char*)MemoryArea->BaseAddress + (i * PAGE_SIZE), PhysAddr,
SwapEntry, (BOOLEAN)Dirty);
FreePage(FreePageContext, MemoryArea, Address,
Page, SwapEntry, (BOOLEAN)Dirty);
}
}
}

View file

@ -1,4 +1,4 @@
/* $Id: mdl.c,v 1.65 2004/07/17 03:03:51 ion Exp $
/* $Id: mdl.c,v 1.66 2004/08/01 07:24:58 hbirr Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -126,7 +126,7 @@ MmUnlockPages(PMDL Mdl)
{
ULONG i;
PULONG MdlPages;
PHYSICAL_ADDRESS Page;
PFN_TYPE Page;
/*
* MmProbeAndLockPages MUST have been called to lock this mdl!
@ -166,7 +166,7 @@ MmUnlockPages(PMDL Mdl)
MdlPages = (PULONG)(Mdl + 1);
for (i=0; i<(PAGE_ROUND_UP(Mdl->ByteCount+Mdl->ByteOffset)/PAGE_SIZE); i++)
{
Page.QuadPart = MdlPages[i] << PAGE_SHIFT;
Page = MdlPages[i];
MmUnlockPage(Page);
MmDereferencePage(Page);
}
@ -195,13 +195,12 @@ MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
*/
{
PVOID Base;
ULONG i;
PULONG MdlPages;
KIRQL oldIrql;
ULONG PageCount;
ULONG StartingOffset;
PEPROCESS CurrentProcess;
PHYSICAL_ADDRESS Page;
NTSTATUS Status;
DPRINT("MmMapLockedPages(Mdl %x, AccessMode %x)\n", Mdl, AccessMode);
@ -289,24 +288,20 @@ MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
/* Set the virtual mappings for the MDL pages. */
MdlPages = (PULONG)(Mdl + 1);
for (i = 0; i < PageCount; i++)
Status = MmCreateVirtualMapping(CurrentProcess,
Base,
PAGE_READWRITE,
MdlPages,
PageCount);
if (!NT_SUCCESS(Status))
{
NTSTATUS Status;
Page.QuadPart = MdlPages[i] << PAGE_SHIFT;
Status = MmCreateVirtualMapping(CurrentProcess,
(PVOID)((ULONG)Base+(i*PAGE_SIZE)),
PAGE_READWRITE,
Page,
FALSE);
if (!NT_SUCCESS(Status))
DbgPrint("Unable to create virtual mapping\n");
if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
{
DbgPrint("Unable to create virtual mapping\n");
if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
{
return NULL;
}
KEBUGCHECK(0);
return NULL;
}
KEBUGCHECK(0);
}
/* Mark the MDL has having being mapped. */
@ -449,18 +444,10 @@ MmUnmapReservedMapping (
VOID
MmBuildMdlFromPages(PMDL Mdl, PULONG Pages)
MmBuildMdlFromPages(PMDL Mdl, PPFN_TYPE Pages)
{
ULONG i;
PULONG MdlPages;
memcpy(Mdl + 1, Pages, sizeof(PFN_TYPE) * (PAGE_ROUND_UP(Mdl->ByteOffset+Mdl->ByteCount)/PAGE_SIZE));
MdlPages = (PULONG)(Mdl + 1);
for (i=0;i<(PAGE_ROUND_UP(Mdl->ByteOffset+Mdl->ByteCount)/PAGE_SIZE);i++)
{
MdlPages[i] = Pages[i] >> PAGE_SHIFT;
}
//FIXME: this flag should be set by the caller perhaps?
Mdl->MdlFlags |= MDL_IO_PAGE_READ;
}
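With the MDL page array now holding PFN_TYPE entries directly, MmBuildMdlFromPages reduces to the memcpy above. A sketch of the single-page usage this enables, mirroring the pagefile path later in this commit (the helper name and storage layout are assumptions):
/* Sketch: describe one physical page with an MDL built from its PFN.
 * MdlStorage must provide at least sizeof(MDL) + sizeof(PFN_TYPE) bytes. */
static VOID
MiDescribeOnePage(PFN_TYPE Page, PVOID MdlStorage)
{
   PMDL Mdl = (PMDL)MdlStorage;

   MmInitializeMdl(Mdl, NULL, PAGE_SIZE);    /* one page worth of buffer */
   MmBuildMdlFromPages(Mdl, &Page);          /* copies the PFN into the array behind the header */
}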
@ -513,12 +500,12 @@ VOID STDCALL MmProbeAndLockPages (PMDL Mdl,
* work no matter what kind of mdl address you have.
*/
{
PULONG MdlPages;
PPFN_TYPE MdlPages;
ULONG i, j;
ULONG NrPages;
NTSTATUS Status;
KPROCESSOR_MODE Mode;
PHYSICAL_ADDRESS Page;
PFN_TYPE Page;
PEPROCESS CurrentProcess = PsGetCurrentProcess();
DPRINT("MmProbeAndLockPages(Mdl %x)\n", Mdl);
@ -526,21 +513,21 @@ VOID STDCALL MmProbeAndLockPages (PMDL Mdl,
assert(!(Mdl->MdlFlags & (MDL_PAGES_LOCKED|MDL_MAPPED_TO_SYSTEM_VA|MDL_PARTIAL|
MDL_IO_SPACE|MDL_SOURCE_IS_NONPAGED_POOL)));
MdlPages = (ULONG *)(Mdl + 1);
MdlPages = (PPFN_TYPE)(Mdl + 1);
NrPages = PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGE_SIZE;
/* mdl must have enough page entries */
assert(NrPages <= (Mdl->Size - sizeof(MDL))/sizeof(ULONG));
assert(NrPages <= (Mdl->Size - sizeof(MDL))/sizeof(PFN_TYPE));
if (Mdl->StartVa >= (PVOID)KERNEL_BASE &&
(MmGetPhysicalAddressForProcess(NULL, Mdl->StartVa).QuadPart >> PAGE_SHIFT) > MmPageArraySize)
MmGetPfnForProcess(NULL, Mdl->StartVa) > MmPageArraySize)
{
/* phys addr is not phys memory so this must be io memory */
for (i = 0; i < NrPages; i++)
{
MdlPages[i] = MmGetPhysicalAddressForProcess(NULL, (char*)Mdl->StartVa + (i*PAGE_SIZE)).QuadPart >> PAGE_SHIFT;
MdlPages[i] = MmGetPfnForProcess(NULL, (char*)Mdl->StartVa + (i*PAGE_SIZE));
}
Mdl->MdlFlags |= MDL_PAGES_LOCKED|MDL_IO_SPACE;
@ -585,7 +572,7 @@ VOID STDCALL MmProbeAndLockPages (PMDL Mdl,
{
for (j = 0; j < i; j++)
{
Page.QuadPart = MdlPages[j] << PAGE_SHIFT;
Page = MdlPages[j];
MmUnlockPage(Page);
MmDereferencePage(Page);
}
@ -594,26 +581,26 @@ VOID STDCALL MmProbeAndLockPages (PMDL Mdl,
}
else
{
MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
MmLockPage(MmGetPfnForProcess(NULL, Address));
}
if ((Operation == IoWriteAccess || Operation == IoModifyAccess) &&
(!(MmGetPageProtect(NULL, (PVOID)Address) & PAGE_READWRITE)))
(!(MmGetPageProtect(NULL, (PVOID)Address) & PAGE_READWRITE)))
{
Status = MmAccessFault(Mode, (ULONG)Address, TRUE);
if (!NT_SUCCESS(Status))
{
for (j = 0; j < i; j++)
{
Page.QuadPart = (ULONGLONG)MdlPages[j] << PAGE_SHIFT;
Page = MdlPages[j];
MmUnlockPage(Page);
MmDereferencePage(Page);
}
ExRaiseStatus(Status);
}
}
Page = MmGetPhysicalAddressForProcess(NULL, Address);
MdlPages[i] = Page.QuadPart >> PAGE_SHIFT;
Page = MmGetPfnForProcess(NULL, Address);
MdlPages[i] = Page;
MmReferencePage(Page);
}
@ -668,7 +655,7 @@ ULONG STDCALL MmSizeOfMdl (PVOID Base,
len = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base,Length);
return(sizeof(MDL)+(len*sizeof(ULONG)));
return(sizeof(MDL)+(len*sizeof(PFN_TYPE)));
}
@ -690,6 +677,7 @@ MmBuildMdlForNonPagedPool (PMDL Mdl)
{
ULONG i;
ULONG PageCount;
PPFN_TYPE MdlPages;
/*
* mdl buffer must (at least) be in kernel space, though this doesn't
@ -698,14 +686,14 @@ MmBuildMdlForNonPagedPool (PMDL Mdl)
assert((ULONG)Mdl->StartVa >= KERNEL_BASE);
PageCount = PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGE_SIZE;
MdlPages = (PPFN_TYPE)(Mdl + 1);
/* mdl must have enough page entries */
assert(PageCount <= (Mdl->Size - sizeof(MDL))/sizeof(ULONG));
assert(PageCount <= (Mdl->Size - sizeof(MDL))/sizeof(PFN_TYPE));
for (i=0; i < PageCount; i++)
{
((PULONG)(Mdl + 1))[i] =
(MmGetPhysicalAddress((char*)Mdl->StartVa + (i * PAGE_SIZE))).QuadPart >> PAGE_SHIFT;
*MdlPages++ = MmGetPfnForProcess(NULL, (char*)Mdl->StartVa + (i * PAGE_SIZE));
}
Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;

View file

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: mm.c,v 1.75 2004/07/17 03:03:51 ion Exp $
/* $Id: mm.c,v 1.76 2004/08/01 07:24:58 hbirr Exp $
*
* COPYRIGHT: See COPYING in the top directory
* PROJECT: ReactOS kernel
@ -312,7 +312,7 @@ NTSTATUS MmAccessFault(KPROCESSOR_MODE Mode,
NTSTATUS MmCommitPagedPoolAddress(PVOID Address, BOOLEAN Locked)
{
NTSTATUS Status;
PHYSICAL_ADDRESS AllocatedPage;
PFN_TYPE AllocatedPage;
Status = MmRequestPageMemoryConsumer(MC_PPOOL, FALSE, &AllocatedPage);
if (!NT_SUCCESS(Status))
{
@ -324,19 +324,8 @@ NTSTATUS MmCommitPagedPoolAddress(PVOID Address, BOOLEAN Locked)
MmCreateVirtualMapping(NULL,
(PVOID)PAGE_ROUND_DOWN(Address),
PAGE_READWRITE,
AllocatedPage,
FALSE);
if (!NT_SUCCESS(Status))
{
MmUnlockAddressSpace(MmGetKernelAddressSpace());
Status =
MmCreateVirtualMapping(NULL,
(PVOID)PAGE_ROUND_DOWN(Address),
PAGE_READWRITE,
AllocatedPage,
FALSE);
MmLockAddressSpace(MmGetKernelAddressSpace());
}
&AllocatedPage,
1);
if (Locked)
{
MmLockPage(AllocatedPage);
@ -352,6 +341,7 @@ NTSTATUS MmNotPresentFault(KPROCESSOR_MODE Mode,
MEMORY_AREA* MemoryArea;
NTSTATUS Status;
BOOLEAN Locked = FromMdl;
PFN_TYPE Pfn;
DPRINT("MmNotPresentFault(Mode %d, Address %x)\n", Mode, Address);
@ -433,23 +423,13 @@ NTSTATUS MmNotPresentFault(KPROCESSOR_MODE Mode,
break;
case MEMORY_AREA_SHARED_DATA:
Pfn = MmSharedDataPagePhysicalAddress.QuadPart >> PAGE_SHIFT;
Status =
MmCreateVirtualMapping(PsGetCurrentProcess(),
(PVOID)PAGE_ROUND_DOWN(Address),
PAGE_READONLY,
MmSharedDataPagePhysicalAddress,
FALSE);
if (!NT_SUCCESS(Status))
{
MmUnlockAddressSpace(&PsGetCurrentProcess()->AddressSpace);
Status =
MmCreateVirtualMapping(PsGetCurrentProcess(),
(PVOID)PAGE_ROUND_DOWN(Address),
PAGE_READONLY,
MmSharedDataPagePhysicalAddress,
TRUE);
MmLockAddressSpace(&PsGetCurrentProcess()->AddressSpace);
}
&Pfn,
1);
break;
default:

View file

@ -1,4 +1,4 @@
/* $Id: mminit.c,v 1.63 2004/04/10 22:35:25 gdalsnes Exp $
/* $Id: mminit.c,v 1.64 2004/08/01 07:24:58 hbirr Exp $
*
* COPYRIGHT: See COPYING in the top directory
* PROJECT: ReactOS kernel
@ -95,7 +95,7 @@ MmInitVirtualMemory(ULONG LastKernelAddress,
ULONG ParamLength = KernelLength;
NTSTATUS Status;
PHYSICAL_ADDRESS BoundaryAddressMultiple;
//ULONG i;
PFN_TYPE Pfn;
DPRINT("MmInitVirtualMemory(%x, %x)\n",LastKernelAddress, KernelLength);
@ -281,13 +281,13 @@ MmInitVirtualMemory(ULONG LastKernelAddress,
FALSE,
FALSE,
BoundaryAddressMultiple);
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE,
&MmSharedDataPagePhysicalAddress);
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Pfn);
MmSharedDataPagePhysicalAddress.QuadPart = Pfn << PAGE_SHIFT;
Status = MmCreateVirtualMapping(NULL,
(PVOID)KI_USER_SHARED_DATA,
PAGE_READWRITE,
MmSharedDataPagePhysicalAddress,
TRUE);
&Pfn,
1);
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");
@ -499,13 +499,13 @@ MmInit3(VOID)
VOID STATIC
MiFreeInitMemoryPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
PHYSICAL_ADDRESS PhysAddr, SWAPENTRY SwapEntry,
PFN_TYPE Page, SWAPENTRY SwapEntry,
BOOLEAN Dirty)
{
assert(SwapEntry == 0);
if (PhysAddr.QuadPart != 0)
if (Page != 0)
{
MmReleasePageMemoryConsumer(MC_NPPOOL, PhysAddr);
MmReleasePageMemoryConsumer(MC_NPPOOL, Page);
}
}

View file

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: mpw.c,v 1.18 2004/04/10 22:35:25 gdalsnes Exp $
/* $Id: mpw.c,v 1.19 2004/08/01 07:24:58 hbirr Exp $
*
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/mpw.c
@ -49,19 +49,12 @@ static volatile BOOLEAN MpwThreadShouldTerminate;
NTSTATUS STDCALL
MmWriteDirtyPages(ULONG Target, PULONG Actual)
{
PHYSICAL_ADDRESS Page;
PHYSICAL_ADDRESS NextPage;
PFN_TYPE Page;
PFN_TYPE NextPage;
NTSTATUS Status;
Page = MmGetLRUFirstUserPage();
#if defined(__GNUC__)
while (Page.QuadPart != 0LL && Target > 0)
#else
while (Page.QuadPart && Target > 0)
#endif
while (Page != 0 && Target > 0)
{
/*
* FIXME: While the current page is write back it is possible

View file

@ -1,4 +1,4 @@
/* $Id: ncache.c,v 1.28 2004/04/10 22:35:25 gdalsnes Exp $
/* $Id: ncache.c,v 1.29 2004/08/01 07:24:58 hbirr Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -78,27 +78,27 @@ MmAllocateNonCachedMemory(IN ULONG NumberOfBytes)
PAGE_WRITETHROUGH;
for (i = 0; i < (PAGE_ROUND_UP(NumberOfBytes) / PAGE_SIZE); i++)
{
PHYSICAL_ADDRESS NPage;
PFN_TYPE NPage;
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &NPage);
MmCreateVirtualMapping (NULL,
(char*)Result + (i * PAGE_SIZE),
Attributes,
NPage,
TRUE);
&NPage,
1);
}
return ((PVOID)Result);
}
VOID STATIC
MmFreeNonCachedPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
PHYSICAL_ADDRESS PhysAddr, SWAPENTRY SwapEntry,
PFN_TYPE Page, SWAPENTRY SwapEntry,
BOOLEAN Dirty)
{
assert(SwapEntry == 0);
if (PhysAddr.QuadPart != 0)
if (Page != 0)
{
MmReleasePageMemoryConsumer(MC_NPPOOL, PhysAddr);
MmReleasePageMemoryConsumer(MC_NPPOOL, Page);
}
}

View file

@ -1,4 +1,4 @@
/* $Id: npool.c,v 1.85 2004/04/10 22:35:25 gdalsnes Exp $
/* $Id: npool.c,v 1.86 2004/08/01 07:24:58 hbirr Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -1248,54 +1248,58 @@ static BOOLEAN
grow_block(BLOCK_HDR* blk, PVOID end)
{
NTSTATUS Status;
PHYSICAL_ADDRESS Page;
BOOLEAN result = TRUE;
ULONG index;
PFN_TYPE Page[32];
ULONG StartIndex, EndIndex;
ULONG i, j, k;
StartIndex = (ULONG)((char*)(PVOID)PAGE_ROUND_UP((ULONG)((char*)blk + BLOCK_HDR_SIZE)) - (char*)MiNonPagedPoolStart) / PAGE_SIZE;
EndIndex = (ULONG)((char*)PAGE_ROUND_UP(end) - (char*)MiNonPagedPoolStart) / PAGE_SIZE;
PVOID start = (PVOID)PAGE_ROUND_UP((ULONG)((char*)blk + BLOCK_HDR_SIZE));
end = (PVOID)PAGE_ROUND_UP(end);
index = (ULONG)((char*)start - (char*)MiNonPagedPoolStart) / PAGE_SIZE;
while (start < end)
for (i = StartIndex; i < EndIndex; i++)
{
if (!(MiNonPagedPoolAllocMap[index / 32] & (1 << (index % 32))))
if (!(MiNonPagedPoolAllocMap[i / 32] & (1 << (i % 32))))
{
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
if (!NT_SUCCESS(Status))
{
result = FALSE;
break;
}
for (j = i + 1; j < EndIndex && j - i < 32; j++)
{
if (MiNonPagedPoolAllocMap[j / 32] & (1 << (j % 32)))
{
break;
}
}
for (k = 0; k < j - i; k++)
{
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page[k]);
if (!NT_SUCCESS(Status))
{
for (i = 0; i < k; i++)
{
MmReleasePageMemoryConsumer(MC_NPPOOL, Page[i]);
}
return FALSE;
}
}
Status = MmCreateVirtualMapping(NULL,
start,
MiNonPagedPoolStart + i * PAGE_SIZE,
PAGE_READWRITE|PAGE_SYSTEM,
Page,
FALSE);
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");
MmReleasePageMemoryConsumer(MC_NPPOOL, Page);
result = FALSE;
break;
}
MiNonPagedPoolAllocMap[index / 32] |= (1 << (index % 32));
memset(start, 0xcc, PAGE_SIZE);
MiNonPagedPoolNrOfPages++;
k);
if (!NT_SUCCESS(Status))
{
for (i = 0; i < k; i++)
{
MmReleasePageMemoryConsumer(MC_NPPOOL, Page[i]);
}
return FALSE;
}
for (j = i; j < k + i; j++)
{
MiNonPagedPoolAllocMap[j / 32] |= (1 << (j % 32));
}
i += k - 1;
}
index++;
#if defined(__GNUC__)
start += PAGE_SIZE;
#else
{
char* pTemp = start;
pTemp += PAGE_SIZE;
start = pTemp;
}
#endif
}
return result;
return TRUE;
}
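The rewritten grow_block batches its work: it walks the nonpaged pool allocation bitmap for a run of up to 32 unmapped pages, requests that many page frames in one go, maps them with a single MmCreateVirtualMapping call, and only then marks the bitmap bits. A sketch of the run scan under the same ULONG-packed bitmap layout (hypothetical helper, not part of the commit):
/* Sketch: length of the run of clear (unmapped) bits starting at Start,
 * capped at 32 so a stack array of PFNs cannot overflow. */
static ULONG
MiRunOfClearBits(PULONG Bitmap, ULONG Start, ULONG End)
{
   ULONG i;

   for (i = Start; i < End && i - Start < 32; i++)
   {
      if (Bitmap[i / 32] & (1 << (i % 32)))
      {
         break;                /* this page is already mapped */
      }
   }
   return i - Start;
}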
static BLOCK_HDR* get_block(unsigned int size, unsigned long alignment)
@ -1729,7 +1733,7 @@ VOID INIT_FUNCTION
MiInitializeNonPagedPool(VOID)
{
NTSTATUS Status;
PHYSICAL_ADDRESS Page;
PFN_TYPE Page;
ULONG i;
PVOID Address;
#ifdef WHOLE_PAGE_ALLOCATIONS
@ -1793,8 +1797,8 @@ MiInitializeNonPagedPool(VOID)
Status = MmCreateVirtualMapping(NULL,
Address,
PAGE_READWRITE|PAGE_SYSTEM,
Page,
FALSE);
&Page,
1);
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");

View file

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: pagefile.c,v 1.48 2004/06/19 08:53:35 vizzini Exp $
/* $Id: pagefile.c,v 1.49 2004/08/01 07:24:58 hbirr Exp $
*
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/pagefile.c
@ -191,7 +191,7 @@ MmGetOffsetPageFile(PGET_RETRIEVAL_DESCRIPTOR RetrievalPointers, LARGE_INTEGER O
#endif
}
NTSTATUS MmWriteToSwapPage(SWAPENTRY SwapEntry, PHYSICAL_ADDRESS* Page)
NTSTATUS MmWriteToSwapPage(SWAPENTRY SwapEntry, PFN_TYPE Page)
{
ULONG i, offset;
LARGE_INTEGER file_offset;
@ -225,7 +225,7 @@ NTSTATUS MmWriteToSwapPage(SWAPENTRY SwapEntry, PHYSICAL_ADDRESS* Page)
}
MmInitializeMdl(Mdl, NULL, PAGE_SIZE);
MmBuildMdlFromPages(Mdl, (PULONG)Page);
MmBuildMdlFromPages(Mdl, &Page);
file_offset.QuadPart = offset * PAGE_SIZE;
file_offset = MmGetOffsetPageFile(PagingFileList[i]->RetrievalPointers, file_offset);
@ -245,7 +245,7 @@ NTSTATUS MmWriteToSwapPage(SWAPENTRY SwapEntry, PHYSICAL_ADDRESS* Page)
return(Status);
}
NTSTATUS MmReadFromSwapPage(SWAPENTRY SwapEntry, PHYSICAL_ADDRESS* Page)
NTSTATUS MmReadFromSwapPage(SWAPENTRY SwapEntry, PFN_TYPE Page)
{
ULONG i, offset;
LARGE_INTEGER file_offset;
@ -279,7 +279,7 @@ NTSTATUS MmReadFromSwapPage(SWAPENTRY SwapEntry, PHYSICAL_ADDRESS* Page)
}
MmInitializeMdl(Mdl, NULL, PAGE_SIZE);
MmBuildMdlFromPages(Mdl, (PULONG)Page);
MmBuildMdlFromPages(Mdl, &Page);
file_offset.QuadPart = offset * PAGE_SIZE;
file_offset = MmGetOffsetPageFile(PagingFileList[i]->RetrievalPointers, file_offset);
@ -591,12 +591,11 @@ MmDumpToPagingFile(ULONG BugCode,
{
for (i = 0; i < MmStats.NrTotalPages; i++)
{
LARGE_INTEGER PhysicalAddress;
PhysicalAddress.QuadPart = i * PAGE_SIZE;
MdlMap[0] = i;
MmCreateVirtualMappingForKernel(MmCoreDumpPageFrame,
PAGE_READWRITE,
PhysicalAddress);
MdlMap,
1);
#if defined(__GNUC__)
DiskOffset = MmGetOffsetPageFile(RetrievalPointers,

View file

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: rmap.c,v 1.28 2004/04/10 22:35:25 gdalsnes Exp $
/* $Id: rmap.c,v 1.29 2004/08/01 07:24:58 hbirr Exp $
*
* COPYRIGHT: See COPYING in the top directory
* PROJECT: ReactOS kernel
@ -69,7 +69,7 @@ MmInitializeRmapList(VOID)
}
NTSTATUS
MmWritePagePhysicalAddress(PHYSICAL_ADDRESS PhysicalAddress)
MmWritePagePhysicalAddress(PFN_TYPE Page)
{
PMM_RMAP_ENTRY entry;
PMEMORY_AREA MemoryArea;
@ -86,7 +86,7 @@ MmWritePagePhysicalAddress(PHYSICAL_ADDRESS PhysicalAddress)
* process so it isn't freed while we are working.
*/
ExAcquireFastMutex(&RmapListLock);
entry = MmGetRmapListHeadPage(PhysicalAddress);
entry = MmGetRmapListHeadPage(Page);
if (entry == NULL)
{
ExReleaseFastMutex(&RmapListLock);
@ -202,7 +202,7 @@ MmWritePagePhysicalAddress(PHYSICAL_ADDRESS PhysicalAddress)
}
NTSTATUS
MmPageOutPhysicalAddress(PHYSICAL_ADDRESS PhysicalAddress)
MmPageOutPhysicalAddress(PFN_TYPE Page)
{
PMM_RMAP_ENTRY entry;
PMEMORY_AREA MemoryArea;
@ -215,8 +215,8 @@ MmPageOutPhysicalAddress(PHYSICAL_ADDRESS PhysicalAddress)
NTSTATUS Status = STATUS_SUCCESS;
ExAcquireFastMutex(&RmapListLock);
entry = MmGetRmapListHeadPage(PhysicalAddress);
if (entry == NULL || MmGetLockCountPage(PhysicalAddress) != 0)
entry = MmGetRmapListHeadPage(Page);
if (entry == NULL || MmGetLockCountPage(Page) != 0)
{
ExReleaseFastMutex(&RmapListLock);
return(STATUS_UNSUCCESSFUL);
@ -324,12 +324,12 @@ MmPageOutPhysicalAddress(PHYSICAL_ADDRESS PhysicalAddress)
}
VOID
MmSetCleanAllRmaps(PHYSICAL_ADDRESS PhysicalAddress)
MmSetCleanAllRmaps(PFN_TYPE Page)
{
PMM_RMAP_ENTRY current_entry;
ExAcquireFastMutex(&RmapListLock);
current_entry = MmGetRmapListHeadPage(PhysicalAddress);
current_entry = MmGetRmapListHeadPage(Page);
if (current_entry == NULL)
{
DPRINT1("MmIsDirtyRmap: No rmaps.\n");
@ -344,12 +344,12 @@ MmSetCleanAllRmaps(PHYSICAL_ADDRESS PhysicalAddress)
}
VOID
MmSetDirtyAllRmaps(PHYSICAL_ADDRESS PhysicalAddress)
MmSetDirtyAllRmaps(PFN_TYPE Page)
{
PMM_RMAP_ENTRY current_entry;
ExAcquireFastMutex(&RmapListLock);
current_entry = MmGetRmapListHeadPage(PhysicalAddress);
current_entry = MmGetRmapListHeadPage(Page);
if (current_entry == NULL)
{
DPRINT1("MmIsDirtyRmap: No rmaps.\n");
@ -364,12 +364,12 @@ MmSetDirtyAllRmaps(PHYSICAL_ADDRESS PhysicalAddress)
}
BOOL
MmIsDirtyPageRmap(PHYSICAL_ADDRESS PhysicalAddress)
MmIsDirtyPageRmap(PFN_TYPE Page)
{
PMM_RMAP_ENTRY current_entry;
ExAcquireFastMutex(&RmapListLock);
current_entry = MmGetRmapListHeadPage(PhysicalAddress);
current_entry = MmGetRmapListHeadPage(Page);
if (current_entry == NULL)
{
ExReleaseFastMutex(&RmapListLock);
@ -389,7 +389,7 @@ MmIsDirtyPageRmap(PHYSICAL_ADDRESS PhysicalAddress)
}
VOID
MmInsertRmap(PHYSICAL_ADDRESS PhysicalAddress, PEPROCESS Process,
MmInsertRmap(PFN_TYPE Page, PEPROCESS Process,
PVOID Address)
{
PMM_RMAP_ENTRY current_entry;
@ -405,25 +405,24 @@ MmInsertRmap(PHYSICAL_ADDRESS PhysicalAddress, PEPROCESS Process,
new_entry->Address = Address;
new_entry->Process = Process;
if (MmGetPhysicalAddressForProcess(Process, Address).QuadPart !=
PhysicalAddress.QuadPart)
if (MmGetPfnForProcess(Process, Address) != Page)
{
DPRINT1("Insert rmap (%d, 0x%.8X) 0x%.8X which doesn't match physical "
"address 0x%.8X\n", Process->UniqueProcessId, Address,
MmGetPhysicalAddressForProcess(Process, Address).u.LowPart,
PhysicalAddress.u.LowPart);
MmGetPfnForProcess(Process, Address) << PAGE_SHIFT,
Page << PAGE_SHIFT);
KEBUGCHECK(0);
}
ExAcquireFastMutex(&RmapListLock);
current_entry = MmGetRmapListHeadPage(PhysicalAddress);
current_entry = MmGetRmapListHeadPage(Page);
new_entry->Next = current_entry;
MmSetRmapListHeadPage(PhysicalAddress, new_entry);
MmSetRmapListHeadPage(Page, new_entry);
ExReleaseFastMutex(&RmapListLock);
}
VOID
MmDeleteAllRmaps(PHYSICAL_ADDRESS PhysicalAddress, PVOID Context,
MmDeleteAllRmaps(PFN_TYPE Page, PVOID Context,
VOID (*DeleteMapping)(PVOID Context, PEPROCESS Process,
PVOID Address))
{
@ -431,13 +430,13 @@ MmDeleteAllRmaps(PHYSICAL_ADDRESS PhysicalAddress, PVOID Context,
PMM_RMAP_ENTRY previous_entry;
ExAcquireFastMutex(&RmapListLock);
current_entry = MmGetRmapListHeadPage(PhysicalAddress);
current_entry = MmGetRmapListHeadPage(Page);
if (current_entry == NULL)
{
DPRINT1("MmDeleteAllRmaps: No rmaps.\n");
KEBUGCHECK(0);
}
MmSetRmapListHeadPage(PhysicalAddress, NULL);
MmSetRmapListHeadPage(Page, NULL);
while (current_entry != NULL)
{
previous_entry = current_entry;
@ -453,14 +452,14 @@ MmDeleteAllRmaps(PHYSICAL_ADDRESS PhysicalAddress, PVOID Context,
}
VOID
MmDeleteRmap(PHYSICAL_ADDRESS PhysicalAddress, PEPROCESS Process,
MmDeleteRmap(PFN_TYPE Page, PEPROCESS Process,
PVOID Address)
{
PMM_RMAP_ENTRY current_entry, previous_entry;
ExAcquireFastMutex(&RmapListLock);
previous_entry = NULL;
current_entry = MmGetRmapListHeadPage(PhysicalAddress);
current_entry = MmGetRmapListHeadPage(Page);
while (current_entry != NULL)
{
if (current_entry->Process == Process &&
@ -468,7 +467,7 @@ MmDeleteRmap(PHYSICAL_ADDRESS PhysicalAddress, PEPROCESS Process,
{
if (previous_entry == NULL)
{
MmSetRmapListHeadPage(PhysicalAddress, current_entry->Next);
MmSetRmapListHeadPage(Page, current_entry->Next);
}
else
{

View file

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: section.c,v 1.154 2004/07/17 03:03:52 ion Exp $
/* $Id: section.c,v 1.155 2004/08/01 07:24:58 hbirr Exp $
*
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/section.c
@ -71,6 +71,7 @@ static GENERIC_MAPPING MmpSectionMapping = {
#define TAG_SECTION_PAGE_TABLE TAG('M', 'S', 'P', 'T')
#define PAGE_FROM_SSE(E) ((E) & 0xFFFFF000)
#define PFN_FROM_SSE(E) ((E) >> PAGE_SHIFT)
#define SHARE_COUNT_FROM_SSE(E) (((E) & 0x00000FFE) >> 1)
#define IS_SWAP_FROM_SSE(E) ((E) & 0x00000001)
#define MAX_SHARE_COUNT 0x7FF
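The new PFN_FROM_SSE macro complements the existing ones: a section segment entry keeps the swap flag in bit 0, the share count in bits 1 to 11, and the page frame number in the upper bits. A decoding sketch using only these macros (hypothetical helper, not part of the commit):
/* Sketch: return the resident PFN encoded in a section segment entry,
 * or 0 when the entry holds a swap location instead. */
static PFN_TYPE
MiPfnFromSectionEntry(ULONG Entry)
{
   if (IS_SWAP_FROM_SSE(Entry))
   {
      return 0;
   }
   return PFN_FROM_SSE(Entry);
}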
@ -327,7 +328,7 @@ MmUnsharePageEntrySectionSegment(PSECTION_OBJECT Section,
PFILE_OBJECT FileObject;
PBCB Bcb;
SWAPENTRY SavedSwapEntry;
PHYSICAL_ADDRESS Page;
PFN_TYPE Page;
BOOLEAN IsImageSection;
ULONG FileOffset;
@ -335,7 +336,7 @@ MmUnsharePageEntrySectionSegment(PSECTION_OBJECT Section,
IsImageSection = Section->AllocationAttributes & SEC_IMAGE ? TRUE : FALSE;
Page.QuadPart = (LONGLONG)PAGE_FROM_SSE(Entry);
Page = PFN_FROM_SSE(Entry);
FileObject = Section->FileObject;
if (FileObject != NULL &&
!(Segment->Characteristics & IMAGE_SECTION_CHAR_SHARED))
@ -396,7 +397,7 @@ MmUnsharePageEntrySectionSegment(PSECTION_OBJECT Section,
* process and the current segment (also not within an other process).
*/
NTSTATUS Status;
Status = MmWriteToSwapPage(SavedSwapEntry, &Page);
Status = MmWriteToSwapPage(SavedSwapEntry, Page);
if (!NT_SUCCESS(Status))
{
DPRINT1("MM: Failed to write to swap page (Status was 0x%.8X)\n", Status);
@ -443,7 +444,7 @@ BOOL MiIsPageFromCache(PMEMORY_AREA MemoryArea,
NTSTATUS
MiReadPage(PMEMORY_AREA MemoryArea,
ULONG SegOffset,
PHYSICAL_ADDRESS* Page)
PPFN_TYPE Page)
/*
* FUNCTION: Read a page for a section backed memory area.
* PARAMETERS:
@ -480,8 +481,8 @@ MiReadPage(PMEMORY_AREA MemoryArea,
* then get the related cache segment.
*/
if ((FileOffset % PAGE_SIZE) == 0 &&
(SegOffset + PAGE_SIZE <= RawLength || !IsImageSection) &&
!(MemoryArea->Data.SectionData.Segment->Characteristics & IMAGE_SECTION_CHAR_SHARED))
(SegOffset + PAGE_SIZE <= RawLength || !IsImageSection) &&
!(MemoryArea->Data.SectionData.Segment->Characteristics & IMAGE_SECTION_CHAR_SHARED))
{
/*
@ -516,7 +517,7 @@ MiReadPage(PMEMORY_AREA MemoryArea,
* Retrieve the page from the cache segment that we actually want.
*/
(*Page) = MmGetPhysicalAddress((char*)BaseAddress +
FileOffset - BaseOffset);
FileOffset - BaseOffset).QuadPart >> PAGE_SHIFT;
CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, FALSE, TRUE);
}
@ -618,7 +619,7 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
BOOLEAN Locked)
{
ULONG Offset;
LARGE_INTEGER Page;
PFN_TYPE Page;
NTSTATUS Status;
ULONG PAddress;
PSECTION_OBJECT Section;
@ -639,7 +640,7 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
{
if (Locked)
{
MmLockPage(MmGetPhysicalAddressForProcess(AddressSpace->Process, Address));
MmLockPage(MmGetPfnForProcess(AddressSpace->Process, Address));
}
return(STATUS_SUCCESS);
}
@ -661,8 +662,8 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
* Check if this page needs to be mapped COW
*/
if ((Segment->WriteCopy || MemoryArea->Data.SectionData.WriteCopyView) &&
(Region->Protect == PAGE_READWRITE ||
Region->Protect == PAGE_EXECUTE_READWRITE))
(Region->Protect == PAGE_READWRITE ||
Region->Protect == PAGE_EXECUTE_READWRITE))
{
Attributes = Region->Protect == PAGE_READWRITE ? PAGE_READONLY : PAGE_EXECUTE_READ;
}
@ -744,26 +745,15 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
return(STATUS_MM_RESTART_OPERATION);
}
Page.QuadPart = (LONGLONG)(PAGE_FROM_SSE(Entry));
Page = PFN_FROM_SSE(Entry);
MmSharePageEntrySectionSegment(Segment, Offset);
Status = MmCreateVirtualMapping(MemoryArea->Process,
Address,
Attributes,
Page,
FALSE);
if (Status == STATUS_NO_MEMORY)
{
MmUnlockAddressSpace(AddressSpace);
Status = MmCreateVirtualMapping(MemoryArea->Process,
Address,
Attributes,
Page,
TRUE);
MmLockAddressSpace(AddressSpace);
}
&Page,
1);
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");
@ -809,7 +799,7 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
KEBUGCHECK(0);
}
Status = MmReadFromSwapPage(SwapEntry, &Page);
Status = MmReadFromSwapPage(SwapEntry, Page);
if (!NT_SUCCESS(Status))
{
DPRINT1("MmReadFromSwapPage failed, status = %x\n", Status);
@ -819,18 +809,8 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
Status = MmCreateVirtualMapping(AddressSpace->Process,
Address,
Region->Protect,
Page,
FALSE);
if (Status == STATUS_NO_MEMORY)
{
MmUnlockAddressSpace(AddressSpace);
Status = MmCreateVirtualMapping(AddressSpace->Process,
Address,
Region->Protect,
Page,
TRUE);
MmLockAddressSpace(AddressSpace);
}
&Page,
1);
if (!NT_SUCCESS(Status))
{
DPRINT("MmCreateVirtualMapping failed, not out of memory\n");
@ -853,7 +833,7 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
*/
if (Locked)
{
MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
MmLockPage(Page);
}
PageOp->Status = STATUS_SUCCESS;
MmspCompleteAndReleasePageOp(PageOp);
@ -870,22 +850,12 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
/*
* Just map the desired physical page
*/
Page.QuadPart = Offset + MemoryArea->Data.SectionData.ViewOffset;
Page = (Offset + MemoryArea->Data.SectionData.ViewOffset) >> PAGE_SHIFT;
Status = MmCreateVirtualMapping(AddressSpace->Process,
Address,
Region->Protect,
Page,
FALSE);
if (Status == STATUS_NO_MEMORY)
{
MmUnlockAddressSpace(AddressSpace);
Status = MmCreateVirtualMapping(AddressSpace->Process,
Address,
Region->Protect,
Page,
TRUE);
MmLockAddressSpace(AddressSpace);
}
&Page,
1);
if (!NT_SUCCESS(Status))
{
DPRINT("MmCreateVirtualMapping failed, not out of memory\n");
@ -930,19 +900,8 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
Status = MmCreateVirtualMapping(AddressSpace->Process,
Address,
Region->Protect,
Page,
FALSE);
if (Status == STATUS_NO_MEMORY)
{
MmUnlockAddressSpace(AddressSpace);
Status = MmCreateVirtualMapping(AddressSpace->Process,
Address,
Region->Protect,
Page,
TRUE);
MmLockAddressSpace(AddressSpace);
}
&Page,
1);
if (!NT_SUCCESS(Status))
{
DPRINT("MmCreateVirtualMapping failed, not out of memory\n");
@ -983,7 +942,7 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
MmUnlockAddressSpace(AddressSpace);
if ((Segment->Flags & MM_PAGEFILE_SEGMENT) ||
(Offset >= PAGE_ROUND_UP(Segment->RawLength) && Section->AllocationAttributes & SEC_IMAGE))
(Offset >= PAGE_ROUND_UP(Segment->RawLength) && Section->AllocationAttributes & SEC_IMAGE))
{
Status = MmRequestPageMemoryConsumer(MC_USER, TRUE, &Page);
if (!NT_SUCCESS(Status))
@ -1034,25 +993,15 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
* Mark the offset within the section as having valid, in-memory
* data
*/
Entry = MAKE_SSE(Page.u.LowPart, 1);
Entry = MAKE_SSE(Page << PAGE_SHIFT, 1);
MmSetPageEntrySectionSegment(Segment, Offset, Entry);
MmUnlockSectionSegment(Segment);
Status = MmCreateVirtualMapping(AddressSpace->Process,
Address,
Attributes,
Page,
FALSE);
if (Status == STATUS_NO_MEMORY)
{
MmUnlockAddressSpace(AddressSpace);
Status = MmCreateVirtualMapping(AddressSpace->Process,
Address,
Attributes,
Page,
TRUE);
MmLockAddressSpace(AddressSpace);
}
&Page,
1);
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");
@ -1088,7 +1037,7 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
KEBUGCHECK(0);
}
Status = MmReadFromSwapPage(SwapEntry, &Page);
Status = MmReadFromSwapPage(SwapEntry, Page);
if (!NT_SUCCESS(Status))
{
KEBUGCHECK(0);
@ -1115,7 +1064,7 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
* Mark the offset within the section as having valid, in-memory
* data
*/
Entry = MAKE_SSE(Page.u.LowPart, 1);
Entry = MAKE_SSE(Page << PAGE_SHIFT, 1);
MmSetPageEntrySectionSegment(Segment, Offset, Entry);
MmUnlockSectionSegment(Segment);
@ -1126,18 +1075,8 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
Status = MmCreateVirtualMapping(AddressSpace->Process,
Address,
Region->Protect,
Page,
FALSE);
if (Status == STATUS_NO_MEMORY)
{
MmUnlockAddressSpace(AddressSpace);
Status = MmCreateVirtualMapping(AddressSpace->Process,
Address,
Region->Protect,
Page,
TRUE);
MmLockAddressSpace(AddressSpace);
}
&Page,
1);
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");
@ -1160,7 +1099,7 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
* take another reference to the page
*/
Page.QuadPart = (LONGLONG)PAGE_FROM_SSE(Entry);
Page = PFN_FROM_SSE(Entry);
MmSharePageEntrySectionSegment(Segment, Offset);
MmUnlockSectionSegment(Segment);
@ -1168,18 +1107,8 @@ MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
Status = MmCreateVirtualMapping(AddressSpace->Process,
Address,
Attributes,
Page,
FALSE);
if (Status == STATUS_NO_MEMORY)
{
MmUnlockAddressSpace(AddressSpace);
Status = MmCreateVirtualMapping(AddressSpace->Process,
Address,
Attributes,
Page,
TRUE);
MmLockAddressSpace(AddressSpace);
}
&Page,
1);
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");
@ -1205,8 +1134,8 @@ MmAccessFaultSectionView(PMADDRESS_SPACE AddressSpace,
{
PMM_SECTION_SEGMENT Segment;
PSECTION_OBJECT Section;
PHYSICAL_ADDRESS OldPage;
PHYSICAL_ADDRESS NewPage;
PFN_TYPE OldPage;
PFN_TYPE NewPage;
PVOID NewAddress;
NTSTATUS Status;
ULONG PAddress;
@ -1241,7 +1170,7 @@ MmAccessFaultSectionView(PMADDRESS_SPACE AddressSpace,
*/
MmLockSectionSegment(Segment);
OldPage = MmGetPhysicalAddressForProcess(NULL, Address);
OldPage = MmGetPfnForProcess(NULL, Address);
Entry = MmGetPageEntrySectionSegment(Segment, Offset);
MmUnlockSectionSegment(Segment);
@ -1258,7 +1187,7 @@ MmAccessFaultSectionView(PMADDRESS_SPACE AddressSpace,
}
if (IS_SWAP_FROM_SSE(Entry) ||
PAGE_FROM_SSE(Entry) != OldPage.u.LowPart)
PFN_FROM_SSE(Entry) != OldPage)
{
/* This is a private page. We must only change the page protection. */
MmSetPageProtect(AddressSpace->Process, (PVOID)PAddress, Region->Protect);
@ -1339,18 +1268,8 @@ MmAccessFaultSectionView(PMADDRESS_SPACE AddressSpace,
Status = MmCreateVirtualMapping(AddressSpace->Process,
Address,
Region->Protect,
NewPage,
FALSE);
if (Status == STATUS_NO_MEMORY)
{
MmUnlockAddressSpace(AddressSpace);
Status = MmCreateVirtualMapping(AddressSpace->Process,
Address,
Region->Protect,
NewPage,
TRUE);
MmLockAddressSpace(AddressSpace);
}
&NewPage,
1);
if (!NT_SUCCESS(Status))
{
DPRINT("MmCreateVirtualMapping failed, not out of memory\n");
@ -1388,7 +1307,7 @@ MmPageOutDeleteMapping(PVOID Context, PEPROCESS Process, PVOID Address)
{
MM_SECTION_PAGEOUT_CONTEXT* PageOutContext;
BOOL WasDirty;
PHYSICAL_ADDRESS Page;
PFN_TYPE Page;
PageOutContext = (MM_SECTION_PAGEOUT_CONTEXT*)Context;
MmDeleteVirtualMapping(Process,
@ -1422,7 +1341,7 @@ MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
PVOID Address,
PMM_PAGEOP PageOp)
{
PHYSICAL_ADDRESS PhysicalAddress;
PFN_TYPE Page;
MM_SECTION_PAGEOUT_CONTEXT Context;
SWAPENTRY SwapEntry;
ULONG Entry;
@ -1449,7 +1368,7 @@ MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
FileObject = Context.Section->FileObject;
DirectMapped = FALSE;
if (FileObject != NULL &&
!(Context.Segment->Characteristics & IMAGE_SECTION_CHAR_SHARED))
!(Context.Segment->Characteristics & IMAGE_SECTION_CHAR_SHARED))
{
Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
@ -1488,9 +1407,8 @@ MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
AddressSpace->Process ? AddressSpace->Process->UniqueProcessId : 0, Address);
KEBUGCHECK(0);
}
PhysicalAddress =
MmGetPhysicalAddressForProcess(AddressSpace->Process, Address);
SwapEntry = MmGetSavedSwapEntryPage(PhysicalAddress);
Page = MmGetPfnForProcess(AddressSpace->Process, Address);
SwapEntry = MmGetSavedSwapEntryPage(Page);
/*
* Prepare the context structure for the rmap delete call.
@ -1498,7 +1416,7 @@ MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
Context.WasDirty = FALSE;
if (Context.Segment->Characteristics & IMAGE_SECTION_CHAR_BSS ||
IS_SWAP_FROM_SSE(Entry) ||
(LONGLONG)PAGE_FROM_SSE(Entry) != PhysicalAddress.QuadPart)
PFN_FROM_SSE(Entry) != Page)
{
Context.Private = TRUE;
}
@ -1520,10 +1438,10 @@ MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
}
else
{
MmReferencePage(PhysicalAddress);
MmReferencePage(Page);
}
MmDeleteAllRmaps(PhysicalAddress, (PVOID)&Context, MmPageOutDeleteMapping);
MmDeleteAllRmaps(Page, (PVOID)&Context, MmPageOutDeleteMapping);
/*
* If this wasn't a private page then we should have reduced the entry to
@ -1545,7 +1463,7 @@ MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
* If the page is from a pagefile section and has no swap entry,
* we can't free the page at this point.
*/
SwapEntry = MmGetSavedSwapEntryPage(PhysicalAddress);
SwapEntry = MmGetSavedSwapEntryPage(Page);
if (Context.Segment->Flags & MM_PAGEFILE_SEGMENT)
{
if (Context.Private)
@ -1556,9 +1474,9 @@ MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
}
if (!Context.WasDirty && SwapEntry != 0)
{
MmSetSavedSwapEntryPage(PhysicalAddress, 0);
MmSetSavedSwapEntryPage(Page, 0);
MmSetPageEntrySectionSegment(Context.Segment, Context.Offset, MAKE_SWAP_SSE(SwapEntry));
MmReleasePageMemoryConsumer(MC_USER, PhysicalAddress);
MmReleasePageMemoryConsumer(MC_USER, Page);
PageOp->Status = STATUS_SUCCESS;
MmspCompleteAndReleasePageOp(PageOp);
return(STATUS_SUCCESS);
@ -1574,12 +1492,12 @@ MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
}
if (!Context.WasDirty || SwapEntry != 0)
{
MmSetSavedSwapEntryPage(PhysicalAddress, 0);
MmSetSavedSwapEntryPage(Page, 0);
if (SwapEntry != 0)
{
MmSetPageEntrySectionSegment(Context.Segment, Context.Offset, MAKE_SWAP_SSE(SwapEntry));
}
MmReleasePageMemoryConsumer(MC_USER, PhysicalAddress);
MmReleasePageMemoryConsumer(MC_USER, Page);
PageOp->Status = STATUS_SUCCESS;
MmspCompleteAndReleasePageOp(PageOp);
return(STATUS_SUCCESS);
@ -1611,14 +1529,14 @@ MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
Address);
KEBUGCHECK(0);
}
MmReleasePageMemoryConsumer(MC_USER, PhysicalAddress);
MmReleasePageMemoryConsumer(MC_USER, Page);
PageOp->Status = STATUS_SUCCESS;
MmspCompleteAndReleasePageOp(PageOp);
return(STATUS_SUCCESS);
}
else if (!Context.WasDirty && Context.Private && SwapEntry != 0)
{
MmSetSavedSwapEntryPage(PhysicalAddress, 0);
MmSetSavedSwapEntryPage(Page, 0);
Status = MmCreatePageFileMapping(AddressSpace->Process,
Address,
SwapEntry);
@ -1626,7 +1544,7 @@ MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
{
KEBUGCHECK(0);
}
MmReleasePageMemoryConsumer(MC_USER, PhysicalAddress);
MmReleasePageMemoryConsumer(MC_USER, Page);
PageOp->Status = STATUS_SUCCESS;
MmspCompleteAndReleasePageOp(PageOp);
return(STATUS_SUCCESS);
@ -1650,10 +1568,10 @@ MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
Status = MmCreateVirtualMapping(MemoryArea->Process,
Address,
MemoryArea->Attributes,
PhysicalAddress,
FALSE);
&Page,
1);
MmSetDirtyPage(MemoryArea->Process, Address);
MmInsertRmap(PhysicalAddress,
MmInsertRmap(Page,
MemoryArea->Process,
Address);
}
@ -1667,13 +1585,13 @@ MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
Status = MmCreateVirtualMapping(MemoryArea->Process,
Address,
MemoryArea->Attributes,
PhysicalAddress,
FALSE);
&Page,
1);
MmSetDirtyPage(MemoryArea->Process, Address);
MmInsertRmap(PhysicalAddress,
MmInsertRmap(Page,
MemoryArea->Process,
Address);
Entry = MAKE_SSE(PhysicalAddress.u.LowPart, 1);
Entry = MAKE_SSE(Page << PAGE_SHIFT, 1);
MmSetPageEntrySectionSegment(Context.Segment, Context.Offset, Entry);
}
PageOp->Status = STATUS_UNSUCCESSFUL;
@ -1685,7 +1603,7 @@ MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
/*
* Write the page to the pagefile
*/
Status = MmWriteToSwapPage(SwapEntry, &PhysicalAddress);
Status = MmWriteToSwapPage(SwapEntry, Page);
if (!NT_SUCCESS(Status))
{
DPRINT1("MM: Failed to write to swap page (Status was 0x%.8X)\n",
@ -1699,10 +1617,10 @@ MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
Status = MmCreateVirtualMapping(MemoryArea->Process,
Address,
MemoryArea->Attributes,
PhysicalAddress,
FALSE);
&Page,
1);
MmSetDirtyPage(MemoryArea->Process, Address);
MmInsertRmap(PhysicalAddress,
MmInsertRmap(Page,
MemoryArea->Process,
Address);
}
@ -1711,13 +1629,13 @@ MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
Status = MmCreateVirtualMapping(MemoryArea->Process,
Address,
MemoryArea->Attributes,
PhysicalAddress,
FALSE);
&Page,
1);
MmSetDirtyPage(MemoryArea->Process, Address);
MmInsertRmap(PhysicalAddress,
MmInsertRmap(Page,
MemoryArea->Process,
Address);
Entry = MAKE_SSE(PhysicalAddress.u.LowPart, 1);
Entry = MAKE_SSE(Page << PAGE_SHIFT, 1);
MmSetPageEntrySectionSegment(Context.Segment, Context.Offset, Entry);
}
PageOp->Status = STATUS_UNSUCCESSFUL;
@ -1728,8 +1646,8 @@ MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
/*
* Otherwise we have succeeded.
*/
DPRINT("MM: Wrote section page 0x%.8X to swap!\n", PhysicalAddress);
MmSetSavedSwapEntryPage(PhysicalAddress, 0);
DPRINT("MM: Wrote section page 0x%.8X to swap!\n", Page << PAGE_SHIFT);
MmSetSavedSwapEntryPage(Page, 0);
if (Context.Segment->Flags & MM_PAGEFILE_SEGMENT ||
Context.Segment->Characteristics & IMAGE_SECTION_CHAR_SHARED)
{
@ -1737,7 +1655,7 @@ MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
}
else
{
MmReleasePageMemoryConsumer(MC_USER, PhysicalAddress);
MmReleasePageMemoryConsumer(MC_USER, Page);
}
if (Context.Private)
@ -1770,7 +1688,7 @@ MmWritePageSectionView(PMADDRESS_SPACE AddressSpace,
ULONG Offset;
PSECTION_OBJECT Section;
PMM_SECTION_SEGMENT Segment;
PHYSICAL_ADDRESS PhysicalAddress;
PFN_TYPE Page;
SWAPENTRY SwapEntry;
ULONG Entry;
BOOLEAN Private;
@ -1832,16 +1750,15 @@ MmWritePageSectionView(PMADDRESS_SPACE AddressSpace,
AddressSpace->Process ? AddressSpace->Process->UniqueProcessId : 0, Address);
KEBUGCHECK(0);
}
PhysicalAddress =
MmGetPhysicalAddressForProcess(AddressSpace->Process, Address);
SwapEntry = MmGetSavedSwapEntryPage(PhysicalAddress);
Page = MmGetPfnForProcess(AddressSpace->Process, Address);
SwapEntry = MmGetSavedSwapEntryPage(Page);
/*
* Check for a private (COWed) page.
*/
if (Segment->Characteristics & IMAGE_SECTION_CHAR_BSS ||
IS_SWAP_FROM_SSE(Entry) ||
(LONGLONG)PAGE_FROM_SSE(Entry) != PhysicalAddress.QuadPart)
PFN_FROM_SSE(Entry) != Page)
{
Private = TRUE;
}
@ -1853,7 +1770,7 @@ MmWritePageSectionView(PMADDRESS_SPACE AddressSpace,
/*
* Speculatively set all mappings of the page to clean.
*/
MmSetCleanAllRmaps(PhysicalAddress);
MmSetCleanAllRmaps(Page);
/*
* If this page was direct mapped from the cache then the cache manager
@ -1876,23 +1793,23 @@ MmWritePageSectionView(PMADDRESS_SPACE AddressSpace,
SwapEntry = MmAllocSwapPage();
if (SwapEntry == 0)
{
MmSetDirtyAllRmaps(PhysicalAddress);
MmSetDirtyAllRmaps(Page);
PageOp->Status = STATUS_UNSUCCESSFUL;
MmspCompleteAndReleasePageOp(PageOp);
return(STATUS_PAGEFILE_QUOTA);
}
MmSetSavedSwapEntryPage(PhysicalAddress, SwapEntry);
MmSetSavedSwapEntryPage(Page, SwapEntry);
}
/*
* Write the page to the pagefile
*/
Status = MmWriteToSwapPage(SwapEntry, &PhysicalAddress);
Status = MmWriteToSwapPage(SwapEntry, Page);
if (!NT_SUCCESS(Status))
{
DPRINT1("MM: Failed to write to swap page (Status was 0x%.8X)\n",
Status);
MmSetDirtyAllRmaps(PhysicalAddress);
MmSetDirtyAllRmaps(Page);
PageOp->Status = STATUS_UNSUCCESSFUL;
MmspCompleteAndReleasePageOp(PageOp);
return(STATUS_UNSUCCESSFUL);
@ -1901,7 +1818,7 @@ MmWritePageSectionView(PMADDRESS_SPACE AddressSpace,
/*
* Otherwise we have succeeded.
*/
DPRINT("MM: Wrote section page 0x%.8X to swap!\n", PhysicalAddress);
DPRINT("MM: Wrote section page 0x%.8X to swap!\n", Page << PAGE_SHIFT);
PageOp->Status = STATUS_SUCCESS;
MmspCompleteAndReleasePageOp(PageOp);
return(STATUS_SUCCESS);
@ -1945,17 +1862,16 @@ MmAlterViewAttributes(PMADDRESS_SPACE AddressSpace,
{
ULONG Offset;
ULONG Entry;
LARGE_INTEGER PhysicalAddress;
PFN_TYPE Page;
Offset = (ULONG)Address - (ULONG)MemoryArea->BaseAddress;
Entry = MmGetPageEntrySectionSegment(Segment, Offset);
PhysicalAddress =
MmGetPhysicalAddressForProcess(AddressSpace->Process, Address);
Page = MmGetPfnForProcess(AddressSpace->Process, Address);
Protect = PAGE_READONLY;
if ((Segment->Characteristics & IMAGE_SECTION_CHAR_BSS ||
if (Segment->Characteristics & IMAGE_SECTION_CHAR_BSS ||
IS_SWAP_FROM_SSE(Entry) ||
(LONGLONG)PAGE_FROM_SSE(Entry) != PhysicalAddress.QuadPart))
PFN_FROM_SSE(Entry) != Page)
{
Protect = NewProtect;
}
@ -2061,9 +1977,9 @@ MmpFreePageFileSegment(PMM_SECTION_SEGMENT Segment)
ULONG Offset;
ULONG Entry;
ULONG SavedSwapEntry;
PHYSICAL_ADDRESS Page;
PFN_TYPE Page;
Page.u.HighPart = 0;
Page = 0;
Length = PAGE_ROUND_UP(Segment->Length);
for (Offset = 0; Offset < Length; Offset += PAGE_SIZE)
@ -2077,7 +1993,7 @@ MmpFreePageFileSegment(PMM_SECTION_SEGMENT Segment)
}
else
{
Page.u.LowPart = PAGE_FROM_SSE(Entry);
Page = PFN_FROM_SSE(Entry);
SavedSwapEntry = MmGetSavedSwapEntryPage(Page);
if (SavedSwapEntry != 0)
{
@ -3330,8 +3246,7 @@ NtMapViewOfSection(HANDLE SectionHandle,
VOID STATIC
MmFreeSectionPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
PHYSICAL_ADDRESS PhysAddr, SWAPENTRY SwapEntry,
BOOLEAN Dirty)
PFN_TYPE Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
{
PMEMORY_AREA MArea;
ULONG Entry;
@ -3382,7 +3297,7 @@ MmFreeSectionPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
*/
if (Segment->Flags & MM_DATAFILE_SEGMENT)
{
if (PhysAddr.QuadPart == PAGE_FROM_SSE(Entry) && Dirty)
if (Page == PFN_FROM_SSE(Entry) && Dirty)
{
FileObject = MemoryArea->Data.SectionData.Section->FileObject;
Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
@ -3403,10 +3318,10 @@ MmFreeSectionPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
}
MmFreeSwapPage(SwapEntry);
}
else if (PhysAddr.QuadPart != 0)
else if (Page != 0)
{
if (IS_SWAP_FROM_SSE(Entry) ||
PhysAddr.QuadPart != (PAGE_FROM_SSE(Entry)))
Page != PFN_FROM_SSE(Entry))
{
/*
* Sanity check
@ -3419,18 +3334,18 @@ MmFreeSectionPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
/*
* Just dereference private pages
*/
SavedSwapEntry = MmGetSavedSwapEntryPage(PhysAddr);
SavedSwapEntry = MmGetSavedSwapEntryPage(Page);
if (SavedSwapEntry != 0)
{
MmFreeSwapPage(SavedSwapEntry);
MmSetSavedSwapEntryPage(PhysAddr, 0);
MmSetSavedSwapEntryPage(Page, 0);
}
MmDeleteRmap(PhysAddr, MArea->Process, Address);
MmReleasePageMemoryConsumer(MC_USER, PhysAddr);
MmDeleteRmap(Page, MArea->Process, Address);
MmReleasePageMemoryConsumer(MC_USER, Page);
}
else
{
MmDeleteRmap(PhysAddr, MArea->Process, Address);
MmDeleteRmap(Page, MArea->Process, Address);
MmUnsharePageEntrySectionSegment(Section, Segment, Offset, Dirty, FALSE);
}
}
@ -3778,7 +3693,7 @@ MmAllocateSection (IN ULONG Length)
DPRINT("Result %p\n",Result);
for (i = 0; i < PAGE_ROUND_UP(Length) / PAGE_SIZE; i++)
{
PHYSICAL_ADDRESS Page;
PFN_TYPE Page;
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Page);
if (!NT_SUCCESS(Status))
@ -3789,8 +3704,8 @@ MmAllocateSection (IN ULONG Length)
Status = MmCreateVirtualMapping (NULL,
((char*)Result + (i * PAGE_SIZE)),
PAGE_READWRITE,
Page,
TRUE);
&Page,
1);
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");

View file

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: slab.c,v 1.12 2004/04/10 22:35:26 gdalsnes Exp $
/* $Id: slab.c,v 1.13 2004/08/01 07:24:58 hbirr Exp $
*
* COPYRIGHT: See COPYING in the top directory
* PROJECT: ReactOS kernel
@ -107,23 +107,23 @@ PSLAB_CACHE_PAGE
ExGrowSlabCache(PSLAB_CACHE Slab)
{
PSLAB_CACHE_PAGE SlabPage;
PHYSICAL_ADDRESS PhysicalPage;
PFN_TYPE Pfn;
PVOID Page;
NTSTATUS Status;
ULONG i;
PSLAB_CACHE_BUFCTL BufCtl;
PVOID Object;
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &PhysicalPage);
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Pfn);
if (!NT_SUCCESS(Status))
{
return(NULL);
}
Page = ExAllocatePageWithPhysPage(PhysicalPage);
Page = ExAllocatePageWithPhysPage(Pfn);
if (Page == NULL)
{
MmReleasePageMemoryConsumer(MC_NPPOOL, PhysicalPage);
MmReleasePageMemoryConsumer(MC_NPPOOL, Pfn);
return(NULL);
}
@ -303,7 +303,7 @@ ExDestroySlabCache(PSLAB_CACHE Slab)
while (current_entry != &Slab->PageListHead)
{
PVOID Base;
PHYSICAL_ADDRESS PhysicalPage;
PFN_TYPE Page;
current = CONTAINING_RECORD(current_entry,
SLAB_CACHE_PAGE,
@ -318,9 +318,9 @@ ExDestroySlabCache(PSLAB_CACHE Slab)
Slab->Destructor(Object, Slab->BaseSize);
}
}
PhysicalPage = MmGetPhysicalAddressForProcess(NULL, Base);
Page = MmGetPfnForProcess(NULL, Base);
ExUnmapPage(Base);
MmReleasePageMemoryConsumer(MC_NPPOOL, PhysicalPage);
MmReleasePageMemoryConsumer(MC_NPPOOL, Page);
}
ExFreePool(Slab);
}
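The slab changes above follow a request/map/release pattern expressed in page frame numbers. A sketch of that pattern under the interfaces shown in this diff; MapSlabPage and UnmapSlabPage are hypothetical helpers, not ReactOS routines:

/* Sketch: acquire a frame, map it into kernel space, and tear it down. */
static PVOID
MapSlabPage(VOID)
{
   PFN_TYPE Pfn;
   PVOID Page;

   if (!NT_SUCCESS(MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Pfn)))
   {
      return NULL;
   }
   Page = ExAllocatePageWithPhysPage(Pfn);
   if (Page == NULL)
   {
      /* Mapping failed: hand the frame back to its consumer. */
      MmReleasePageMemoryConsumer(MC_NPPOOL, Pfn);
   }
   return Page;
}

static VOID
UnmapSlabPage(PVOID Base)
{
   /* Recover the frame number from the mapping before tearing it down. */
   PFN_TYPE Page = MmGetPfnForProcess(NULL, Base);
   ExUnmapPage(Base);
   MmReleasePageMemoryConsumer(MC_NPPOOL, Page);
}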

View file

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: wset.c,v 1.19 2004/07/17 03:03:52 ion Exp $
/* $Id: wset.c,v 1.20 2004/08/01 07:24:58 hbirr Exp $
*
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/wset.c
@ -41,18 +41,18 @@
NTSTATUS
MmTrimUserMemory(ULONG Target, ULONG Priority, PULONG NrFreedPages)
{
PHYSICAL_ADDRESS CurrentPhysicalAddress;
PHYSICAL_ADDRESS NextPhysicalAddress;
PFN_TYPE CurrentPage;
PFN_TYPE NextPage;
NTSTATUS Status;
(*NrFreedPages) = 0;
CurrentPhysicalAddress = MmGetLRUFirstUserPage();
while (CurrentPhysicalAddress.QuadPart != 0 && Target > 0)
CurrentPage = MmGetLRUFirstUserPage();
while (CurrentPage != 0 && Target > 0)
{
NextPhysicalAddress = MmGetLRUNextUserPage(CurrentPhysicalAddress);
NextPage = MmGetLRUNextUserPage(CurrentPage);
Status = MmPageOutPhysicalAddress(CurrentPhysicalAddress);
Status = MmPageOutPhysicalAddress(CurrentPage);
if (NT_SUCCESS(Status))
{
DPRINT("Succeeded\n");
@ -61,10 +61,10 @@ MmTrimUserMemory(ULONG Target, ULONG Priority, PULONG NrFreedPages)
}
else if (Status == STATUS_PAGEFILE_QUOTA)
{
MmSetLRULastPage(CurrentPhysicalAddress);
MmSetLRULastPage(CurrentPage);
}
CurrentPhysicalAddress = NextPhysicalAddress;
CurrentPage = NextPage;
}
return(STATUS_SUCCESS);
}
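MmTrimUserMemory above walks the user-page LRU list by frame number, fetching the successor before the current page is paged out and possibly unlinked. A sketch of that walk with a hypothetical per-page visitor; the Mm* calls are the ones used above, and a PFN of 0 ends the list as in the loop condition above:

/* Sketch: generic PFN-based LRU walk; VisitPage is hypothetical. */
static VOID
ForEachLRUUserPage(VOID (*VisitPage)(PFN_TYPE Page))
{
   PFN_TYPE CurrentPage = MmGetLRUFirstUserPage();

   while (CurrentPage != 0)
   {
      /* Capture the successor first; VisitPage may unlink CurrentPage. */
      PFN_TYPE NextPage = MmGetLRUNextUserPage(CurrentPage);
      VisitPage(CurrentPage);
      CurrentPage = NextPage;
   }
}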

View file

@ -1,4 +1,4 @@
/* $Id: w32call.c,v 1.12 2004/02/29 11:51:49 hbirr Exp $
/* $Id: w32call.c,v 1.13 2004/08/01 07:24:59 hbirr Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -32,6 +32,14 @@
#define NDEBUG
#include <internal/debug.h>
#if defined(__GNUC__)
void * alloca(size_t size);
#elif defined(_MSC_VER)
void* _alloca(size_t size);
#else
#error Unknown compiler for alloca intrinsic stack allocation "function"
#endif
/* TYPES *******************************************************************/
typedef struct _NTW32CALL_SAVED_STATE
@ -139,13 +147,13 @@ NtCallbackReturn (PVOID Result,
VOID STATIC
PsFreeCallbackStackPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
PHYSICAL_ADDRESS PhysAddr, SWAPENTRY SwapEntry,
PFN_TYPE Page, SWAPENTRY SwapEntry,
BOOLEAN Dirty)
{
assert(SwapEntry == 0);
if (PhysAddr.QuadPart != 0)
if (Page != 0)
{
MmReleasePageMemoryConsumer(MC_NPPOOL, PhysAddr);
MmReleasePageMemoryConsumer(MC_NPPOOL, Page);
}
}
@ -183,8 +191,10 @@ PsAllocateCallbackStack(ULONG StackSize)
PVOID KernelStack = NULL;
NTSTATUS Status;
PMEMORY_AREA StackArea;
ULONG i;
ULONG i, j;
PHYSICAL_ADDRESS BoundaryAddressMultiple;
PPFN_TYPE Pages = alloca(sizeof(PFN_TYPE) * (StackSize /PAGE_SIZE));
BoundaryAddressMultiple.QuadPart = 0;
StackSize = PAGE_ROUND_UP(StackSize);
@ -207,17 +217,28 @@ PsAllocateCallbackStack(ULONG StackSize)
}
for (i = 0; i < (StackSize / PAGE_SIZE); i++)
{
PHYSICAL_ADDRESS Page;
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Page);
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Pages[i]);
if (!NT_SUCCESS(Status))
{
for (j = 0; j < i; j++)
{
MmReleasePageMemoryConsumer(MC_NPPOOL, Pages[j]);
}
return(NULL);
}
Status = MmCreateVirtualMapping(NULL,
(char*)KernelStack + (i * PAGE_SIZE),
PAGE_EXECUTE_READWRITE,
Page,
TRUE);
}
Status = MmCreateVirtualMapping(NULL,
KernelStack,
PAGE_READWRITE,
Pages,
StackSize / PAGE_SIZE);
if (!NT_SUCCESS(Status))
{
for (i = 0; i < (StackSize / PAGE_SIZE); i++)
{
MmReleasePageMemoryConsumer(MC_NPPOOL, Pages[i]);
}
return(NULL);
}
return(KernelStack);
}