[NTOS]: Zeroed pages should go at the front, not the back of the zero list. Going to the back is a special boot-only case on MP, which isn't supported. Implement zero-only version of MiInsertPageInList, remove MiInsertZeroPageAtBack.

[NTOS]: Remove many other deprecated functions. Physical memory consistency should now be higher than in the past.

svn path=/trunk/; revision=48919
This commit is contained in:
Sir Richard 2010-09-28 14:38:30 +00:00
parent 354ad6b9eb
commit 5b9cd7fcae
6 changed files with 82 additions and 430 deletions

View file

@ -444,10 +444,8 @@ MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
return Base;
}
//
// In user-mode, let ReactOS do it
//
return MiMapLockedPagesInUserSpace(Mdl, Base, CacheType, BaseAddress);
UNIMPLEMENTED;
return NULL;
}
/*
@ -557,10 +555,7 @@ MmUnmapLockedPages(IN PVOID BaseAddress,
}
else
{
//
// Let ReactOS handle it
//
MiUnmapLockedPagesInUserSpace(BaseAddress, Mdl);
UNIMPLEMENTED;
}
}

View file

@ -920,15 +920,9 @@ MiUnmapLockedPagesInUserSpace(
VOID
NTAPI
MiInsertInListTail(
MiInsertPageInList(
IN PMMPFNLIST ListHead,
IN PMMPFN Entry
);
VOID
NTAPI
MiInsertZeroListAtBack(
IN PFN_NUMBER PageIndex
IN PFN_NUMBER PageFrameIndex
);
VOID

View file

@ -71,137 +71,6 @@ MiZeroPhysicalPage(IN PFN_NUMBER PageFrameIndex)
MiUnmapPageInHyperSpace(Process, VirtualAddress, OldIrql);
}
/*
 * Links a PFN entry onto the tail of the given PFN list.
 *
 * ListHead - PFN list to append to.
 * Entry    - PFN database entry being linked; its Flink/Blink are rewritten.
 *
 * Caller must hold the PFN lock (asserted via IRQL check below).
 */
VOID
NTAPI
MiInsertInListTail(IN PMMPFNLIST ListHead,
                   IN PMMPFN Entry)
{
    PFN_NUMBER PreviousTail, EntryIndex = MiGetPfnEntryIndex(Entry);

    /* PFN lock must be held, and the list must be internally consistent */
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
    ASSERT_LIST_INVARIANT(ListHead);

    /* Hook the current tail (or the head itself, if the list is empty) to us */
    PreviousTail = ListHead->Blink;
    if (PreviousTail == LIST_HEAD)
    {
        /* Empty list: we become the first entry */
        ListHead->Flink = EntryIndex;
    }
    else
    {
        /* Non-empty: old tail now points forward at us */
        MiGetPfnEntry(PreviousTail)->u1.Flink = EntryIndex;
    }

    /* We point forward at the head and backward at the old tail */
    Entry->u1.Flink = LIST_HEAD;
    Entry->u2.Blink = PreviousTail;

    /* Head's backlink names us as the new tail; account for the new entry */
    ListHead->Blink = EntryIndex;
    ListHead->Total++;
    ASSERT_LIST_INVARIANT(ListHead);
}
/*
 * Appends a zeroed page to the BACK of the zeroed page list and publishes
 * it as available memory.
 *
 * NOTE(review): per this commit's message, back-insertion is a boot-only
 * MP special case and this routine is deprecated in favor of the
 * front-inserting MiInsertPageInList.
 *
 * EntryIndex - PFN of the page to link; must be unreferenced, cache-
 *              unconstrained, not ROM, not marked for removal, and free
 *              of in-page errors (all asserted below).
 */
VOID
NTAPI
MiInsertZeroListAtBack(IN PFN_NUMBER EntryIndex)
{
PFN_NUMBER OldBlink;
PMMPFNLIST ListHead;
PMMPFN Pfn1;
#if 0
PMMPFN Blink;
ULONG Color;
PMMCOLOR_TABLES ColorHead;
#endif
/* Make sure the PFN lock is held */
ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
/* Get the descriptor and validate the page is eligible for the zero list */
Pfn1 = MiGetPfnEntry(EntryIndex);
ASSERT(Pfn1->u3.e2.ReferenceCount == 0);
ASSERT(Pfn1->u4.MustBeCached == 0);
ASSERT(Pfn1->u3.e1.Rom == 0);
ASSERT(Pfn1->u3.e1.RemovalRequested == 0);
ASSERT(Pfn1->u4.InPageError == 0);
/* Use the zero list */
ListHead = &MmZeroedPageListHead;
ASSERT_LIST_INVARIANT(ListHead);
ListHead->Total++;
/* Get the back link */
OldBlink = ListHead->Blink;
if (OldBlink != LIST_HEAD)
{
/* Set the back pointer to point to us now */
MiGetPfnEntry(OldBlink)->u1.Flink = EntryIndex;
}
else
{
/* List was empty: set the list to point to us */
ListHead->Flink = EntryIndex;
}
/* Set the entry to point to the list head forwards, and the old page backwards */
Pfn1->u1.Flink = LIST_HEAD;
Pfn1->u2.Blink = OldBlink;
/* And now the head points back to us, since we are last */
ListHead->Blink = EntryIndex;
ASSERT_LIST_INVARIANT(ListHead);
/* Update the page location */
Pfn1->u3.e1.PageLocation = ZeroedPageList;
/* Update the available page count */
MmAvailablePages++;
/* Check if we've reached the configured low memory threshold */
if (MmAvailablePages == MmLowMemoryThreshold)
{
/* Clear the event, because now we're ABOVE the threshold */
KeClearEvent(MiLowMemoryEvent);
}
else if (MmAvailablePages == MmHighMemoryThreshold)
{
/* Otherwise check if we reached the high threshold and signal the event */
KeSetEvent(MiHighMemoryEvent, 0, FALSE);
}
/* Per-color free-list maintenance is compiled out (not yet enabled) */
#if 0
/* Get the page color */
Color = EntryIndex & MmSecondaryColorMask;
/* Get the first page on the color list */
ColorHead = &MmFreePagesByColor[ZeroedPageList][Color];
if (ColorHead->Flink == LIST_HEAD)
{
/* The list is empty, so we are the first page */
Pfn1->u4.PteFrame = -1;
ColorHead->Flink = EntryIndex;
}
else
{
/* Get the previous page */
Blink = (PMMPFN)ColorHead->Blink;
/* Make it link to us */
Pfn1->u4.PteFrame = MiGetPfnEntryIndex(Blink);
Blink->OriginalPte.u.Long = EntryIndex;
}
/* Now initialize our own list pointers */
ColorHead->Blink = Pfn1;
Pfn1->OriginalPte.u.Long = LIST_HEAD;
/* And increase the count in the colored list */
ColorHead->Count++;
#endif
}
VOID
NTAPI
MiUnlinkFreeOrZeroedPage(IN PMMPFN Entry)
@ -527,45 +396,6 @@ MiRemoveZeroPage(IN ULONG Color)
/* Return the page */
return PageIndex;
}
/*
 * Pops and returns the first PFN entry from the given PFN list.
 *
 * ListHead - PFN list to remove from; assumed non-empty by the caller.
 *
 * Returns the PFN database entry that was at the head; its list links are
 * cleared. Caller must hold the PFN lock (asserted via IRQL check).
 */
PMMPFN
NTAPI
MiRemoveHeadList(IN PMMPFNLIST ListHead)
{
    PFN_NUMBER FirstIndex, NextIndex;
    PMMPFN FirstEntry;

    /* PFN lock must be held, and the list must be internally consistent */
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
    ASSERT_LIST_INVARIANT(ListHead);

    /* Grab the current head entry and the one that follows it */
    FirstIndex = ListHead->Flink;
    FirstEntry = MiGetPfnEntry(FirstIndex);
    NextIndex = FirstEntry->u1.Flink;

    /* The list now begins at the successor */
    ListHead->Flink = NextIndex;
    if (NextIndex == LIST_HEAD)
    {
        /* List became empty, so the backlink loops back to the head too */
        ListHead->Blink = LIST_HEAD;
    }
    else
    {
        /* The new first page must point back at the list head */
        MiGetPfnEntry(NextIndex)->u2.Blink = LIST_HEAD;
    }

    /* The removed page no longer belongs to any list */
    FirstEntry->u1.Flink = FirstEntry->u2.Blink = 0;
    ListHead->Total--;
    ASSERT_LIST_INVARIANT(ListHead);

    /* Return the head element */
    return FirstEntry;
}
extern KEVENT ZeroPageThreadEvent;
@ -684,6 +514,82 @@ MiInsertPageInFreeList(IN PFN_NUMBER PageFrameIndex)
}
}
/*
 * Inserts an unused page at the FRONT of the given PFN list and publishes
 * it as available memory, signalling/clearing the low/high memory events
 * when the configured thresholds are crossed.
 *
 * Note: This function is hardcoded only for the zeroed page list, for now
 * (asserted below); free pages must go through MiInsertPageInFreeList.
 *
 * ListHead       - target PFN list; must be MmZeroedPageListHead.
 * PageFrameIndex - PFN of the page to link; must be a valid, unreferenced,
 *                  non-ROM page (asserted below).
 */
VOID
NTAPI
MiInsertPageInList(IN PMMPFNLIST ListHead,
IN PFN_NUMBER PageFrameIndex)
{
PFN_NUMBER Flink;
PMMPFN Pfn1, Pfn2;
MMLISTS ListName;
/* For free pages, use MiInsertPageInFreeList */
ASSERT(ListHead != &MmFreePageListHead);
/* Make sure the lock is held */
ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
/* Make sure the PFN is valid */
ASSERT((PageFrameIndex) &&
(PageFrameIndex <= MmHighestPhysicalPage) &&
(PageFrameIndex >= MmLowestPhysicalPage));
/* Page should be unused */
Pfn1 = MiGetPfnEntry(PageFrameIndex);
ASSERT(Pfn1->u3.e2.ReferenceCount == 0);
ASSERT(Pfn1->u3.e1.Rom != 1);
/* Only used for zero pages in ReactOS */
ListName = ListHead->ListName;
ASSERT(ListName == ZeroedPageList);
ListHead->Total++;
/* Don't handle bad pages yet */
ASSERT(Pfn1->u3.e1.RemovalRequested == 0);
/* Make the head of the list point to this page now */
Flink = ListHead->Flink;
ListHead->Flink = PageFrameIndex;
/* Make the page point to the previous head, and back to the list */
Pfn1->u1.Flink = Flink;
Pfn1->u2.Blink = LIST_HEAD;
/* Was the list empty? */
if (Flink != LIST_HEAD)
{
/* It wasn't, so update the backlink of the previous head page */
Pfn2 = MiGetPfnEntry(Flink);
Pfn2->u2.Blink = PageFrameIndex;
}
else
{
/* It was empty, so have it loop back around to this new page */
ListHead->Blink = PageFrameIndex;
}
/* Move the page onto its new location */
Pfn1->u3.e1.PageLocation = ListName;
/* One more page on the system */
MmAvailablePages++;
/* Check if we've reached the configured low memory threshold */
if (MmAvailablePages == MmLowMemoryThreshold)
{
/* Clear the event, because now we're ABOVE the threshold */
KeClearEvent(MiLowMemoryEvent);
}
else if (MmAvailablePages == MmHighMemoryThreshold)
{
/* Otherwise check if we reached the high threshold and signal the event */
KeSetEvent(MiHighMemoryEvent, 0, FALSE);
}
/* FIXME: Color code handling */
}
VOID
NTAPI
MiInitializePfn(IN PFN_NUMBER PageFrameIndex,

View file

@ -682,7 +682,7 @@ MmZeroPageThreadMain(PVOID Ignored)
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MiInsertZeroListAtBack(PageIndex);
MiInsertPageInList(&MmZeroedPageListHead, PageIndex);
Count++;
}
DPRINT("Zeroed %d pages.\n", Count);

View file

@ -52,140 +52,6 @@ MmInitializeRmapList(VOID)
50);
}
/*
 * Writes back (pages out) the given physical page by resolving it through
 * its reverse-mapping (rmap) entry to a process address, then dispatching
 * to the section-view or virtual-memory writeback path.
 *
 * Page - PFN of the physical page to write out.
 *
 * Returns STATUS_SUCCESS on success; STATUS_UNSUCCESSFUL when the page has
 * no rmap entry, the backing memory area vanished or is being deleted, or
 * a page operation could not be obtained.
 *
 * NOTE(review): the lock ordering here is delicate — the rmap lock is
 * dropped only after the owning process is referenced, and the address
 * space lock is dropped only once a PageOp pins the operation. Statement
 * order must not be changed casually.
 */
NTSTATUS
NTAPI
MmWritePagePhysicalAddress(PFN_NUMBER Page)
{
PMM_RMAP_ENTRY entry;
PMEMORY_AREA MemoryArea;
PMMSUPPORT AddressSpace;
ULONG Type;
PVOID Address;
PEPROCESS Process;
PMM_PAGEOP PageOp;
ULONG Offset;
NTSTATUS Status = STATUS_SUCCESS;
/*
* Check that the address still has a valid rmap; then reference the
* process so it isn't freed while we are working.
*/
ExAcquireFastMutex(&RmapListLock);
entry = MmGetRmapListHeadPage(Page);
if (entry == NULL)
{
ExReleaseFastMutex(&RmapListLock);
return(STATUS_UNSUCCESSFUL);
}
Process = entry->Process;
Address = entry->Address;
/* Rmap addresses must be page-aligned; anything else is corruption */
if ((((ULONG_PTR)Address) & 0xFFF) != 0)
{
KeBugCheck(MEMORY_MANAGEMENT);
}
if (Address < MmSystemRangeStart)
{
/* User address: pin the owning process before dropping the rmap lock */
Status = ObReferenceObjectByPointer(Process, PROCESS_ALL_ACCESS, NULL, KernelMode);
ExReleaseFastMutex(&RmapListLock);
if (!NT_SUCCESS(Status))
{
return Status;
}
AddressSpace = &Process->Vm;
}
else
{
/* Kernel address: no process reference needed */
ExReleaseFastMutex(&RmapListLock);
AddressSpace = MmGetKernelAddressSpace();
}
/*
* Lock the address space; then check that the address we are using
* still corresponds to a valid memory area (the page might have been
* freed or paged out after we read the rmap entry.)
*/
MmLockAddressSpace(AddressSpace);
MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, Address);
if (MemoryArea == NULL || MemoryArea->DeleteInProgress)
{
MmUnlockAddressSpace(AddressSpace);
if (Address < MmSystemRangeStart)
{
/* Drop the process reference taken above */
ObDereferenceObject(Process);
}
return(STATUS_UNSUCCESSFUL);
}
Type = MemoryArea->Type;
if (Type == MEMORY_AREA_SECTION_VIEW)
{
/* Translate the VA into an offset within the backing section */
Offset = (ULONG_PTR)Address - (ULONG_PTR)MemoryArea->StartingAddress
+ MemoryArea->Data.SectionData.ViewOffset;
/*
* Get or create a pageop
*/
PageOp = MmGetPageOp(MemoryArea, NULL, 0,
MemoryArea->Data.SectionData.Segment,
Offset, MM_PAGEOP_PAGEOUT, TRUE);
if (PageOp == NULL)
{
MmUnlockAddressSpace(AddressSpace);
if (Address < MmSystemRangeStart)
{
ObDereferenceObject(Process);
}
return(STATUS_UNSUCCESSFUL);
}
/*
* Release locks now we have a page op.
*/
MmUnlockAddressSpace(AddressSpace);
/*
* Do the actual page out work.
*/
Status = MmWritePageSectionView(AddressSpace, MemoryArea,
Address, PageOp);
}
else if (Type == MEMORY_AREA_VIRTUAL_MEMORY)
{
PageOp = MmGetPageOp(MemoryArea, Address < MmSystemRangeStart ? Process->UniqueProcessId : NULL,
Address, NULL, 0, MM_PAGEOP_PAGEOUT, TRUE);
if (PageOp == NULL)
{
MmUnlockAddressSpace(AddressSpace);
if (Address < MmSystemRangeStart)
{
ObDereferenceObject(Process);
}
return(STATUS_UNSUCCESSFUL);
}
/*
* Release locks now we have a page op.
*/
MmUnlockAddressSpace(AddressSpace);
/*
* Do the actual page out work.
*/
Status = MmWritePageVirtualMemory(AddressSpace, MemoryArea,
Address, PageOp);
}
else
{
/* No other memory-area types can own a paged-out rmap'd page */
KeBugCheck(MEMORY_MANAGEMENT);
}
if (Address < MmSystemRangeStart)
{
/* Drop the process reference taken above */
ObDereferenceObject(Process);
}
return(Status);
}
NTSTATUS
NTAPI
MmPageOutPhysicalAddress(PFN_NUMBER Page)

View file

@ -162,115 +162,6 @@ MiProtectVirtualMemory(IN PEPROCESS Process,
return Status;
}
/*
 * Maps the physical pages described by an MDL into the current process's
 * user address space, creating a MEMORY_AREA_MDL_MAPPING region.
 *
 * Mdl         - MDL whose page array (immediately following the MDL) is mapped.
 * BaseVa      - unused here; kept for interface parity with the kernel mapper.
 * CacheType   - MmNonCached adds PAGE_NOCACHE to the protection.
 * BaseAddress - requested base, or NULL to let the VM pick one.
 *
 * Returns the user-mode VA of the mapping adjusted by the MDL byte offset.
 * On failure: returns NULL if MDL_MAPPING_CAN_FAIL is set, otherwise raises
 * STATUS_ACCESS_VIOLATION.
 */
PVOID
NTAPI
MiMapLockedPagesInUserSpace(IN PMDL Mdl,
IN PVOID BaseVa,
IN MEMORY_CACHING_TYPE CacheType,
IN PVOID BaseAddress)
{
PVOID Base;
PPFN_NUMBER MdlPages;
ULONG PageCount;
PEPROCESS CurrentProcess;
NTSTATUS Status;
ULONG Protect;
MEMORY_AREA *Result;
LARGE_INTEGER BoundaryAddressMultiple;
/* Calculate the number of pages required. */
MdlPages = (PPFN_NUMBER)(Mdl + 1);
PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
/* Set default page protection */
Protect = PAGE_READWRITE;
if (CacheType == MmNonCached) Protect |= PAGE_NOCACHE;
BoundaryAddressMultiple.QuadPart = 0;
Base = BaseAddress;
CurrentProcess = PsGetCurrentProcess();
/* Reserve the user-mode region under the address space lock */
MmLockAddressSpace(&CurrentProcess->Vm);
Status = MmCreateMemoryArea(&CurrentProcess->Vm,
MEMORY_AREA_MDL_MAPPING,
&Base,
PageCount * PAGE_SIZE,
Protect,
&Result,
(Base != NULL),
0,
BoundaryAddressMultiple);
MmUnlockAddressSpace(&CurrentProcess->Vm);
if (!NT_SUCCESS(Status))
{
if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
{
return NULL;
}
/* Throw exception */
ExRaiseStatus(STATUS_ACCESS_VIOLATION);
ASSERT(0);
}
/* Set the virtual mappings for the MDL pages. */
if (Mdl->MdlFlags & MDL_IO_SPACE)
{
/* I/O-space frames need the unsafe variant (no PFN database checks) */
Status = MmCreateVirtualMappingUnsafe(CurrentProcess,
Base,
Protect,
MdlPages,
PageCount);
}
else
{
/* Map the pages */
Status = MmCreateVirtualMapping(CurrentProcess,
Base,
Protect,
MdlPages,
PageCount);
}
/* Check if the mapping succeeded */
if (!NT_SUCCESS(Status))
{
/* NOTE(review): the memory area created above is not freed on this
path — looks like a leak; confirm against MmFreeMemoryArea usage */
/* If it can fail, return NULL */
if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;
/* Throw exception */
ExRaiseStatus(STATUS_ACCESS_VIOLATION);
}
/* Return the base, adjusted by the MDL's starting byte offset */
Base = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
return Base;
}
/*
 * Tears down a user-space MDL mapping previously created by
 * MiMapLockedPagesInUserSpace, by freeing its backing memory area.
 *
 * BaseAddress - user VA of the mapping to remove.
 * Mdl         - MDL that was mapped; must belong to the current process.
 */
VOID
NTAPI
MiUnmapLockedPagesInUserSpace(IN PVOID BaseAddress,
                              IN PMDL Mdl)
{
    PMEMORY_AREA Area;

    /* Only the owning process may unmap its own MDL mapping */
    ASSERT(Mdl->Process == PsGetCurrentProcess());

    /* Locate the memory area backing this mapping; it must exist */
    Area = MmLocateMemoryAreaByAddress(&Mdl->Process->Vm, BaseAddress);
    ASSERT(Area);

    /* Release the region */
    MmFreeMemoryArea(&Mdl->Process->Vm, Area, NULL, NULL);
}
/* SYSTEM CALLS ***************************************************************/
NTSTATUS NTAPI