- Major rewrite of Memory Descriptor List (MDL) implementation (moving it towards using System PTEs).

  - MmCreateMdl, MmSizeOfMdl: No Change.
  - MmBuildMdlForNonPagedPool: Do not use MmGetPfnForProcess, just normal PMMPTE manipulation.
    - This seems to cause issues in certain scenarios, because in ReactOS, nonpaged pool, a resident and guaranteed resource, does not always have its PDEs mapped!
    - By calling MmGetPfnForProcess, the lookup wound up deep in the annals of the ReactOS Mm code, which lazily remapped the PDE. We detected this issue specifically in the cache manager and fixed it there (see the first hunk below); it should not appear anywhere else.
  - MmAllocatePagesForMdl, MmAllocatePagesForMdlEx, MmFreePagesFromMdl:
    - The *Ex function is now implemented.
    - Allocating pages now uses MiAllocatePagesForMdl, which is based on the older MmAllocPagesSpecifyRange.
      - The code is cleaner, better commented, and better handles partial MDLs.
      - Cache flags are still ignored (so the Ex functionality isn't really there).
  - MmMapLockedPages, MmMapLockedPagesSpecifyCache, MmUnmapLockedPages:
    - These functions now use System PTEs for the mappings, instead of the hacked-up "MDL Mapping Space".
      - This frees up 256MB of Kernel Virtual Address Space.
      - Takes advantage of all System PTE functionality.
      - Once again, optimizations in the System PTE code will be felt here.
    - For user-space mappings, however, the old code is still kept and used.
      - MiMapLockedPagesInUserSpace and MiUnmapLockedPagesInUserSpace are now in virtual.c and provide this.
  - MmProbeAndLockPages, MmUnlockPages:
    - The pages are actually probed now, in SEH. This did not seem to happen before (did someone misread the function's name?)
    - Probe for write is only done for write access to user pages (as documented).
    - We do not probe/check for write access for kernel requests (force Operation to be IoReadAccess).
    - Proper locking is used now: Address Space lock for user mappings, PFN lock for kernel mappings.
    - Faulting in pages (to make them available before locking) is now done outside the address space/PFN lock.
      - You don't want to be holding a spinlock/mutex while doing disk I/O!
    - For write/modify access, if the PTE is not writable, fail the request since the PTE protection overrides.
      - However, if the PTE is writable but also copy on write, then we'll fault the page in for write access, which is a legitimate operation for certain user-mode scenarios.
      - The old version always provided the CopyOnWrite behavior, even for non-CopyOnWrite pages!
    - Reference and lock every valid page that has a PFN entry (non-I/O Pages).
      - The older code did not seem to lock pages that had to be faulted in (weren't already valid).
    - Cleaned up the cleanup code (no pun intended). Because we now mark the pages as locked early on, and because of changes in MmUnlockPages, we can simply use MmUnlockPages in case of error, since it will be able to fully back out any references/locks that we took.
      - Previous code attempted to do this on its own, in a pretty inconsistent manner, which would leave page leaks (both in references and lock count).
    - In MmUnlockPages, there are not as many changes, but we now:
      - Still make sure that an I/O Mapping MDL doesn't have valid PFN database pages (non-I/O).
        - An MDL can cover pages that are both I/O mapped and RAM mapped, so we have to unlock/dereference the latter instead of skipping them as the old code did.
      - Use the PFN lock when checking pages and unlocking/dereferencing them.
  - Overall, non-complete MDLs are now marked by a -1 PFN terminator, and the MDL code has been updated to break out of page-scanning loops early and/or ignore such pages, which can occur in a sparse MDL (see the sketches below).
  - The implementation has been tested on VMware and QEMU for a variety of tasks and was found to be reliable and stable.

svn path=/trunk/; revision=41707
ReactOS Portable Systems Group, 2009-06-30 08:29:22 +00:00
commit bd27f08336 (parent fdf20e8c9a)
8 changed files with 1736 additions and 923 deletions

@@ -247,6 +247,17 @@ WriteCacheSegment(PCACHE_SEGMENT CacheSeg)
{
Size = CacheSeg->Bcb->CacheSegmentSize;
}
//
// Nonpaged pool PDEs in ReactOS must actually be synchronized between the
// MmGlobalPageDirectory and the real system PDE directory. What a mess...
//
{
int i = 0;
do
{
MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)CacheSeg->BaseAddress + (i << PAGE_SHIFT)));
} while (++i < (Size >> PAGE_SHIFT));
}
Mdl = alloca(MmSizeOfMdl(CacheSeg->BaseAddress, Size));
MmInitializeMdl(Mdl, CacheSeg->BaseAddress, Size);
MmBuildMdlForNonPagedPool(Mdl);

File diff suppressed because it is too large.

@@ -53,6 +53,8 @@ extern PMMPTE MiFirstReservedZeroingPte;
extern MI_PFN_CACHE_ATTRIBUTE MiPlatformCacheAttributes[2][MmMaximumCacheType];
extern PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;
extern ULONG MmBootImageSize;
extern PMMPTE MmSystemPtesStart[MaximumPtePoolTypes];
extern PMMPTE MmSystemPtesEnd[MaximumPtePoolTypes];
VOID
NTAPI
@@ -106,4 +108,31 @@ MiCheckForContiguousMemory(
IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute
);
PMDL
NTAPI
MiAllocatePagesForMdl(
IN PHYSICAL_ADDRESS LowAddress,
IN PHYSICAL_ADDRESS HighAddress,
IN PHYSICAL_ADDRESS SkipBytes,
IN SIZE_T TotalBytes,
IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
IN ULONG Flags
);
PVOID
NTAPI
MiMapLockedPagesInUserSpace(
IN PMDL Mdl,
IN PVOID BaseVa,
IN MEMORY_CACHING_TYPE CacheType,
IN PVOID BaseAddress
);
VOID
NTAPI
MiUnmapLockedPagesInUserSpace(
IN PVOID BaseAddress,
IN PMDL Mdl
);
/* EOF */

@@ -432,13 +432,280 @@ MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
}
} while (++i != MmPhysicalMemoryBlock->NumberOfRuns);
//
// And if we get here, it means no suitable physical memory runs were found
//
return 0;
}
PMDL
NTAPI
MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
IN PHYSICAL_ADDRESS HighAddress,
IN PHYSICAL_ADDRESS SkipBytes,
IN SIZE_T TotalBytes,
IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
IN ULONG MdlFlags)
{
PMDL Mdl;
PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
PPFN_NUMBER MdlPage, LastMdlPage;
KIRQL OldIrql;
PLIST_ENTRY ListEntry;
PPHYSICAL_PAGE Pfn1;
LONG LookForZeroedPages; /* Signed: the two-pass loop below counts down to -1 */
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
//
// Convert the low address into a PFN
//
LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);
//
// Convert, and normalize, the high address into a PFN
//
HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;
//
// Validate skipbytes and convert them into pages
//
if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);
//
// Now compute the number of pages the MDL will cover
//
PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
do
{
//
// Try creating an MDL for this many pages
//
Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
if (Mdl) break;
//
// This function is not required to return the amount of pages requested
// In fact, it can return as little as 1 page, and callers are supposed
// to deal with this scenario. So re-attempt the allocation with less
// pages than before, and see if it worked this time.
//
PageCount -= (PageCount >> 4);
} while (PageCount);
//
// Wow, not even a single page was around!
//
if (!Mdl) return NULL;
//
// This is where the page array starts....
//
MdlPage = (PPFN_NUMBER)(Mdl + 1);
//
// Lock the PFN database
//
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
//
// Are we looking for any pages, without discriminating?
//
if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
{
//
// Well then, let's go shopping
//
while (PagesFound < PageCount)
{
//
// Do we have zeroed pages?
//
if (!IsListEmpty(&FreeZeroedPageListHead))
{
//
// Grab a zero page
//
ListEntry = RemoveTailList(&FreeZeroedPageListHead);
}
else if (!IsListEmpty(&FreeUnzeroedPageListHead))
{
//
// Nope, grab an unzeroed page
//
ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
UnzeroedPageCount--;
}
else
{
//
// This is not good... hopefully we have at least SOME pages
//
ASSERT(PagesFound);
break;
}
//
// Get the PFN entry for this page
//
Pfn1 = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
//
// Make sure it's really free
//
ASSERT(Pfn1->Flags.Type == MM_PHYSICAL_PAGE_FREE);
ASSERT(Pfn1->MapCount == 0);
ASSERT(Pfn1->ReferenceCount == 0);
//
// Allocate it and mark it
//
Pfn1->Flags.Type = MM_PHYSICAL_PAGE_USED;
Pfn1->Flags.Consumer = MC_NPPOOL;
Pfn1->Flags.StartOfAllocation = 1;
Pfn1->Flags.EndOfAllocation = 1;
Pfn1->ReferenceCount = 1;
Pfn1->LockCount = 0;
Pfn1->MapCount = 0;
Pfn1->SavedSwapEntry = 0;
//
// Decrease available pages
//
MmStats.NrSystemPages++;
MmStats.NrFreePages--;
//
// Save it into the MDL
//
*MdlPage++ = MiGetPfnEntryIndex(Pfn1);
PagesFound++;
}
}
else
{
//
// You want a specific range of pages. We'll do this in two runs
//
for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
{
//
// Scan the range you specified
//
for (Page = LowPage; Page < HighPage; Page++)
{
//
// Get the PFN entry for this page
//
Pfn1 = MiGetPfnEntry(Page);
//
// Make sure it's free and if this is our first pass, zeroed
//
if (Pfn1->Flags.Type != MM_PHYSICAL_PAGE_FREE) continue;
if (Pfn1->Flags.Zero != LookForZeroedPages) continue;
//
// Sanity checks
//
ASSERT(Pfn1->MapCount == 0);
ASSERT(Pfn1->ReferenceCount == 0);
//
// Now setup the page and mark it
//
Pfn1->Flags.Type = MM_PHYSICAL_PAGE_USED;
Pfn1->Flags.Consumer = MC_NPPOOL;
Pfn1->ReferenceCount = 1;
Pfn1->Flags.StartOfAllocation = 1;
Pfn1->Flags.EndOfAllocation = 1;
Pfn1->LockCount = 0;
Pfn1->MapCount = 0;
Pfn1->SavedSwapEntry = 0;
//
// If this page was unzeroed, we've consumed such a page
//
if (!Pfn1->Flags.Zero) UnzeroedPageCount--;
//
// Decrease available pages
//
MmStats.NrSystemPages++;
MmStats.NrFreePages--;
//
// Save this page into the MDL
//
*MdlPage++ = Page;
if (++PagesFound == PageCount) break;
}
//
// If the first pass was enough, don't keep going, otherwise, go again
//
if (PagesFound == PageCount) break;
}
}
//
// Now release the PFN count
//
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
//
// We might've found fewer pages, but not more ;-)
//
if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);
if (!PagesFound)
{
//
// If we didn't find any pages at all, fail
//
DPRINT1("NO MDL PAGES!\n");
ExFreePool(Mdl);
return NULL;
}
//
// Write out how many pages we found
//
Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);
//
// Terminate the MDL array if there are missing pages
//
if (PagesFound != PageCount) *MdlPage = -1;
//
// Now go back and loop over all the MDL pages
//
MdlPage = (PPFN_NUMBER)(Mdl + 1);
LastMdlPage = MdlPage + PagesFound;
while (MdlPage < LastMdlPage)
{
//
// Check if we've reached the end
//
Page = *MdlPage++;
if (Page == (PFN_NUMBER)-1) break;
//
// Get the PFN entry for the page and check if we should zero it out
//
Pfn1 = MiGetPfnEntry(Page);
if (Pfn1->Flags.Zero == 0) MiZeroPage(Page);
}
//
// We're done, mark the pages as locked (should we lock them, though???)
//
Mdl->Process = NULL;
Mdl->MdlFlags |= MDL_PAGES_LOCKED;
return Mdl;
}
PFN_TYPE
NTAPI
MmAllocEarlyPage(VOID)

@@ -1,918 +0,0 @@
/*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/mdl.c
* PURPOSE: Manipulates MDLs
*
* PROGRAMMERS: David Welch (welch@cwcom.net)
*/
/* INCLUDES ****************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
#if defined (ALLOC_PRAGMA)
#pragma alloc_text(INIT, MmInitializeMdlImplementation)
#endif
/* GLOBALS *******************************************************************/
#define TAG_MDL TAG('M', 'D', 'L', ' ')
#define MI_MDL_MAPPING_REGION_SIZE (256*1024*1024)
PVOID MiMdlMappingRegionBase = NULL;
RTL_BITMAP MiMdlMappingRegionAllocMap;
ULONG MiMdlMappingRegionHint;
KSPIN_LOCK MiMdlMappingRegionLock;
/* PRIVATE FUNCTIONS **********************************************************/
VOID
INIT_FUNCTION
NTAPI
MmInitializeMdlImplementation(VOID)
{
MEMORY_AREA* Result;
NTSTATUS Status;
PVOID Buffer;
PHYSICAL_ADDRESS BoundaryAddressMultiple;
BoundaryAddressMultiple.QuadPart = 0;
MiMdlMappingRegionHint = 0;
MiMdlMappingRegionBase = NULL;
MmLockAddressSpace(MmGetKernelAddressSpace());
Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
MEMORY_AREA_MDL_MAPPING,
&MiMdlMappingRegionBase,
MI_MDL_MAPPING_REGION_SIZE,
PAGE_READWRITE,
&Result,
FALSE,
0,
BoundaryAddressMultiple);
if (!NT_SUCCESS(Status))
{
MmUnlockAddressSpace(MmGetKernelAddressSpace());
KeBugCheck(MEMORY_MANAGEMENT);
}
MmUnlockAddressSpace(MmGetKernelAddressSpace());
Buffer = ExAllocatePoolWithTag(NonPagedPool,
MI_MDL_MAPPING_REGION_SIZE / (PAGE_SIZE * 8),
TAG_MDL);
RtlInitializeBitMap(&MiMdlMappingRegionAllocMap, Buffer, MI_MDL_MAPPING_REGION_SIZE / PAGE_SIZE);
RtlClearAllBits(&MiMdlMappingRegionAllocMap);
KeInitializeSpinLock(&MiMdlMappingRegionLock);
}
/* PUBLIC FUNCTIONS ***********************************************************/
/*
* @implemented
*/
PMDL
NTAPI
MmCreateMdl(IN PMDL Mdl,
IN PVOID Base,
IN ULONG Length)
{
ULONG Size;
/* Check if we don't have an MDL built */
if (!Mdl)
{
/* Calculate the size we'll need and allocate the MDL */
Size = MmSizeOfMdl(Base, Length);
Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
if (!Mdl) return NULL;
}
/* Initialize it */
MmInitializeMdl(Mdl, Base, Length);
DPRINT("Creating MDL: %p\n", Mdl);
DPRINT("Base: %p. Length: %lx\n", Base, Length);
return Mdl;
}
/*
* @implemented
*/
ULONG
NTAPI
MmSizeOfMdl(IN PVOID Base,
IN ULONG Length)
{
/* Return the MDL size */
return sizeof(MDL) + (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER));
}
/*
* @implemented
*/
VOID
NTAPI
MmBuildMdlForNonPagedPool(IN PMDL Mdl)
{
ULONG i;
ULONG PageCount;
PPFN_NUMBER MdlPages;
PVOID Base;
DPRINT("Building MDL: %p\n", Mdl);
/* Sanity checks */
ASSERT(Mdl->ByteCount != 0);
ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
MDL_MAPPED_TO_SYSTEM_VA |
MDL_SOURCE_IS_NONPAGED_POOL |
MDL_PARTIAL)) == 0);
/* We know the MDL isn't associated to a process now */
Mdl->Process = NULL;
/* Get page and VA information */
MdlPages = (PPFN_NUMBER)(Mdl + 1);
Base = Mdl->StartVa;
/* Set the system address and now get the page count */
Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa, Mdl->ByteCount);
ASSERT(PageCount != 0);
/* Go through each page */
for (i = 0; i < PageCount; i++)
{
/* Map it */
*MdlPages++ = MmGetPfnForProcess(NULL,
(PVOID)((ULONG_PTR)Base + (i * PAGE_SIZE)));
}
/* Set the final flag */
Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;
}
/*
* @implemented
*/
VOID
NTAPI
MmFreePagesFromMdl(IN PMDL Mdl)
{
PVOID Base;
PPFN_NUMBER Pages;
LONG NumberOfPages;
DPRINT("Freeing MDL: %p\n", Mdl);
/* Sanity checks */
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
/* Get address and page information */
Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
/* Loop all the MDL pages */
Pages = (PPFN_NUMBER)(Mdl + 1);
while (--NumberOfPages >= 0)
{
/* Dereference each one of them */
MmDereferencePage(Pages[NumberOfPages]);
}
/* Remove the pages locked flag */
Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
/*
* @implemented
*/
PVOID
NTAPI
MmMapLockedPages(IN PMDL Mdl,
IN KPROCESSOR_MODE AccessMode)
{
/* Call the extended version */
return MmMapLockedPagesSpecifyCache(Mdl,
AccessMode,
MmCached,
NULL,
TRUE,
HighPagePriority);
}
/*
* @implemented
*/
VOID
NTAPI
MmUnlockPages(IN PMDL Mdl)
{
ULONG i;
PPFN_NUMBER MdlPages;
PFN_NUMBER Page;
PEPROCESS Process;
PVOID Base;
ULONG Flags, PageCount;
DPRINT("Unlocking MDL: %p\n", Mdl);
/* Sanity checks */
ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
ASSERT(Mdl->ByteCount != 0);
/* Get the process associated and capture the flags which are volatile */
Process = Mdl->Process;
Flags = Mdl->MdlFlags;
/* Automagically undo any calls to MmGetSystemAddressForMdl's for this mdl */
if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
{
/* Unmap the pages from system space */
MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
}
/* Get the page count */
MdlPages = (PPFN_NUMBER)(Mdl + 1);
Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
ASSERT(PageCount != 0);
/* We don't support AWE */
if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);
/* Check if the buffer is mapped I/O space */
if (Flags & MDL_IO_SPACE)
{
/* Check if this was a write */
if (Flags & MDL_WRITE_OPERATION)
{
/* Windows keeps track of the modified bit */
}
/* Check if we have a process */
if (Process)
{
/* Handle the accounting of locked pages */
ASSERT(Process->NumberOfLockedPages > 0);
InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
-PageCount);
}
/* We're done */
Mdl->MdlFlags &= ~MDL_IO_SPACE;
Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
return;
}
/* Check if we have a process */
if (Process)
{
/* Handle the accounting of locked pages */
ASSERT(Process->NumberOfLockedPages > 0);
InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
-PageCount);
}
/* Scan each page */
for (i = 0; i < PageCount; i++)
{
/* Get the page entry */
/* Unlock and dereference it */
Page = MdlPages[i];
MmUnlockPage(Page);
MmDereferencePage(Page);
}
/* We're done */
Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
/*
* @implemented
*/
VOID
NTAPI
MmUnmapLockedPages(IN PVOID BaseAddress,
IN PMDL Mdl)
{
KIRQL oldIrql;
ULONG i, PageCount;
ULONG Base;
MEMORY_AREA *MemoryArea;
DPRINT("Unmapping MDL: %p\n", Mdl);
DPRINT("Base: %p\n", BaseAddress);
/* Sanity check */
ASSERT(Mdl->ByteCount != 0);
/* Check if this is a kernel request */
if (BaseAddress > MM_HIGHEST_USER_ADDRESS)
{
/* Get base and count information */
Base = (ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset;
PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
/* Sanity checks */
ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
ASSERT(PageCount != 0);
ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);
/* ReactOS does not support this flag */
if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES) ASSERT(FALSE);
/* Remove flags */
Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
MDL_PARTIAL_HAS_BEEN_MAPPED |
MDL_FREE_EXTRA_PTES);
/* If we came from non-paged pool, on ReactOS, we can leave */
if (Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) return;
/* Loop each page */
BaseAddress = PAGE_ALIGN(BaseAddress);
for (i = 0; i < PageCount; i++)
{
/* Delete it */
MmDeleteVirtualMapping(NULL,
(PVOID)((ULONG_PTR)BaseAddress + (i * PAGE_SIZE)),
FALSE,
NULL,
NULL);
}
/* Lock the mapping region */
KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
/* Deallocate all the pages used. */
Base = ((ULONG_PTR)BaseAddress - (ULONG_PTR)MiMdlMappingRegionBase) / PAGE_SIZE;
RtlClearBits(&MiMdlMappingRegionAllocMap, Base, PageCount);
MiMdlMappingRegionHint = min(MiMdlMappingRegionHint, Base);
/* Release the lock */
KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
}
else
{
/* Sanity check */
ASSERT(Mdl->Process == PsGetCurrentProcess());
/* Find the memory area */
MemoryArea = MmLocateMemoryAreaByAddress(&Mdl->Process->Vm,
BaseAddress);
ASSERT(MemoryArea);
/* Free it */
MmFreeMemoryArea(&Mdl->Process->Vm,
MemoryArea,
NULL,
NULL);
}
}
/*
* @implemented
*/
VOID
NTAPI
MmProbeAndLockPages(IN PMDL Mdl,
IN KPROCESSOR_MODE AccessMode,
IN LOCK_OPERATION Operation)
{
PPFN_TYPE MdlPages;
PVOID Base, Address;
ULONG i, j;
ULONG NrPages;
NTSTATUS Status = STATUS_SUCCESS;
PFN_TYPE Page;
PEPROCESS CurrentProcess;
PETHREAD Thread;
PMMSUPPORT AddressSpace;
KIRQL OldIrql = KeGetCurrentIrql();
DPRINT("Probing MDL: %p\n", Mdl);
/* Sanity checks */
ASSERT(Mdl->ByteCount != 0);
ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
MDL_MAPPED_TO_SYSTEM_VA |
MDL_SOURCE_IS_NONPAGED_POOL |
MDL_PARTIAL |
MDL_IO_SPACE)) == 0);
/* Get page and base information */
MdlPages = (PPFN_NUMBER)(Mdl + 1);
Base = (PVOID)Mdl->StartVa;
Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
NrPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
ASSERT(NrPages != 0);
/* Check if this is an MDL in I/O Space */
if (Mdl->StartVa >= MmSystemRangeStart &&
MmGetPfnForProcess(NULL, Mdl->StartVa) >= MmHighestPhysicalPage)
{
/* Just loop each page */
for (i = 0; i < NrPages; i++)
{
/* And map it */
MdlPages[i] = MmGetPfnForProcess(NULL,
(PVOID)((ULONG_PTR)Mdl->StartVa + (i * PAGE_SIZE)));
}
/* Set the flags and exit */
Mdl->MdlFlags |= MDL_PAGES_LOCKED|MDL_IO_SPACE;
return;
}
/* Get the thread and process */
Thread = PsGetCurrentThread();
if (Address <= MM_HIGHEST_USER_ADDRESS)
{
/* Get the process */
CurrentProcess = PsGetCurrentProcess();
}
else
{
/* No process */
CurrentProcess = NULL;
}
/* Check what kind of operation this is */
if (Operation != IoReadAccess)
{
/* Set the write flag */
Mdl->MdlFlags |= MDL_WRITE_OPERATION;
}
else
{
/* Remove the write flag */
Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
}
/* Check if this came from kernel mode */
if (Base >= MM_HIGHEST_USER_ADDRESS)
{
/* We should not have a process */
ASSERT(CurrentProcess == NULL);
Mdl->Process = NULL;
AddressSpace = MmGetKernelAddressSpace();
}
else
{
/* Sanity checks */
ASSERT(NrPages != 0);
ASSERT(CurrentProcess == PsGetCurrentProcess());
/* Track locked pages */
InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
NrPages);
/* Save the process */
Mdl->Process = CurrentProcess;
/* Use the process lock */
AddressSpace = &CurrentProcess->Vm;
}
/*
* Lock the pages
*/
if (OldIrql < DISPATCH_LEVEL)
MmLockAddressSpace(AddressSpace);
else
MmAcquirePageListLock(&OldIrql);
for (i = 0; i < NrPages; i++)
{
PVOID Address;
Address = (char*)Mdl->StartVa + (i*PAGE_SIZE);
if (!MmIsPagePresent(NULL, Address))
{
/* Fault the page in */
Status = MmAccessFault(FALSE, Address, AccessMode, NULL);
if (!NT_SUCCESS(Status))
{
goto cleanup;
}
}
else
{
MmLockPage(MmGetPfnForProcess(NULL, Address));
}
if ((Operation == IoWriteAccess || Operation == IoModifyAccess) &&
(!(MmGetPageProtect(NULL, (PVOID)Address) & PAGE_READWRITE)))
{
Status = MmAccessFault(TRUE, Address, AccessMode, NULL);
if (!NT_SUCCESS(Status))
{
for (j = 0; j < i; j++)
{
Page = MdlPages[j];
if (Page < MmHighestPhysicalPage)
{
MmUnlockPage(Page);
MmDereferencePage(Page);
}
}
goto cleanup;
}
}
Page = MmGetPfnForProcess(NULL, Address);
MdlPages[i] = Page;
if (Page >= MmHighestPhysicalPage)
{
Mdl->MdlFlags |= MDL_IO_SPACE;
}
else
{
MmReferencePage(Page);
}
}
cleanup:
if (OldIrql < DISPATCH_LEVEL)
MmUnlockAddressSpace(AddressSpace);
else
MmReleasePageListLock(OldIrql);
if (!NT_SUCCESS(Status))
ExRaiseStatus(STATUS_ACCESS_VIOLATION);
Mdl->MdlFlags |= MDL_PAGES_LOCKED;
return;
}
/*
* @implemented
*/
PMDL
NTAPI
MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
IN PHYSICAL_ADDRESS HighAddress,
IN PHYSICAL_ADDRESS SkipBytes,
IN SIZE_T Totalbytes)
{
PMDL Mdl;
PPFN_TYPE Pages;
ULONG NumberOfPagesWanted, NumberOfPagesAllocated;
ULONG Ret;
DPRINT("Allocating pages: %p\n", LowAddress.LowPart);
/* SkipBytes must be a multiple of the page size */
if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
/* Create the actual MDL */
Mdl = MmCreateMdl(NULL, NULL, Totalbytes);
if (!Mdl) return NULL;
/* Allocate pages into the MDL */
NumberOfPagesAllocated = 0;
NumberOfPagesWanted = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
Pages = (PPFN_TYPE)(Mdl + 1);
while (NumberOfPagesWanted > 0)
{
Ret = MmAllocPagesSpecifyRange(MC_NPPOOL,
LowAddress,
HighAddress,
NumberOfPagesWanted,
Pages + NumberOfPagesAllocated);
if (Ret == (ULONG)-1) break;
NumberOfPagesAllocated += Ret;
NumberOfPagesWanted -= Ret;
if (SkipBytes.QuadPart == 0) break;
LowAddress.QuadPart += SkipBytes.QuadPart;
HighAddress.QuadPart += SkipBytes.QuadPart;
}
/* If nothing was allocated, fail */
if (NumberOfPagesWanted)
{
/* Free our MDL */
ExFreePool(Mdl);
return NULL;
}
/* Zero out the MDL pages */
//RtlZeroMemory(LowAddress.LowPart, NumberOfPagesAllocated * PAGE_SIZE);
/* Return the MDL */
Mdl->MdlFlags |= MDL_PAGES_LOCKED;
Mdl->ByteCount = (ULONG)(NumberOfPagesAllocated * PAGE_SIZE);
return Mdl;
}
/*
* @unimplemented
*/
PMDL
NTAPI
MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
IN PHYSICAL_ADDRESS HighAddress,
IN PHYSICAL_ADDRESS SkipBytes,
IN SIZE_T Totalbytes,
IN MEMORY_CACHING_TYPE CacheType,
IN ULONG Flags)
{
UNIMPLEMENTED;
return NULL;
}
/*
* @implemented
*/
PVOID
NTAPI
MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
IN KPROCESSOR_MODE AccessMode,
IN MEMORY_CACHING_TYPE CacheType,
IN PVOID BaseAddress,
IN ULONG BugCheckOnFailure,
IN MM_PAGE_PRIORITY Priority)
{
PVOID Base;
PULONG MdlPages;
KIRQL oldIrql;
ULONG PageCount;
ULONG StartingOffset;
PEPROCESS CurrentProcess;
NTSTATUS Status;
ULONG Protect;
MEMORY_AREA *Result;
LARGE_INTEGER BoundaryAddressMultiple;
DPRINT("Mapping MDL: %p\n", Mdl);
DPRINT("Base: %p\n", BaseAddress);
/* Sanity checks */
ASSERT(Mdl->ByteCount != 0);
/* Get the base */
Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
/* Set default page protection */
Protect = PAGE_READWRITE;
if (CacheType == MmNonCached) Protect |= PAGE_NOCACHE;
/* Handle kernel case first */
if (AccessMode == KernelMode)
{
/* Get the list of pages and count */
MdlPages = (PPFN_NUMBER)(Mdl + 1);
PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
/* Sanity checks */
ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
MDL_SOURCE_IS_NONPAGED_POOL |
MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);
/* Allocate that number of pages from the mdl mapping region. */
KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
StartingOffset = RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap,
PageCount,
MiMdlMappingRegionHint);
if (StartingOffset == 0xffffffff)
{
KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
DPRINT("Out of MDL mapping space\n");
if ((Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) || !BugCheckOnFailure)
{
return NULL;
}
KeBugCheck(MEMORY_MANAGEMENT);
}
Base = (PVOID)((ULONG_PTR)MiMdlMappingRegionBase + StartingOffset * PAGE_SIZE);
if (MiMdlMappingRegionHint == StartingOffset) MiMdlMappingRegionHint += PageCount;
KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
/* Set the virtual mappings for the MDL pages. */
if (Mdl->MdlFlags & MDL_IO_SPACE)
{
/* Map the pages */
Status = MmCreateVirtualMappingUnsafe(NULL,
Base,
Protect,
MdlPages,
PageCount);
}
else
{
/* Map the pages */
Status = MmCreateVirtualMapping(NULL,
Base,
Protect,
MdlPages,
PageCount);
}
/* Check if the mapping succeeded */
if (!NT_SUCCESS(Status))
{
/* If it can fail, return NULL */
if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;
/* Should we bugcheck? */
if (!BugCheckOnFailure) return NULL;
/* Yes, crash the system */
KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
}
/* Mark it as mapped */
ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
/* Check if it was partial */
if (Mdl->MdlFlags & MDL_PARTIAL)
{
/* Write the appropriate flag here too */
Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
}
/* Save the mapped address */
Base = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
Mdl->MappedSystemVa = Base;
return Base;
}
/* Calculate the number of pages required. */
MdlPages = (PPFN_NUMBER)(Mdl + 1);
PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
BoundaryAddressMultiple.QuadPart = 0;
Base = BaseAddress;
CurrentProcess = PsGetCurrentProcess();
MmLockAddressSpace(&CurrentProcess->Vm);
Status = MmCreateMemoryArea(&CurrentProcess->Vm,
MEMORY_AREA_MDL_MAPPING,
&Base,
PageCount * PAGE_SIZE,
Protect,
&Result,
(Base != NULL),
0,
BoundaryAddressMultiple);
MmUnlockAddressSpace(&CurrentProcess->Vm);
if (!NT_SUCCESS(Status))
{
if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
{
return NULL;
}
/* Throw exception */
ExRaiseStatus(STATUS_ACCESS_VIOLATION);
ASSERT(0);
}
/* Set the virtual mappings for the MDL pages. */
if (Mdl->MdlFlags & MDL_IO_SPACE)
{
/* Map the pages */
Status = MmCreateVirtualMappingUnsafe(CurrentProcess,
Base,
Protect,
MdlPages,
PageCount);
}
else
{
/* Map the pages */
Status = MmCreateVirtualMapping(CurrentProcess,
Base,
Protect,
MdlPages,
PageCount);
}
/* Check if the mapping succeeded */
if (!NT_SUCCESS(Status))
{
/* If it can fail, return NULL */
if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;
/* Throw exception */
ExRaiseStatus(STATUS_ACCESS_VIOLATION);
}
/* Return the base */
Base = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
return Base;
}
/*
* @unimplemented
*/
NTSTATUS
NTAPI
MmAdvanceMdl(IN PMDL Mdl,
IN ULONG NumberOfBytes)
{
UNIMPLEMENTED;
return STATUS_NOT_IMPLEMENTED;
}
/*
* @unimplemented
*/
PVOID
NTAPI
MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress,
IN ULONG PoolTag,
IN PMDL MemoryDescriptorList,
IN MEMORY_CACHING_TYPE CacheType)
{
UNIMPLEMENTED;
return 0;
}
/*
* @unimplemented
*/
VOID
NTAPI
MmUnmapReservedMapping(IN PVOID BaseAddress,
IN ULONG PoolTag,
IN PMDL MemoryDescriptorList)
{
UNIMPLEMENTED;
}
/*
* @unimplemented
*/
NTSTATUS
NTAPI
MmPrefetchPages(IN ULONG NumberOfLists,
IN PREAD_LIST *ReadLists)
{
UNIMPLEMENTED;
return STATUS_NOT_IMPLEMENTED;
}
/*
* @unimplemented
*/
NTSTATUS
NTAPI
MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList,
IN ULONG NewProtect)
{
UNIMPLEMENTED;
return STATUS_NOT_IMPLEMENTED;
}
/*
* @unimplemented
*/
VOID
NTAPI
MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList,
IN PEPROCESS Process,
IN KPROCESSOR_MODE AccessMode,
IN LOCK_OPERATION Operation)
{
UNIMPLEMENTED;
}
/*
* @unimplemented
*/
VOID
NTAPI
MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList,
IN LARGE_INTEGER PageList[],
IN KPROCESSOR_MODE AccessMode,
IN LOCK_OPERATION Operation)
{
UNIMPLEMENTED;
}
/*
* @unimplemented
*/
VOID
NTAPI
MmMapMemoryDumpMdl(IN PMDL Mdl)
{
UNIMPLEMENTED;
}
/* EOF */

@@ -392,9 +392,6 @@ MmInit1(VOID)
/* Initialize paged pool */
MmInitializePagedPool();
/* Initialize MDLs */
MmInitializeMdlImplementation();
/* Initialize working sets */
MmInitializeMemoryConsumer(MC_USER, MmTrimUserMemory);

@@ -622,6 +622,115 @@ MiProtectVirtualMemory(IN PEPROCESS Process,
return Status;
}
PVOID
NTAPI
MiMapLockedPagesInUserSpace(IN PMDL Mdl,
IN PVOID BaseVa,
IN MEMORY_CACHING_TYPE CacheType,
IN PVOID BaseAddress)
{
PVOID Base;
PPFN_NUMBER MdlPages;
ULONG PageCount;
PEPROCESS CurrentProcess;
NTSTATUS Status;
ULONG Protect;
MEMORY_AREA *Result;
LARGE_INTEGER BoundaryAddressMultiple;
/* Calculate the number of pages required. */
MdlPages = (PPFN_NUMBER)(Mdl + 1);
PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
/* Set default page protection */
Protect = PAGE_READWRITE;
if (CacheType == MmNonCached) Protect |= PAGE_NOCACHE;
BoundaryAddressMultiple.QuadPart = 0;
Base = BaseAddress;
CurrentProcess = PsGetCurrentProcess();
MmLockAddressSpace(&CurrentProcess->Vm);
Status = MmCreateMemoryArea(&CurrentProcess->Vm,
MEMORY_AREA_MDL_MAPPING,
&Base,
PageCount * PAGE_SIZE,
Protect,
&Result,
(Base != NULL),
0,
BoundaryAddressMultiple);
MmUnlockAddressSpace(&CurrentProcess->Vm);
if (!NT_SUCCESS(Status))
{
if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
{
return NULL;
}
/* Throw exception */
ExRaiseStatus(STATUS_ACCESS_VIOLATION);
ASSERT(0);
}
/* Set the virtual mappings for the MDL pages. */
if (Mdl->MdlFlags & MDL_IO_SPACE)
{
/* Map the pages */
Status = MmCreateVirtualMappingUnsafe(CurrentProcess,
Base,
Protect,
MdlPages,
PageCount);
}
else
{
/* Map the pages */
Status = MmCreateVirtualMapping(CurrentProcess,
Base,
Protect,
MdlPages,
PageCount);
}
/* Check if the mapping succeeded */
if (!NT_SUCCESS(Status))
{
/* If it can fail, return NULL */
if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;
/* Throw exception */
ExRaiseStatus(STATUS_ACCESS_VIOLATION);
}
/* Return the base */
Base = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
return Base;
}
VOID
NTAPI
MiUnmapLockedPagesInUserSpace(IN PVOID BaseAddress,
IN PMDL Mdl)
{
PMEMORY_AREA MemoryArea;
/* Sanity check */
ASSERT(Mdl->Process == PsGetCurrentProcess());
/* Find the memory area */
MemoryArea = MmLocateMemoryAreaByAddress(&Mdl->Process->Vm,
BaseAddress);
ASSERT(MemoryArea);
/* Free it */
MmFreeMemoryArea(&Mdl->Process->Vm,
MemoryArea,
NULL,
NULL);
}
/* PUBLIC FUNCTIONS ***********************************************************/
/*

@@ -366,6 +366,7 @@
<file>hypermap.c</file>
<file>init.c</file>
<file>iosup.c</file>
<file>mdlsup.c</file>
<file>pool.c</file>
<file>procsup.c</file>
<file>syspte.c</file>
@@ -375,7 +376,6 @@
<file>dbgpool.c</file>
<file>freelist.c</file>
<file>marea.c</file>
<file>mdlsup.c</file>
<file>mmfault.c</file>
<file>mmsup.c</file>
<file>mminit.c</file>