diff --git a/reactos/ntoskrnl/cc/copy.c b/reactos/ntoskrnl/cc/copy.c
index 0d8f74b7384..889068b1cf0 100644
--- a/reactos/ntoskrnl/cc/copy.c
+++ b/reactos/ntoskrnl/cc/copy.c
@@ -247,6 +247,17 @@ WriteCacheSegment(PCACHE_SEGMENT CacheSeg)
     {
         Size = CacheSeg->Bcb->CacheSegmentSize;
     }
+    //
+    // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
+    // MmGlobalPageDirectory and the real system PDE directory. What a mess...
+    //
+    {
+        int i = 0;
+        do
+        {
+            MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)CacheSeg->BaseAddress + (i << PAGE_SHIFT)));
+        } while (++i < (Size >> PAGE_SHIFT));
+    }
     Mdl = alloca(MmSizeOfMdl(CacheSeg->BaseAddress, Size));
     MmInitializeMdl(Mdl, CacheSeg->BaseAddress, Size);
     MmBuildMdlForNonPagedPool(Mdl);
diff --git a/reactos/ntoskrnl/mm/ARM3/mdlsup.c b/reactos/ntoskrnl/mm/ARM3/mdlsup.c
new file mode 100644
index 00000000000..6df5c1aa459
--- /dev/null
+++ b/reactos/ntoskrnl/mm/ARM3/mdlsup.c
@@ -0,0 +1,1318 @@
+/*
+ * PROJECT:         ReactOS Kernel
+ * LICENSE:         BSD - See COPYING.ARM in the top level directory
+ * FILE:            ntoskrnl/mm/ARM3/mdlsup.c
+ * PURPOSE:         ARM Memory Manager Memory Descriptor List (MDL) Management
+ * PROGRAMMERS:     ReactOS Portable Systems Group
+ */
+
+/* INCLUDES *******************************************************************/
+
+#include <ntoskrnl.h>
+#define NDEBUG
+#include <debug.h>
+
+#line 15 "ARM³::MDLSUP"
+#define MODULE_INVOLVED_IN_ARM3
+#include "../ARM3/miarm.h"
+
+/* PUBLIC FUNCTIONS ***********************************************************/
+
+/*
+ * @implemented
+ */
+PMDL
+NTAPI
+MmCreateMdl(IN PMDL Mdl,
+            IN PVOID Base,
+            IN ULONG Length)
+{
+    ULONG Size;
+
+    //
+    // Check if we don't have an MDL built
+    //
+    if (!Mdl)
+    {
+        //
+        // Calculate the size we'll need and allocate the MDL
+        //
+        Size = MmSizeOfMdl(Base, Length);
+        Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
+        if (!Mdl) return NULL;
+    }
+
+    //
+    // Initialize it
+    //
+    MmInitializeMdl(Mdl, Base, Length);
+    return Mdl;
+}
+
+/*
+ * @implemented
+ */
+ULONG
+NTAPI
+MmSizeOfMdl(IN PVOID Base,
+            IN ULONG Length)
+{
+    //
+    // Return the MDL size
+    //
+    return sizeof(MDL) +
+           (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER));
+}
+
+/*
+ * @implemented
+ */
+VOID
+NTAPI
+MmBuildMdlForNonPagedPool(IN PMDL Mdl)
+{
+    PPFN_NUMBER MdlPages, EndPage;
+    PFN_NUMBER Pfn, PageCount;
+    PVOID Base;
+    PMMPTE PointerPte;
+
+    //
+    // Sanity checks
+    //
+    ASSERT(Mdl->ByteCount != 0);
+    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
+                             MDL_MAPPED_TO_SYSTEM_VA |
+                             MDL_SOURCE_IS_NONPAGED_POOL |
+                             MDL_PARTIAL)) == 0);
+
+    //
+    // We know the MDL isn't associated to a process now
+    //
+    Mdl->Process = NULL;
+
+    //
+    // Get page and VA information
+    //
+    MdlPages = (PPFN_NUMBER)(Mdl + 1);
+    Base = Mdl->StartVa;
+
+    //
+    // Set the system address and now get the page count
+    //
+    Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
+    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa,
+                                               Mdl->ByteCount);
+    ASSERT(PageCount != 0);
+    EndPage = MdlPages + PageCount;
+
+    //
+    // Loop the PTEs
+    //
+    PointerPte = MiAddressToPte(Base);
+    do
+    {
+        //
+        // Write the PFN
+        //
+        Pfn = PFN_FROM_PTE(PointerPte++);
+        *MdlPages++ = Pfn;
+    } while (MdlPages < EndPage);
+
+    //
+    // Set the nonpaged pool flag
+    //
+    Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;
+
+    //
+    // Check if this is an I/O mapping
+    //
+    if (Pfn > MmHighestPhysicalPage) Mdl->MdlFlags |= MDL_IO_SPACE;
+}
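For context, this is roughly how a caller would drive the two routines above: MmSizeOfMdl() reserves one PFN_NUMBER slot per spanned page directly after the MDL header, and MmBuildMdlForNonPagedPool() fills that array straight from the PTEs with no locking, since nonpaged pool is always resident. A minimal sketch, not part of the patch; the 'hckS' pool tag, the function name, and the two-page length are hypothetical:

NTSTATUS
SketchDescribeNonPagedBuffer(VOID)
{
    PVOID Buffer;
    PMDL Mdl;
    ULONG Length = 2 * PAGE_SIZE;

    /* Allocate a nonpaged buffer to describe (hypothetical 'hckS' tag) */
    Buffer = ExAllocatePoolWithTag(NonPagedPool, Length, 'hckS');
    if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;

    /* MmCreateMdl calls MmSizeOfMdl internally when no MDL is passed in */
    Mdl = MmCreateMdl(NULL, Buffer, Length);
    if (!Mdl)
    {
        ExFreePoolWithTag(Buffer, 'hckS');
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    /* Fill the trailing PFN array; no probe or lock is needed for pool */
    MmBuildMdlForNonPagedPool(Mdl);
    ASSERT(Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL);

    /* ... hand the MDL to I/O, or inspect MmGetMdlPfnArray(Mdl) ... */

    ExFreePool(Mdl);
    ExFreePoolWithTag(Buffer, 'hckS');
    return STATUS_SUCCESS;
}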
+
+/*
+ * @implemented
+ */
+PMDL
+NTAPI
+MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
+                      IN PHYSICAL_ADDRESS HighAddress,
+                      IN PHYSICAL_ADDRESS SkipBytes,
+                      IN SIZE_T TotalBytes)
+{
+    //
+    // Call the internal routine
+    //
+    return MiAllocatePagesForMdl(LowAddress,
+                                 HighAddress,
+                                 SkipBytes,
+                                 TotalBytes,
+                                 MiNotMapped,
+                                 0);
+}
+
+/*
+ * @implemented
+ */
+PMDL
+NTAPI
+MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
+                        IN PHYSICAL_ADDRESS HighAddress,
+                        IN PHYSICAL_ADDRESS SkipBytes,
+                        IN SIZE_T TotalBytes,
+                        IN MEMORY_CACHING_TYPE CacheType,
+                        IN ULONG Flags)
+{
+    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
+
+    //
+    // Check for invalid cache type
+    //
+    if (CacheType > MmWriteCombined)
+    {
+        //
+        // Normalize to default
+        //
+        CacheAttribute = MiNotMapped;
+    }
+    else
+    {
+        //
+        // Convert to internal caching attribute
+        //
+        CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
+    }
+
+    //
+    // Only these flags are allowed
+    //
+    if (Flags & ~(MM_DONT_ZERO_ALLOCATION | MM_ALLOCATE_FROM_LOCAL_NODE_ONLY))
+    {
+        //
+        // Silently fail
+        //
+        return NULL;
+    }
+
+    //
+    // Call the internal routine
+    //
+    return MiAllocatePagesForMdl(LowAddress,
+                                 HighAddress,
+                                 SkipBytes,
+                                 TotalBytes,
+                                 CacheAttribute,
+                                 Flags);
+}
+
+/*
+ * @implemented
+ */
+VOID
+NTAPI
+MmFreePagesFromMdl(IN PMDL Mdl)
+{
+    PVOID Base;
+    PPFN_NUMBER Pages;
+    LONG NumberOfPages;
+    PMMPFN Pfn1;
+    DPRINT("Freeing MDL: %p\n", Mdl);
+
+    //
+    // Sanity checks
+    //
+    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
+    ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
+    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
+
+    //
+    // Get address and page information
+    //
+    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
+    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
+
+    //
+    // Loop all the MDL pages
+    //
+    Pages = (PPFN_NUMBER)(Mdl + 1);
+    do
+    {
+        //
+        // Reached the last page
+        //
+        if (*Pages == -1) break;
+
+        //
+        // Sanity check
+        //
+        ASSERT(*Pages <= MmHighestPhysicalPage);
+
+        //
+        // Get the page entry
+        //
+        Pfn1 = MiGetPfnEntry(*Pages);
+        ASSERT(Pfn1->u3.ReferenceCount == 1);
+
+        //
+        // Clear it
+        //
+        Pfn1->u3.e1.StartOfAllocation = 0;
+        Pfn1->u3.e1.EndOfAllocation = 0;
+
+        //
+        // Dereference it
+        //
+        MmDereferencePage(*Pages);
+
+        //
+        // Clear this page and move on
+        //
+        *Pages++ = -1;
+    } while (--NumberOfPages != 0);
+
+    //
+    // Remove the pages locked flag
+    //
+    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
+}
+
+/*
+ * @implemented
+ */
+PVOID
+NTAPI
+MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
+                             IN KPROCESSOR_MODE AccessMode,
+                             IN MEMORY_CACHING_TYPE CacheType,
+                             IN PVOID BaseAddress,
+                             IN ULONG BugCheckOnFailure,
+                             IN MM_PAGE_PRIORITY Priority)
+{
+    PVOID Base;
+    PPFN_NUMBER MdlPages, LastPage;
+    PFN_NUMBER PageCount;
+    BOOLEAN IsIoMapping;
+    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
+    PMMPTE PointerPte;
+    MMPTE TempPte;
+
+    //
+    // Sanity check
+    //
+    ASSERT(Mdl->ByteCount != 0);
+
+    //
+    // Get the base
+    //
+    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
+
+    //
+    // Handle kernel case first
+    //
+    if (AccessMode == KernelMode)
+    {
+        //
+        // Get the list of pages and count
+        //
+        MdlPages = (PPFN_NUMBER)(Mdl + 1);
+        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
+        LastPage = MdlPages + PageCount;
+
+        //
+        // Sanity checks
+        //
+        ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
+                                 MDL_SOURCE_IS_NONPAGED_POOL |
+                                 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
+        ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);
+
+        //
+        // Get the correct cache type
+        //
+        IsIoMapping = Mdl->MdlFlags & MDL_IO_SPACE;
+        CacheAttribute =
MiPlatformCacheAttributes[IsIoMapping][CacheType]; + + // + // Reserve the PTEs + // + PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace); + if (!PointerPte) + { + // + // If it can fail, return NULL + // + if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL; + + // + // Should we bugcheck? + // + if (!BugCheckOnFailure) return NULL; + + // + // Yes, crash the system + // + KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0); + } + + // + // Get the mapped address + // + Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset); + + // + // Get the template + // + TempPte = HyperTemplatePte; + switch (CacheAttribute) + { + case MiNonCached: + + // + // Disable caching + // + TempPte.u.Hard.CacheDisable = 1; + TempPte.u.Hard.WriteThrough = 1; + break; + + case MiWriteCombined: + + // + // Enable write combining + // + TempPte.u.Hard.CacheDisable = 1; + TempPte.u.Hard.WriteThrough = 0; + break; + + default: + // + // Nothing to do + // + break; + } + + // + // Loop all PTEs + // + do + { + // + // We're done here + // + if (*MdlPages == -1) break; + + // + // Write the PTE + // + ASSERT(PointerPte->u.Hard.Valid == 0); + TempPte.u.Hard.PageFrameNumber = *MdlPages; + *PointerPte++ = TempPte; + } while (++MdlPages < LastPage); + + // + // Mark it as mapped + // + ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0); + Mdl->MappedSystemVa = Base; + Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA; + + // + // Check if it was partial + // + if (Mdl->MdlFlags & MDL_PARTIAL) + { + // + // Write the appropriate flag here too + // + Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED; + } + + // + // Return the mapped address + // + return Base; + } + + // + // In user-mode, let ReactOS do it + // + return MiMapLockedPagesInUserSpace(Mdl, Base, CacheType, BaseAddress); +} + +/* + * @implemented + */ +PVOID +NTAPI +MmMapLockedPages(IN PMDL Mdl, + IN KPROCESSOR_MODE AccessMode) +{ + // + // Call the extended version + // + return MmMapLockedPagesSpecifyCache(Mdl, + AccessMode, + MmCached, + NULL, + TRUE, + HighPagePriority); +} + +/* + * @implemented + */ +VOID +NTAPI +MmUnmapLockedPages(IN PVOID BaseAddress, + IN PMDL Mdl) +{ + PVOID Base; + PFN_NUMBER PageCount; + PPFN_NUMBER MdlPages; + PMMPTE PointerPte; + + // + // Sanity check + // + ASSERT(Mdl->ByteCount != 0); + + // + // Check if this is a kernel request + // + if (BaseAddress > MM_HIGHEST_USER_ADDRESS) + { + // + // Get base and count information + // + Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset); + PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount); + + // + // Sanity checks + // + ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0); + ASSERT(PageCount != 0); + ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA); + + // + // Get the PTE + // + PointerPte = MiAddressToPte(BaseAddress); + ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]); + ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]); + + // + // Check if the caller wants us to free advanced pages + // + if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES) + { + // + // Get the MDL page array + // + MdlPages = (PPFN_NUMBER)(Mdl + 1); + MdlPages += PageCount; + + // + // Do the math + // + PageCount += *MdlPages; + PointerPte -= *MdlPages; + ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]); + ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]); + + // + // Get the new base address + // + BaseAddress = (PVOID)((ULONG_PTR)BaseAddress - + ((*MdlPages) << PAGE_SHIFT)); + } + + // + // Remove flags + // + Mdl->MdlFlags &= 
~(MDL_MAPPED_TO_SYSTEM_VA | + MDL_PARTIAL_HAS_BEEN_MAPPED | + MDL_FREE_EXTRA_PTES); + + // + // Release the system PTEs + // + MiReleaseSystemPtes(PointerPte, PageCount, SystemPteSpace); + } + else + { + // + // Let ReactOS handle it + // + MiUnmapLockedPagesInUserSpace(BaseAddress, Mdl); + } +} + +/* + * @implemented + */ +VOID +NTAPI +MmProbeAndLockPages(IN PMDL Mdl, + IN KPROCESSOR_MODE AccessMode, + IN LOCK_OPERATION Operation) +{ + PPFN_TYPE MdlPages; + PVOID Base, Address, LastAddress, StartAddress; + ULONG LockPages, TotalPages; + NTSTATUS Status = STATUS_SUCCESS; + PEPROCESS CurrentProcess; + PETHREAD Thread; + PMMSUPPORT AddressSpace; + NTSTATUS ProbeStatus; + PMMPTE PointerPte, PointerPde, LastPte; + PFN_NUMBER PageFrameIndex; + PMMPFN Pfn1; + BOOLEAN UsePfnLock; + KIRQL OldIrql; + DPRINT("Probing MDL: %p\n", Mdl); + + // + // Sanity checks + // + ASSERT(Mdl->ByteCount != 0); + ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0); + ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0); + ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | + MDL_MAPPED_TO_SYSTEM_VA | + MDL_SOURCE_IS_NONPAGED_POOL | + MDL_PARTIAL | + MDL_IO_SPACE)) == 0); + + // + // Get page and base information + // + MdlPages = (PPFN_NUMBER)(Mdl + 1); + Base = (PVOID)Mdl->StartVa; + + // + // Get the addresses and how many pages we span (and need to lock) + // + Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset); + LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount); + LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount); + ASSERT(LockPages != 0); + + // + // Get the thread and process + // + Thread = PsGetCurrentThread(); + if (Address <= MM_HIGHEST_USER_ADDRESS) + { + // + // Get the process + // + CurrentProcess = PsGetCurrentProcess(); + } + else + { + // + // No process + // + CurrentProcess = NULL; + } + + // + // Save the number of pages we'll have to lock, and the start address + // + TotalPages = LockPages; + StartAddress = Address; + + // + // Now probe them + // + ProbeStatus = STATUS_SUCCESS; + _SEH2_TRY + { + // + // Enter probe loop + // + do + { + // + // Assume failure + // + *MdlPages = -1; + + // + // Read + // + *(volatile CHAR*)Address; + + // + // Check if this is write access (only probe for user-mode) + // + if ((Operation != IoReadAccess) && + (Address <= MM_HIGHEST_USER_ADDRESS)) + { + // + // Probe for write too + // + ProbeForWriteChar(Address); + } + + // + // Next address... + // + Address = (PVOID)((ULONG_PTR)Address + PAGE_SIZE); + Address = PAGE_ALIGN(Address); + + // + // Next page... + // + LockPages--; + MdlPages++; + } while (Address < LastAddress); + + // + // Reset back to the original page + // + ASSERT(LockPages == 0); + MdlPages = (PPFN_NUMBER)(Mdl + 1); + } + _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER) + { + // + // Oops :( + // + ProbeStatus = _SEH2_GetExceptionCode(); + } + _SEH2_END; + + // + // So how did that go? 
+    //
+    if (ProbeStatus != STATUS_SUCCESS)
+    {
+        //
+        // Fail
+        //
+        DPRINT1("MDL PROBE FAILED!\n");
+        Mdl->Process = NULL;
+        ExRaiseStatus(ProbeStatus);
+    }
+
+    //
+    // Get the PTE and PDE
+    //
+    PointerPte = MiAddressToPte(StartAddress);
+    PointerPde = MiAddressToPde(StartAddress);
+
+    //
+    // Sanity check
+    //
+    ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));
+
+    //
+    // Check what kind of operation this is
+    //
+    if (Operation != IoReadAccess)
+    {
+        //
+        // Set the write flag
+        //
+        Mdl->MdlFlags |= MDL_WRITE_OPERATION;
+    }
+    else
+    {
+        //
+        // Remove the write flag
+        //
+        Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
+    }
+
+    //
+    // Mark the MDL as locked *now*
+    //
+    Mdl->MdlFlags |= MDL_PAGES_LOCKED;
+
+    //
+    // Check if this came from kernel mode
+    //
+    if (Base >= MM_HIGHEST_USER_ADDRESS)
+    {
+        //
+        // We should not have a process
+        //
+        ASSERT(CurrentProcess == NULL);
+        Mdl->Process = NULL;
+
+        //
+        // In kernel mode, we don't need to check for write access
+        //
+        Operation = IoReadAccess;
+
+        //
+        // Use the PFN lock
+        //
+        UsePfnLock = TRUE;
+        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+        AddressSpace = NULL; // Keep compiler happy
+    }
+    else
+    {
+        //
+        // Sanity checks
+        //
+        ASSERT(TotalPages != 0);
+        ASSERT(CurrentProcess == PsGetCurrentProcess());
+
+        //
+        // Track locked pages
+        //
+        InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
+                                    TotalPages);
+
+        //
+        // Save the process
+        //
+        Mdl->Process = CurrentProcess;
+
+        //
+        // Use the process lock
+        //
+        UsePfnLock = FALSE;
+        AddressSpace = &CurrentProcess->Vm;
+        MmLockAddressSpace(AddressSpace);
+        OldIrql = DISPATCH_LEVEL; // Keep compiler happy
+    }
+
+    //
+    // Get the last PTE
+    //
+    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));
+
+    //
+    // Loop the pages
+    //
+    do
+    {
+        //
+        // Assume failure and check for non-mapped pages
+        //
+        *MdlPages = -1;
+        while ((PointerPde->u.Hard.Valid == 0) ||
+               (PointerPte->u.Hard.Valid == 0))
+        {
+            //
+            // What kind of lock were we using?
+            //
+            if (UsePfnLock)
+            {
+                //
+                // Release PFN lock
+                //
+                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+            }
+            else
+            {
+                //
+                // Release process address space lock
+                //
+                MmUnlockAddressSpace(AddressSpace);
+            }
+
+            //
+            // Access the page
+            //
+            Address = MiPteToAddress(PointerPte);
+            Status = MmAccessFault(FALSE, Address, KernelMode, NULL);
+            if (!NT_SUCCESS(Status))
+            {
+                //
+                // Fail
+                //
+                DPRINT1("Access fault failed\n");
+                goto Cleanup;
+            }
+
+            //
+            // What lock should we use?
+            //
+            if (UsePfnLock)
+            {
+                //
+                // Grab the PFN lock
+                //
+                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+            }
+            else
+            {
+                //
+                // Use the address space lock
+                //
+                MmLockAddressSpace(AddressSpace);
+            }
+        }
+
+        //
+        // Check if this was a write or modify
+        //
+        if (Operation != IoReadAccess)
+        {
+            //
+            // Check if the PTE is not writable
+            //
+            if (!PointerPte->u.Hard.Write)
+            {
+                //
+                // Check if it's copy on write
+                //
+                if (PointerPte->u.Hard.CopyOnWrite)
+                {
+                    //
+                    // Get the base address and allow a change for user-mode
+                    //
+                    Address = MiPteToAddress(PointerPte);
+                    if (Address <= MM_HIGHEST_USER_ADDRESS)
+                    {
+                        //
+                        // What kind of lock were we using?
+                        //
+                        if (UsePfnLock)
+                        {
+                            //
+                            // Release PFN lock
+                            //
+                            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+                        }
+                        else
+                        {
+                            //
+                            // Release process address space lock
+                            //
+                            MmUnlockAddressSpace(AddressSpace);
+                        }
+
+                        //
+                        // Access the page
+                        //
+                        Status = MmAccessFault(TRUE, Address, KernelMode, NULL);
+                        if (!NT_SUCCESS(Status))
+                        {
+                            //
+                            // Fail
+                            //
+                            DPRINT1("Access fault failed\n");
+                            goto Cleanup;
+                        }
+
+                        //
+                        // Re-acquire the lock
+                        //
+                        if (UsePfnLock)
+                        {
+                            //
+                            // Grab the PFN lock
+                            //
+                            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+                        }
+                        else
+                        {
+                            //
+                            // Use the address space lock
+                            //
+                            MmLockAddressSpace(AddressSpace);
+                        }
+
+                        //
+                        // Start over
+                        //
+                        continue;
+                    }
+                }
+
+                //
+                // Fail, since we won't allow this
+                //
+                Status = STATUS_ACCESS_VIOLATION;
+                goto CleanupWithLock;
+            }
+        }
+
+        //
+        // Grab the PFN
+        //
+        PageFrameIndex = PFN_FROM_PTE(PointerPte);
+        if (PageFrameIndex < MmHighestPhysicalPage)
+        {
+            //
+            // Get the PFN entry
+            //
+            Pfn1 = MiGetPfnEntry(PageFrameIndex);
+            ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));
+
+            //
+            // Now lock the page
+            //
+            MmReferencePage(PageFrameIndex);
+            MmLockPage(PageFrameIndex);
+        }
+        else
+        {
+            //
+            // For I/O addresses, just remember this
+            //
+            Mdl->MdlFlags |= MDL_IO_SPACE;
+        }
+
+        //
+        // Write the page and move on
+        //
+        *MdlPages++ = PageFrameIndex;
+        if (!((ULONG_PTR)(++PointerPte) & (PAGE_SIZE - 1))) PointerPde++;
+    } while (PointerPte <= LastPte);
+
+    //
+    // What kind of lock were we using?
+    //
+    if (UsePfnLock)
+    {
+        //
+        // Release PFN lock
+        //
+        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+    }
+    else
+    {
+        //
+        // Release process address space lock
+        //
+        MmUnlockAddressSpace(AddressSpace);
+    }
+
+    //
+    // Sanity check
+    //
+    ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
+    return;
+
+CleanupWithLock:
+    //
+    // This is the failure path
+    //
+    ASSERT(!NT_SUCCESS(Status));
+
+    //
+    // What kind of lock were we using?
+ // + if (UsePfnLock) + { + // + // Release PFN lock + // + KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql); + } + else + { + // + // Release process address space lock + // + MmUnlockAddressSpace(AddressSpace); + } +Cleanup: + // + // Pages must be locked so MmUnlock can work + // + ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED); + MmUnlockPages(Mdl); + + // + // Raise the error + // + ExRaiseStatus(Status); +} + +/* + * @implemented + */ +VOID +NTAPI +MmUnlockPages(IN PMDL Mdl) +{ + PPFN_NUMBER MdlPages, LastPage; + PEPROCESS Process; + PVOID Base; + ULONG Flags, PageCount; + KIRQL OldIrql; + DPRINT("Unlocking MDL: %p\n", Mdl); + + // + // Sanity checks + // + ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0); + ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0); + ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0); + ASSERT(Mdl->ByteCount != 0); + + // + // Get the process associated and capture the flags which are volatile + // + Process = Mdl->Process; + Flags = Mdl->MdlFlags; + + // + // Automagically undo any calls to MmGetSystemAddressForMdl's for this MDL + // + if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) + { + // + // Unmap the pages from system space + // + MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl); + } + + // + // Get the page count + // + MdlPages = (PPFN_NUMBER)(Mdl + 1); + Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset); + PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount); + ASSERT(PageCount != 0); + + // + // We don't support AWE + // + if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE); + + // + // Check if the buffer is mapped I/O space + // + if (Flags & MDL_IO_SPACE) + { + // + // Acquire PFN lock + // + OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock); + + // + // Loop every page + // + LastPage = MdlPages + PageCount; + do + { + // + // Last page, break out + // + if (*MdlPages == -1) break; + + // + // Check if this page is in the PFN database + // + if (*MdlPages <= MmHighestPhysicalPage) + { + // + // Unlock and dereference + // + MmUnlockPage(*MdlPages); + MmDereferencePage(*MdlPages); + } + } while (++MdlPages < LastPage); + + // + // Release the lock + // + KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql); + + // + // Check if we have a process + // + if (Process) + { + // + // Handle the accounting of locked pages + // + ASSERT(Process->NumberOfLockedPages > 0); + InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages, + -PageCount); + } + + // + // We're done + // + Mdl->MdlFlags &= ~MDL_IO_SPACE; + Mdl->MdlFlags &= ~MDL_PAGES_LOCKED; + return; + } + + // + // Check if we have a process + // + if (Process) + { + // + // Handle the accounting of locked pages + // + ASSERT(Process->NumberOfLockedPages > 0); + InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages, + -PageCount); + } + + // + // Loop every page + // + LastPage = MdlPages + PageCount; + do + { + // + // Last page reached + // + if (*MdlPages == -1) + { + // + // Were there no pages at all? 
+ // + if (MdlPages == (PPFN_NUMBER)(Mdl + 1)) + { + // + // We're already done + // + Mdl->MdlFlags &= ~MDL_PAGES_LOCKED; + return; + } + + // + // Otherwise, stop here + // + LastPage = MdlPages; + break; + } + + // + // Sanity check + // + ASSERT(*MdlPages <= MmHighestPhysicalPage); + } while (++MdlPages < LastPage); + + // + // Reset pointer + // + MdlPages = (PPFN_NUMBER)(Mdl + 1); + + // + // Now grab the PFN lock for the actual unlock and dereference + // + OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock); + do + { + // + // Unlock and dereference + // + MmUnlockPage(*MdlPages); + MmDereferencePage(*MdlPages); + } while (++MdlPages < LastPage); + + // + // Release the lock + // + KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql); + + // + // We're done + // + Mdl->MdlFlags &= ~MDL_PAGES_LOCKED; +} + +/* + * @unimplemented + */ +NTSTATUS +NTAPI +MmAdvanceMdl(IN PMDL Mdl, + IN ULONG NumberOfBytes) +{ + UNIMPLEMENTED; + return STATUS_NOT_IMPLEMENTED; +} + +/* + * @unimplemented + */ +PVOID +NTAPI +MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress, + IN ULONG PoolTag, + IN PMDL MemoryDescriptorList, + IN MEMORY_CACHING_TYPE CacheType) +{ + UNIMPLEMENTED; + return 0; +} + +/* + * @unimplemented + */ +VOID +NTAPI +MmUnmapReservedMapping(IN PVOID BaseAddress, + IN ULONG PoolTag, + IN PMDL MemoryDescriptorList) +{ + UNIMPLEMENTED; +} + +/* + * @unimplemented + */ +NTSTATUS +NTAPI +MmPrefetchPages(IN ULONG NumberOfLists, + IN PREAD_LIST *ReadLists) +{ + UNIMPLEMENTED; + return STATUS_NOT_IMPLEMENTED; +} + +/* + * @unimplemented + */ +NTSTATUS +NTAPI +MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList, + IN ULONG NewProtect) +{ + UNIMPLEMENTED; + return STATUS_NOT_IMPLEMENTED; +} + +/* + * @unimplemented + */ +VOID +NTAPI +MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList, + IN PEPROCESS Process, + IN KPROCESSOR_MODE AccessMode, + IN LOCK_OPERATION Operation) +{ + UNIMPLEMENTED; +} + + +/* + * @unimplemented + */ +VOID +NTAPI +MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList, + IN LARGE_INTEGER PageList[], + IN KPROCESSOR_MODE AccessMode, + IN LOCK_OPERATION Operation) +{ + UNIMPLEMENTED; +} + +/* + * @unimplemented + */ +VOID +NTAPI +MmMapMemoryDumpMdl(IN PMDL Mdl) +{ + UNIMPLEMENTED; +} + +/* EOF */ diff --git a/reactos/ntoskrnl/mm/ARM3/miarm.h b/reactos/ntoskrnl/mm/ARM3/miarm.h index e7f55122ec3..86b498a6e0b 100644 --- a/reactos/ntoskrnl/mm/ARM3/miarm.h +++ b/reactos/ntoskrnl/mm/ARM3/miarm.h @@ -53,6 +53,8 @@ extern PMMPTE MiFirstReservedZeroingPte; extern MI_PFN_CACHE_ATTRIBUTE MiPlatformCacheAttributes[2][MmMaximumCacheType]; extern PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock; extern ULONG MmBootImageSize; +extern PMMPTE MmSystemPtesStart[MaximumPtePoolTypes]; +extern PMMPTE MmSystemPtesEnd[MaximumPtePoolTypes]; VOID NTAPI @@ -106,4 +108,31 @@ MiCheckForContiguousMemory( IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute ); +PMDL +NTAPI +MiAllocatePagesForMdl( + IN PHYSICAL_ADDRESS LowAddress, + IN PHYSICAL_ADDRESS HighAddress, + IN PHYSICAL_ADDRESS SkipBytes, + IN SIZE_T TotalBytes, + IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute, + IN ULONG Flags +); + +PVOID +NTAPI +MiMapLockedPagesInUserSpace( + IN PMDL Mdl, + IN PVOID BaseVa, + IN MEMORY_CACHING_TYPE CacheType, + IN PVOID BaseAddress +); + +VOID +NTAPI +MiUnmapLockedPagesInUserSpace( + IN PVOID BaseAddress, + IN PMDL Mdl +); + /* EOF */ diff --git a/reactos/ntoskrnl/mm/freelist.c b/reactos/ntoskrnl/mm/freelist.c index ab83236deaf..4f02e377ef6 100644 --- a/reactos/ntoskrnl/mm/freelist.c +++ 
b/reactos/ntoskrnl/mm/freelist.c
@@ -432,13 +432,280 @@ MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
         }
     } while (++i != MmPhysicalMemoryBlock->NumberOfRuns);
-    //
     // And if we get here, it means no suitable physical memory runs were found
     //
     return 0;
 }
 
+PMDL
+NTAPI
+MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
+                      IN PHYSICAL_ADDRESS HighAddress,
+                      IN PHYSICAL_ADDRESS SkipBytes,
+                      IN SIZE_T TotalBytes,
+                      IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
+                      IN ULONG MdlFlags)
+{
+    PMDL Mdl;
+    PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
+    PPFN_NUMBER MdlPage, LastMdlPage;
+    KIRQL OldIrql;
+    PLIST_ENTRY ListEntry;
+    PPHYSICAL_PAGE Pfn1;
+    LONG LookForZeroedPages;
+    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
+
+    //
+    // Convert the low address into a PFN
+    //
+    LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);
+
+    //
+    // Convert, and normalize, the high address into a PFN
+    //
+    HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
+    if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;
+
+    //
+    // Validate SkipBytes and convert them into pages
+    //
+    if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
+    SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);
+
+    //
+    // Now compute the number of pages the MDL will cover
+    //
+    PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
+    do
+    {
+        //
+        // Try creating an MDL for this many pages
+        //
+        Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
+        if (Mdl) break;
+
+        //
+        // This function is not required to return the amount of pages requested.
+        // In fact, it can return as little as 1 page, and callers are supposed
+        // to deal with this scenario. So re-attempt the allocation with fewer
+        // pages than before, and see if it worked this time.
+        //
+        PageCount -= (PageCount >> 4);
+    } while (PageCount);
+
+    //
+    // Wow, not even a single page was around!
+    //
+    if (!Mdl) return NULL;
+
+    //
+    // This is where the page array starts....
+    //
+    MdlPage = (PPFN_NUMBER)(Mdl + 1);
+
+    //
+    // Lock the PFN database
+    //
+    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+
+    //
+    // Are we looking for any pages, without discriminating?
+    //
+    if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
+    {
+        //
+        // Well then, let's go shopping
+        //
+        while (PagesFound < PageCount)
+        {
+            //
+            // Do we have zeroed pages?
+            //
+            if (!IsListEmpty(&FreeZeroedPageListHead))
+            {
+                //
+                // Grab a zero page
+                //
+                ListEntry = RemoveTailList(&FreeZeroedPageListHead);
+            }
+            else if (!IsListEmpty(&FreeUnzeroedPageListHead))
+            {
+                //
+                // Nope, grab an unzeroed page
+                //
+                ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
+                UnzeroedPageCount--;
+            }
+            else
+            {
+                //
+                // This is not good... hopefully we have at least SOME pages
+                //
+                ASSERT(PagesFound);
+                break;
+            }
+
+            //
+            // Get the PFN entry for this page
+            //
+            Pfn1 = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
+
+            //
+            // Make sure it's really free
+            //
+            ASSERT(Pfn1->Flags.Type == MM_PHYSICAL_PAGE_FREE);
+            ASSERT(Pfn1->MapCount == 0);
+            ASSERT(Pfn1->ReferenceCount == 0);
+
+            //
+            // Allocate it and mark it
+            //
+            Pfn1->Flags.Type = MM_PHYSICAL_PAGE_USED;
+            Pfn1->Flags.Consumer = MC_NPPOOL;
+            Pfn1->Flags.StartOfAllocation = 1;
+            Pfn1->Flags.EndOfAllocation = 1;
+            Pfn1->ReferenceCount = 1;
+            Pfn1->LockCount = 0;
+            Pfn1->MapCount = 0;
+            Pfn1->SavedSwapEntry = 0;
+
+            //
+            // Decrease available pages
+            //
+            MmStats.NrSystemPages++;
+            MmStats.NrFreePages--;
+
+            //
+            // Save it into the MDL
+            //
+            *MdlPage++ = MiGetPfnEntryIndex(Pfn1);
+            PagesFound++;
+        }
+    }
+    else
+    {
+        //
+        // You want a specific range of pages. We'll do this in two runs
+        //
+        for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
+        {
+            //
+            // Scan the range you specified
+            //
+            for (Page = LowPage; Page < HighPage; Page++)
+            {
+                //
+                // Get the PFN entry for this page
+                //
+                Pfn1 = MiGetPfnEntry(Page);
+
+                //
+                // Make sure it's free and if this is our first pass, zeroed
+                //
+                if (Pfn1->Flags.Type != MM_PHYSICAL_PAGE_FREE) continue;
+                if (Pfn1->Flags.Zero != LookForZeroedPages) continue;
+
+                //
+                // Sanity checks
+                //
+                ASSERT(Pfn1->MapCount == 0);
+                ASSERT(Pfn1->ReferenceCount == 0);
+
+                //
+                // Now setup the page and mark it
+                //
+                Pfn1->Flags.Type = MM_PHYSICAL_PAGE_USED;
+                Pfn1->Flags.Consumer = MC_NPPOOL;
+                Pfn1->ReferenceCount = 1;
+                Pfn1->Flags.StartOfAllocation = 1;
+                Pfn1->Flags.EndOfAllocation = 1;
+                Pfn1->LockCount = 0;
+                Pfn1->MapCount = 0;
+                Pfn1->SavedSwapEntry = 0;
+
+                //
+                // If this page was unzeroed, we've consumed such a page
+                //
+                if (!Pfn1->Flags.Zero) UnzeroedPageCount--;
+
+                //
+                // Decrease available pages
+                //
+                MmStats.NrSystemPages++;
+                MmStats.NrFreePages--;
+
+                //
+                // Save this page into the MDL
+                //
+                *MdlPage++ = Page;
+                if (++PagesFound == PageCount) break;
+            }
+
+            //
+            // If the first pass was enough, don't keep going, otherwise, go again
+            //
+            if (PagesFound == PageCount) break;
+        }
+    }
+
+    //
+    // Now release the PFN lock
+    //
+    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+
+    //
+    // We might've found fewer pages, but not more ;-)
+    //
+    if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);
+    if (!PagesFound)
+    {
+        //
+        // If we didn't find any pages at all, fail
+        //
+        DPRINT1("NO MDL PAGES!\n");
+        ExFreePool(Mdl);
+        return NULL;
+    }
+
+    //
+    // Write out how many pages we found
+    //
+    Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);
+
+    //
+    // Terminate the MDL array if any pages are missing
+    //
+    if (PagesFound != PageCount) *MdlPage = -1;
+
+    //
+    // Now go back and loop over all the MDL pages
+    //
+    MdlPage = (PPFN_NUMBER)(Mdl + 1);
+    LastMdlPage = MdlPage + PagesFound;
+    while (MdlPage < LastMdlPage)
+    {
+        //
+        // Check if we've reached the end
+        //
+        Page = *MdlPage++;
+        if (Page == -1) break;
+
+        //
+        // Get the PFN entry for the page and check if we should zero it out
+        //
+        Pfn1 = MiGetPfnEntry(Page);
+        if (Pfn1->Flags.Zero == 0) MiZeroPage(Page);
+    }
+
+    //
+    // We're done, mark the pages as locked (should we lock them, though???)
+    //
+    Mdl->Process = NULL;
+    Mdl->MdlFlags |= MDL_PAGES_LOCKED;
+    return Mdl;
+}
+
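A caller-side sketch of the contract implemented above: the routine may satisfy less than TotalBytes, so ByteCount must be re-read after the call, and the trailing PFN array may be terminated early with a -1 entry. This is illustrative only (not part of the patch); the function name and the 64-page request are hypothetical:

static VOID
SketchAllocateSomePages(VOID)
{
    PHYSICAL_ADDRESS Low, High, Skip;
    PMDL Mdl;

    Low.QuadPart = 0;            // Accept any physical page...
    High.QuadPart = 0xFFFFFFFF;  // ...below 4GB
    Skip.QuadPart = 0;           // No skip stride between retries

    /* May return NULL, or an MDL describing FEWER bytes than requested */
    Mdl = MmAllocatePagesForMdl(Low, High, Skip, 64 * PAGE_SIZE);
    if (!Mdl) return;
    ASSERT(Mdl->ByteCount <= 64 * PAGE_SIZE);

    /* ... map the MDL or program the PFNs into hardware ... */

    /* Return the pages, then free the MDL header itself */
    MmFreePagesFromMdl(Mdl);
    ExFreePool(Mdl);
}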
 PFN_TYPE
 NTAPI
 MmAllocEarlyPage(VOID)
diff --git a/reactos/ntoskrnl/mm/mdlsup.c b/reactos/ntoskrnl/mm/mdlsup.c
deleted file mode 100644
index 22f9963d6de..00000000000
--- a/reactos/ntoskrnl/mm/mdlsup.c
+++ /dev/null
@@ -1,918 +0,0 @@
-/*
- * COPYRIGHT:       See COPYING in the top level directory
- * PROJECT:         ReactOS kernel
- * FILE:            ntoskrnl/mm/mdl.c
- * PURPOSE:         Manipulates MDLs
- *
- * PROGRAMMERS:     David Welch (welch@cwcom.net)
- */
-
-/* INCLUDES ****************************************************************/
-
-#include <ntoskrnl.h>
-#define NDEBUG
-#include <debug.h>
-
-#if defined (ALLOC_PRAGMA)
-#pragma alloc_text(INIT, MmInitializeMdlImplementation)
-#endif
-
-/* GLOBALS *******************************************************************/
-
-#define TAG_MDL    TAG('M', 'D', 'L', ' ')
-#define MI_MDL_MAPPING_REGION_SIZE       (256*1024*1024)
-
-PVOID MiMdlMappingRegionBase = NULL;
-RTL_BITMAP MiMdlMappingRegionAllocMap;
-ULONG MiMdlMappingRegionHint;
-KSPIN_LOCK MiMdlMappingRegionLock;
-
-/* PRIVATE FUNCTIONS **********************************************************/
-
-VOID
-INIT_FUNCTION
-NTAPI
-MmInitializeMdlImplementation(VOID)
-{
-    MEMORY_AREA* Result;
-    NTSTATUS Status;
-    PVOID Buffer;
-    PHYSICAL_ADDRESS BoundaryAddressMultiple;
-
-    BoundaryAddressMultiple.QuadPart = 0;
-    MiMdlMappingRegionHint = 0;
-    MiMdlMappingRegionBase = NULL;
-
-    MmLockAddressSpace(MmGetKernelAddressSpace());
-    Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
-                                MEMORY_AREA_MDL_MAPPING,
-                                &MiMdlMappingRegionBase,
-                                MI_MDL_MAPPING_REGION_SIZE,
-                                PAGE_READWRITE,
-                                &Result,
-                                FALSE,
-                                0,
-                                BoundaryAddressMultiple);
-    if (!NT_SUCCESS(Status))
-    {
-        MmUnlockAddressSpace(MmGetKernelAddressSpace());
-        KeBugCheck(MEMORY_MANAGEMENT);
-    }
-    MmUnlockAddressSpace(MmGetKernelAddressSpace());
-
-    Buffer = ExAllocatePoolWithTag(NonPagedPool,
-                                   MI_MDL_MAPPING_REGION_SIZE / (PAGE_SIZE * 8),
-                                   TAG_MDL);
-
-    RtlInitializeBitMap(&MiMdlMappingRegionAllocMap, Buffer, MI_MDL_MAPPING_REGION_SIZE / PAGE_SIZE);
-    RtlClearAllBits(&MiMdlMappingRegionAllocMap);
-
-    KeInitializeSpinLock(&MiMdlMappingRegionLock);
-}
-
-/* PUBLIC FUNCTIONS ***********************************************************/
-
-
-/*
- * @implemented
- */
-PMDL
-NTAPI
-MmCreateMdl(IN PMDL Mdl,
-            IN PVOID Base,
-            IN ULONG Length)
-{
-    ULONG Size;
-
-    /* Check if we don't have an MDL built */
-    if (!Mdl)
-    {
-        /* Calcualte the size we'll need and allocate the MDL */
-        Size = MmSizeOfMdl(Base, Length);
-        Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
-        if (!Mdl) return NULL;
-    }
-
-    /* Initialize it */
-    MmInitializeMdl(Mdl, Base, Length);
-    DPRINT("Creating MDL: %p\n", Mdl);
-    DPRINT("Base: %p. 
Length: %lx\n", Base, Length); - return Mdl; -} - -/* - * @implemented - */ -ULONG -NTAPI -MmSizeOfMdl(IN PVOID Base, - IN ULONG Length) -{ - /* Return the MDL size */ - return sizeof(MDL) + (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER)); -} - -/* - * @implemented - */ -VOID -NTAPI -MmBuildMdlForNonPagedPool(IN PMDL Mdl) -{ - ULONG i; - ULONG PageCount; - PPFN_NUMBER MdlPages; - PVOID Base; - DPRINT("Building MDL: %p\n", Mdl); - - /* Sanity checks */ - ASSERT(Mdl->ByteCount != 0); - ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | - MDL_MAPPED_TO_SYSTEM_VA | - MDL_SOURCE_IS_NONPAGED_POOL | - MDL_PARTIAL)) == 0); - - /* We know the MDL isn't associated to a process now */ - Mdl->Process = NULL; - - /* Get page and VA information */ - MdlPages = (PPFN_NUMBER)(Mdl + 1); - Base = Mdl->StartVa; - - /* Set the system address and now get the page count */ - Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset); - PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa, Mdl->ByteCount); - ASSERT(PageCount != 0); - - /* Go through each page */ - for (i = 0; i < PageCount; i++) - { - /* Map it */ - *MdlPages++ = MmGetPfnForProcess(NULL, - (PVOID)((ULONG_PTR)Base + (i * PAGE_SIZE))); - } - - /* Set the final flag */ - Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL; -} - -/* - * @implemented - */ -VOID -NTAPI -MmFreePagesFromMdl(IN PMDL Mdl) -{ - PVOID Base; - PPFN_NUMBER Pages; - LONG NumberOfPages; - DPRINT("Freeing MDL: %p\n", Mdl); - - /* Sanity checks */ - ASSERT(KeGetCurrentIrql() <= APC_LEVEL); - ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0); - ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0); - - /* Get address and page information */ - Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset); - NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount); - - /* Loop all the MDL pages */ - Pages = (PPFN_NUMBER)(Mdl + 1); - while (--NumberOfPages >= 0) - { - /* Dereference each one of them */ - MmDereferencePage(Pages[NumberOfPages]); - } - - /* Remove the pages locked flag */ - Mdl->MdlFlags &= ~MDL_PAGES_LOCKED; -} - -/* - * @implemented - */ -PVOID -NTAPI -MmMapLockedPages(IN PMDL Mdl, - IN KPROCESSOR_MODE AccessMode) -{ - /* Call the extended version */ - return MmMapLockedPagesSpecifyCache(Mdl, - AccessMode, - MmCached, - NULL, - TRUE, - HighPagePriority); -} - -/* - * @implemented - */ -VOID -NTAPI -MmUnlockPages(IN PMDL Mdl) -{ - ULONG i; - PPFN_NUMBER MdlPages; - PFN_NUMBER Page; - PEPROCESS Process; - PVOID Base; - ULONG Flags, PageCount; - DPRINT("Unlocking MDL: %p\n", Mdl); - - /* Sanity checks */ - ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0); - ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0); - ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0); - ASSERT(Mdl->ByteCount != 0); - - /* Get the process associated and capture the flags which are volatile */ - Process = Mdl->Process; - Flags = Mdl->MdlFlags; - - /* Automagically undo any calls to MmGetSystemAddressForMdl's for this mdl */ - if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) - { - /* Unmap the pages from system spage */ - MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl); - } - - /* Get the page count */ - MdlPages = (PPFN_NUMBER)(Mdl + 1); - Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset); - PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount); - ASSERT(PageCount != 0); - - /* We don't support AWE */ - if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE); - - /* Check if the buffer is mapped I/O space */ - if (Flags & MDL_IO_SPACE) - { - /* Check if this was a 
wirte */ - if (Flags & MDL_WRITE_OPERATION) - { - /* Windows keeps track of the modified bit */ - } - - /* Check if we have a process */ - if (Process) - { - /* Handle the accounting of locked pages */ - ASSERT(Process->NumberOfLockedPages > 0); - InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages, - -PageCount); - } - - /* We're done */ - Mdl->MdlFlags &= ~MDL_IO_SPACE; - Mdl->MdlFlags &= ~MDL_PAGES_LOCKED; - return; - } - - /* Check if we have a process */ - if (Process) - { - /* Handle the accounting of locked pages */ - ASSERT(Process->NumberOfLockedPages > 0); - InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages, - -PageCount); - } - - /* Scan each page */ - for (i = 0; i < PageCount; i++) - { - /* Get the page entry */ - - /* Unlock and dereference it */ - Page = MdlPages[i]; - MmUnlockPage(Page); - MmDereferencePage(Page); - } - - /* We're done */ - Mdl->MdlFlags &= ~MDL_PAGES_LOCKED; -} - -/* - * @implemented - */ -VOID -NTAPI -MmUnmapLockedPages(IN PVOID BaseAddress, - IN PMDL Mdl) -{ - KIRQL oldIrql; - ULONG i, PageCount; - ULONG Base; - MEMORY_AREA *MemoryArea; - DPRINT("Unmapping MDL: %p\n", Mdl); - DPRINT("Base: %p\n", BaseAddress); - - /* Sanity check */ - ASSERT(Mdl->ByteCount != 0); - - /* Check if this is a kernel request */ - if (BaseAddress > MM_HIGHEST_USER_ADDRESS) - { - /* Get base and count information */ - Base = (ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset; - PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount); - - /* Sanity checks */ - ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0); - ASSERT(PageCount != 0); - ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA); - - /* ReactOS does not support this flag */ - if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES) ASSERT(FALSE); - - /* Remove flags */ - Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA | - MDL_PARTIAL_HAS_BEEN_MAPPED | - MDL_FREE_EXTRA_PTES); - - /* If we came from non-paged pool, on ReactOS, we can leave */ - if (Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) return; - - /* Loop each page */ - BaseAddress = PAGE_ALIGN(BaseAddress); - for (i = 0; i < PageCount; i++) - { - /* Delete it */ - MmDeleteVirtualMapping(NULL, - (PVOID)((ULONG_PTR)BaseAddress + (i * PAGE_SIZE)), - FALSE, - NULL, - NULL); - } - - /* Lock the mapping region */ - KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql); - - /* Deallocate all the pages used. 
*/ - Base = ((ULONG_PTR)BaseAddress - (ULONG_PTR)MiMdlMappingRegionBase) / PAGE_SIZE; - RtlClearBits(&MiMdlMappingRegionAllocMap, Base, PageCount); - MiMdlMappingRegionHint = min(MiMdlMappingRegionHint, Base); - - /* Release the lock */ - KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql); - } - else - { - /* Sanity check */ - ASSERT(Mdl->Process == PsGetCurrentProcess()); - - /* Find the memory area */ - MemoryArea = MmLocateMemoryAreaByAddress(&Mdl->Process->Vm, - BaseAddress); - ASSERT(MemoryArea); - - /* Free it */ - MmFreeMemoryArea(&Mdl->Process->Vm, - MemoryArea, - NULL, - NULL); - } -} - -/* - * @implemented - */ -VOID -NTAPI -MmProbeAndLockPages(IN PMDL Mdl, - IN KPROCESSOR_MODE AccessMode, - IN LOCK_OPERATION Operation) -{ - PPFN_TYPE MdlPages; - PVOID Base, Address; - ULONG i, j; - ULONG NrPages; - NTSTATUS Status = STATUS_SUCCESS; - PFN_TYPE Page; - PEPROCESS CurrentProcess; - PETHREAD Thread; - PMMSUPPORT AddressSpace; - KIRQL OldIrql = KeGetCurrentIrql(); - DPRINT("Probing MDL: %p\n", Mdl); - - /* Sanity checks */ - ASSERT(Mdl->ByteCount != 0); - ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0); - ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0); - ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | - MDL_MAPPED_TO_SYSTEM_VA | - MDL_SOURCE_IS_NONPAGED_POOL | - MDL_PARTIAL | - MDL_IO_SPACE)) == 0); - - /* Get page and base information */ - MdlPages = (PPFN_NUMBER)(Mdl + 1); - Base = (PVOID)Mdl->StartVa; - Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset); - NrPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount); - ASSERT(NrPages != 0); - - /* Check if this is an MDL in I/O Space */ - if (Mdl->StartVa >= MmSystemRangeStart && - MmGetPfnForProcess(NULL, Mdl->StartVa) >= MmHighestPhysicalPage) - { - /* Just loop each page */ - for (i = 0; i < NrPages; i++) - { - /* And map it */ - MdlPages[i] = MmGetPfnForProcess(NULL, - (PVOID)((ULONG_PTR)Mdl->StartVa + (i * PAGE_SIZE))); - } - - /* Set the flags and exit */ - Mdl->MdlFlags |= MDL_PAGES_LOCKED|MDL_IO_SPACE; - return; - } - - /* Get the thread and process */ - Thread = PsGetCurrentThread(); - if (Address <= MM_HIGHEST_USER_ADDRESS) - { - /* Get the process */ - CurrentProcess = PsGetCurrentProcess(); - } - else - { - /* No process */ - CurrentProcess = NULL; - } - - /* Check what kind of operaiton this is */ - if (Operation != IoReadAccess) - { - /* Set the write flag */ - Mdl->MdlFlags |= MDL_WRITE_OPERATION; - } - else - { - /* Remove the write flag */ - Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION); - } - - /* Check if this came from kernel mode */ - if (Base >= MM_HIGHEST_USER_ADDRESS) - { - /* We should not have a process */ - ASSERT(CurrentProcess == NULL); - Mdl->Process = NULL; - AddressSpace = MmGetKernelAddressSpace(); - } - else - { - /* Sanity checks */ - ASSERT(NrPages != 0); - ASSERT(CurrentProcess == PsGetCurrentProcess()); - - /* Track locked pages */ - InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages, - NrPages); - - /* Save the process */ - Mdl->Process = CurrentProcess; - - /* Use the process lock */ - AddressSpace = &CurrentProcess->Vm; - } - - - /* - * Lock the pages - */ - if (OldIrql < DISPATCH_LEVEL) - MmLockAddressSpace(AddressSpace); - else - MmAcquirePageListLock(&OldIrql); - - for (i = 0; i < NrPages; i++) - { - PVOID Address; - - Address = (char*)Mdl->StartVa + (i*PAGE_SIZE); - - if (!MmIsPagePresent(NULL, Address)) - { - /* Fault the page in */ - Status = MmAccessFault(FALSE, Address, AccessMode, NULL); - if (!NT_SUCCESS(Status)) - { - goto cleanup; - } - } - else - { 
- MmLockPage(MmGetPfnForProcess(NULL, Address)); - } - - if ((Operation == IoWriteAccess || Operation == IoModifyAccess) && - (!(MmGetPageProtect(NULL, (PVOID)Address) & PAGE_READWRITE))) - { - Status = MmAccessFault(TRUE, Address, AccessMode, NULL); - if (!NT_SUCCESS(Status)) - { - for (j = 0; j < i; j++) - { - Page = MdlPages[j]; - if (Page < MmHighestPhysicalPage) - { - MmUnlockPage(Page); - MmDereferencePage(Page); - } - } - goto cleanup; - } - } - Page = MmGetPfnForProcess(NULL, Address); - MdlPages[i] = Page; - if (Page >= MmHighestPhysicalPage) - { - Mdl->MdlFlags |= MDL_IO_SPACE; - } - else - { - MmReferencePage(Page); - } - } - -cleanup: - if (OldIrql < DISPATCH_LEVEL) - MmUnlockAddressSpace(AddressSpace); - else - MmReleasePageListLock(OldIrql); - - if (!NT_SUCCESS(Status)) - ExRaiseStatus(STATUS_ACCESS_VIOLATION); - Mdl->MdlFlags |= MDL_PAGES_LOCKED; - return; -} - -/* - * @implemented - */ -PMDL -NTAPI -MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress, - IN PHYSICAL_ADDRESS HighAddress, - IN PHYSICAL_ADDRESS SkipBytes, - IN SIZE_T Totalbytes) -{ - PMDL Mdl; - PPFN_TYPE Pages; - ULONG NumberOfPagesWanted, NumberOfPagesAllocated; - ULONG Ret; - DPRINT("Allocating pages: %p\n", LowAddress.LowPart); - - /* SkipBytes must be a multiple of the page size */ - if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL; - - /* Create the actual MDL */ - Mdl = MmCreateMdl(NULL, NULL, Totalbytes); - if (!Mdl) return NULL; - - /* Allocate pages into the MDL */ - NumberOfPagesAllocated = 0; - NumberOfPagesWanted = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE; - Pages = (PPFN_TYPE)(Mdl + 1); - while (NumberOfPagesWanted > 0) - { - Ret = MmAllocPagesSpecifyRange(MC_NPPOOL, - LowAddress, - HighAddress, - NumberOfPagesWanted, - Pages + NumberOfPagesAllocated); - if (Ret == (ULONG)-1) break; - - NumberOfPagesAllocated += Ret; - NumberOfPagesWanted -= Ret; - - if (SkipBytes.QuadPart == 0) break; - LowAddress.QuadPart += SkipBytes.QuadPart; - HighAddress.QuadPart += SkipBytes.QuadPart; - } - - /* If nothing was allocated, fail */ - if (NumberOfPagesWanted) - { - /* Free our MDL */ - ExFreePool(Mdl); - return NULL; - } - - /* Zero out the MDL pages */ - //RtlZeroMemory(LowAddress.LowPart, NumberOfPagesAllocated * PAGE_SIZE); - - /* Return the MDL */ - Mdl->MdlFlags |= MDL_PAGES_LOCKED; - Mdl->ByteCount = (ULONG)(NumberOfPagesAllocated * PAGE_SIZE); - return Mdl; -} - -/* - * @unimplemented - */ -PMDL -NTAPI -MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress, - IN PHYSICAL_ADDRESS HighAddress, - IN PHYSICAL_ADDRESS SkipBytes, - IN SIZE_T Totalbytes, - IN MEMORY_CACHING_TYPE CacheType, - IN ULONG Flags) -{ - UNIMPLEMENTED; - return NULL; -} - -/* - * @implemented - */ -PVOID -NTAPI -MmMapLockedPagesSpecifyCache(IN PMDL Mdl, - IN KPROCESSOR_MODE AccessMode, - IN MEMORY_CACHING_TYPE CacheType, - IN PVOID BaseAddress, - IN ULONG BugCheckOnFailure, - IN MM_PAGE_PRIORITY Priority) -{ - PVOID Base; - PULONG MdlPages; - KIRQL oldIrql; - ULONG PageCount; - ULONG StartingOffset; - PEPROCESS CurrentProcess; - NTSTATUS Status; - ULONG Protect; - MEMORY_AREA *Result; - LARGE_INTEGER BoundaryAddressMultiple; - DPRINT("Mapping MDL: %p\n", Mdl); - DPRINT("Base: %p\n", BaseAddress); - - /* Sanity checks */ - ASSERT(Mdl->ByteCount != 0); - - /* Get the base */ - Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset); - - /* Set default page protection */ - Protect = PAGE_READWRITE; - if (CacheType == MmNonCached) Protect |= PAGE_NOCACHE; - - /* Handle kernel case first */ - if (AccessMode == 
KernelMode) - { - /* Get the list of pages and count */ - MdlPages = (PPFN_NUMBER)(Mdl + 1); - PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount); - - /* Sanity checks */ - ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA | - MDL_SOURCE_IS_NONPAGED_POOL | - MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0); - ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0); - - /* Allocate that number of pages from the mdl mapping region. */ - KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql); - StartingOffset = RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap, - PageCount, - MiMdlMappingRegionHint); - if (StartingOffset == 0xffffffff) - { - KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql); - DPRINT("Out of MDL mapping space\n"); - if ((Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) || !BugCheckOnFailure) - { - return NULL; - } - KeBugCheck(MEMORY_MANAGEMENT); - } - Base = (PVOID)((ULONG_PTR)MiMdlMappingRegionBase + StartingOffset * PAGE_SIZE); - if (MiMdlMappingRegionHint == StartingOffset) MiMdlMappingRegionHint += PageCount; - KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql); - - /* Set the virtual mappings for the MDL pages. */ - if (Mdl->MdlFlags & MDL_IO_SPACE) - { - /* Map the pages */ - Status = MmCreateVirtualMappingUnsafe(NULL, - Base, - Protect, - MdlPages, - PageCount); - } - else - { - /* Map the pages */ - Status = MmCreateVirtualMapping(NULL, - Base, - Protect, - MdlPages, - PageCount); - } - - /* Check if the mapping suceeded */ - if (!NT_SUCCESS(Status)) - { - /* If it can fail, return NULL */ - if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL; - - /* Should we bugcheck? */ - if (!BugCheckOnFailure) return NULL; - - /* Yes, crash the system */ - KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0); - } - - /* Mark it as mapped */ - ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0); - Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA; - - /* Check if it was partial */ - if (Mdl->MdlFlags & MDL_PARTIAL) - { - /* Write the appropriate flag here too */ - Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED; - } - - /* Save the mapped address */ - Base = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset); - Mdl->MappedSystemVa = Base; - return Base; - } - - - /* Calculate the number of pages required. */ - MdlPages = (PPFN_NUMBER)(Mdl + 1); - PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE; - - BoundaryAddressMultiple.QuadPart = 0; - Base = BaseAddress; - - CurrentProcess = PsGetCurrentProcess(); - - MmLockAddressSpace(&CurrentProcess->Vm); - Status = MmCreateMemoryArea(&CurrentProcess->Vm, - MEMORY_AREA_MDL_MAPPING, - &Base, - PageCount * PAGE_SIZE, - Protect, - &Result, - (Base != NULL), - 0, - BoundaryAddressMultiple); - MmUnlockAddressSpace(&CurrentProcess->Vm); - if (!NT_SUCCESS(Status)) - { - if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) - { - return NULL; - } - - /* Throw exception */ - ExRaiseStatus(STATUS_ACCESS_VIOLATION); - ASSERT(0); - } - - /* Set the virtual mappings for the MDL pages. 
*/ - if (Mdl->MdlFlags & MDL_IO_SPACE) - { - /* Map the pages */ - Status = MmCreateVirtualMappingUnsafe(CurrentProcess, - Base, - Protect, - MdlPages, - PageCount); - } - else - { - /* Map the pages */ - Status = MmCreateVirtualMapping(CurrentProcess, - Base, - Protect, - MdlPages, - PageCount); - } - - /* Check if the mapping suceeded */ - if (!NT_SUCCESS(Status)) - { - /* If it can fail, return NULL */ - if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL; - - /* Throw exception */ - ExRaiseStatus(STATUS_ACCESS_VIOLATION); - } - - /* Return the base */ - Base = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset); - return Base; -} - -/* - * @unimplemented - */ -NTSTATUS -NTAPI -MmAdvanceMdl(IN PMDL Mdl, - IN ULONG NumberOfBytes) -{ - UNIMPLEMENTED; - return STATUS_NOT_IMPLEMENTED; -} - -/* - * @unimplemented - */ -PVOID -NTAPI -MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress, - IN ULONG PoolTag, - IN PMDL MemoryDescriptorList, - IN MEMORY_CACHING_TYPE CacheType) -{ - UNIMPLEMENTED; - return 0; -} - -/* - * @unimplemented - */ -VOID -NTAPI -MmUnmapReservedMapping(IN PVOID BaseAddress, - IN ULONG PoolTag, - IN PMDL MemoryDescriptorList) -{ - UNIMPLEMENTED; -} - -/* - * @unimplemented - */ -NTSTATUS -NTAPI -MmPrefetchPages(IN ULONG NumberOfLists, - IN PREAD_LIST *ReadLists) -{ - UNIMPLEMENTED; - return STATUS_NOT_IMPLEMENTED; -} - -/* - * @unimplemented - */ -NTSTATUS -NTAPI -MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList, - IN ULONG NewProtect) -{ - UNIMPLEMENTED; - return STATUS_NOT_IMPLEMENTED; -} - -/* - * @unimplemented - */ -VOID -NTAPI -MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList, - IN PEPROCESS Process, - IN KPROCESSOR_MODE AccessMode, - IN LOCK_OPERATION Operation) -{ - UNIMPLEMENTED; -} - - -/* - * @unimplemented - */ -VOID -NTAPI -MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList, - IN LARGE_INTEGER PageList[], - IN KPROCESSOR_MODE AccessMode, - IN LOCK_OPERATION Operation) -{ - UNIMPLEMENTED; -} - -/* - * @unimplemented - */ -VOID -NTAPI -MmMapMemoryDumpMdl(IN PMDL Mdl) -{ - UNIMPLEMENTED; -} - -/* EOF */ - diff --git a/reactos/ntoskrnl/mm/mminit.c b/reactos/ntoskrnl/mm/mminit.c index 54a8dd59f76..58ef174dbaf 100644 --- a/reactos/ntoskrnl/mm/mminit.c +++ b/reactos/ntoskrnl/mm/mminit.c @@ -392,9 +392,6 @@ MmInit1(VOID) /* Initialize paged pool */ MmInitializePagedPool(); - - /* Initialize MDLs */ - MmInitializeMdlImplementation(); /* Initialize working sets */ MmInitializeMemoryConsumer(MC_USER, MmTrimUserMemory); diff --git a/reactos/ntoskrnl/mm/virtual.c b/reactos/ntoskrnl/mm/virtual.c index f33f5286c67..c2cc437db68 100644 --- a/reactos/ntoskrnl/mm/virtual.c +++ b/reactos/ntoskrnl/mm/virtual.c @@ -622,6 +622,115 @@ MiProtectVirtualMemory(IN PEPROCESS Process, return Status; } +PVOID +NTAPI +MiMapLockedPagesInUserSpace(IN PMDL Mdl, + IN PVOID BaseVa, + IN MEMORY_CACHING_TYPE CacheType, + IN PVOID BaseAddress) +{ + PVOID Base; + PPFN_NUMBER MdlPages; + ULONG PageCount; + PEPROCESS CurrentProcess; + NTSTATUS Status; + ULONG Protect; + MEMORY_AREA *Result; + LARGE_INTEGER BoundaryAddressMultiple; + + /* Calculate the number of pages required. 
*/
+    MdlPages = (PPFN_NUMBER)(Mdl + 1);
+    PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
+
+    /* Set default page protection */
+    Protect = PAGE_READWRITE;
+    if (CacheType == MmNonCached) Protect |= PAGE_NOCACHE;
+
+    BoundaryAddressMultiple.QuadPart = 0;
+    Base = BaseAddress;
+
+    CurrentProcess = PsGetCurrentProcess();
+
+    MmLockAddressSpace(&CurrentProcess->Vm);
+    Status = MmCreateMemoryArea(&CurrentProcess->Vm,
+                                MEMORY_AREA_MDL_MAPPING,
+                                &Base,
+                                PageCount * PAGE_SIZE,
+                                Protect,
+                                &Result,
+                                (Base != NULL),
+                                0,
+                                BoundaryAddressMultiple);
+    MmUnlockAddressSpace(&CurrentProcess->Vm);
+    if (!NT_SUCCESS(Status))
+    {
+        if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
+        {
+            return NULL;
+        }
+
+        /* Throw exception */
+        ExRaiseStatus(STATUS_ACCESS_VIOLATION);
+        ASSERT(0);
+    }
+
+    /* Set the virtual mappings for the MDL pages. */
+    if (Mdl->MdlFlags & MDL_IO_SPACE)
+    {
+        /* Map the pages */
+        Status = MmCreateVirtualMappingUnsafe(CurrentProcess,
+                                              Base,
+                                              Protect,
+                                              MdlPages,
+                                              PageCount);
+    }
+    else
+    {
+        /* Map the pages */
+        Status = MmCreateVirtualMapping(CurrentProcess,
+                                        Base,
+                                        Protect,
+                                        MdlPages,
+                                        PageCount);
+    }
+
+    /* Check if the mapping succeeded */
+    if (!NT_SUCCESS(Status))
+    {
+        /* If it can fail, return NULL */
+        if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;
+
+        /* Throw exception */
+        ExRaiseStatus(STATUS_ACCESS_VIOLATION);
+    }
+
+    /* Return the base */
+    Base = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
+    return Base;
+}
+
+VOID
+NTAPI
+MiUnmapLockedPagesInUserSpace(IN PVOID BaseAddress,
+                              IN PMDL Mdl)
+{
+    PMEMORY_AREA MemoryArea;
+
+    /* Sanity check */
+    ASSERT(Mdl->Process == PsGetCurrentProcess());
+
+    /* Find the memory area */
+    MemoryArea = MmLocateMemoryAreaByAddress(&Mdl->Process->Vm,
+                                             BaseAddress);
+    ASSERT(MemoryArea);
+
+    /* Free it */
+    MmFreeMemoryArea(&Mdl->Process->Vm,
+                     MemoryArea,
+                     NULL,
+                     NULL);
+}
+
 /* PUBLIC FUNCTIONS ***********************************************************/
 
 /*
diff --git a/reactos/ntoskrnl/ntoskrnl-generic.rbuild b/reactos/ntoskrnl/ntoskrnl-generic.rbuild
index 78092bf7f8c..58f416c5f94 100644
--- a/reactos/ntoskrnl/ntoskrnl-generic.rbuild
+++ b/reactos/ntoskrnl/ntoskrnl-generic.rbuild
@@ -366,6 +366,7 @@
 				<file>hypermap.c</file>
 				<file>init.c</file>
 				<file>iosup.c</file>
+				<file>mdlsup.c</file>
 				<file>pool.c</file>
 				<file>procsup.c</file>
 				<file>syspte.c</file>
@@ -375,7 +376,6 @@
 			<file>dbgpool.c</file>
 			<file>freelist.c</file>
 			<file>marea.c</file>
-			<file>mdlsup.c</file>
 			<file>mmfault.c</file>
 			<file>mmsup.c</file>
 			<file>mminit.c</file>
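To tie the pieces of this patch together, here is the usual driver-side MDL life cycle that exercises the rewritten paths: probe-and-lock (MmProbeAndLockPages raises on failure, hence the SEH wrapper, using the same _SEH2 macros the patch itself uses), system-space mapping (MmGetSystemAddressForMdlSafe ends up in MmMapLockedPagesSpecifyCache), then unlock. A hedged sketch, not part of the patch; the function name and the UserBuffer/Length inputs are hypothetical:

NTSTATUS
SketchLockUserBuffer(IN PVOID UserBuffer,
                     IN ULONG Length)
{
    PMDL Mdl;
    PVOID SystemVa;
    NTSTATUS Status = STATUS_SUCCESS;

    /* Build an MDL header describing the user-mode range */
    Mdl = IoAllocateMdl(UserBuffer, Length, FALSE, FALSE, NULL);
    if (!Mdl) return STATUS_INSUFFICIENT_RESOURCES;

    /* Probe and lock the pages; this raises instead of returning a status */
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, UserMode, IoWriteAccess);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
    }
    _SEH2_END;
    if (!NT_SUCCESS(Status))
    {
        IoFreeMdl(Mdl);
        return Status;
    }

    /* Map into system space; this reaches MmMapLockedPagesSpecifyCache */
    SystemVa = MmGetSystemAddressForMdlSafe(Mdl, NormalPagePriority);
    if (SystemVa)
    {
        /* ... access the locked buffer through SystemVa ... */
    }

    /* MmUnlockPages also undoes any system-space mapping made above */
    MmUnlockPages(Mdl);
    IoFreeMdl(Mdl);
    return SystemVa ? STATUS_SUCCESS : STATUS_INSUFFICIENT_RESOURCES;
}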