- Fix recursive spinlock acquisition in Mm caused by a locking inconsistency between ARM3 and the old ReactOS Mm. The old Mm calls certain routines to modify PFN entries (lock, unlock, reference, dereference, etc) and acquires/releases the PFN lock inside those functions (which is extremely inefficient, as you then, for example, have to acquire and release the PFN lock twice just to reference and lock the same page), while ARM3 synchronizes differently and holds the lock while calling those routines, resulting in a recursive lock attempt on MP (which happens to work on UP only because spinlocks there are just an IRQL raise/lower). Move the locking out of MmAllocPage, MmReference/DereferencePage and MmLock/UnlockPage into the callers to be consistent with ARM3.

- Add missing PFN locking to MmFreePagesFromMdl and MiAllocatePoolPages.
- Get rid of MmLockPageUnsafe and MmReferencePageUnsafe. The "safe" routines just forwarded to the unsafe versions, so call the latter directly instead. Also remove the now-unused MmAcquirePageListLock/MmReleasePageListLock inline helpers.

svn path=/trunk/; revision=43240
This commit is contained in:
Stefan Ginsberg 2009-09-30 18:24:00 +00:00
parent 4f128a3206
commit b42aaf88bb
8 changed files with 72 additions and 81 deletions

View file

@ -1093,10 +1093,6 @@ VOID
NTAPI
MmLockPage(PFN_TYPE Page);
VOID
NTAPI
MmLockPageUnsafe(PFN_TYPE Page);
VOID
NTAPI
MmUnlockPage(PFN_TYPE Page);
@ -1105,23 +1101,6 @@ ULONG
NTAPI
MmGetLockCountPage(PFN_TYPE Page);
static
__inline
KIRQL
NTAPI
MmAcquirePageListLock()
{
return KeAcquireQueuedSpinLock(LockQueuePfnLock);
}
FORCEINLINE
VOID
NTAPI
MmReleasePageListLock(KIRQL oldIrql)
{
KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
}
VOID
NTAPI
MmInitializePageList(
@ -1343,10 +1322,6 @@ VOID
NTAPI
MmReferencePage(PFN_TYPE Page);
VOID
NTAPI
MmReferencePageUnsafe(PFN_TYPE Page);
ULONG
NTAPI
MmGetReferenceCountPage(PFN_TYPE Page);

View file

@ -215,6 +215,7 @@ MmFreePagesFromMdl(IN PMDL Mdl)
PPFN_NUMBER Pages;
LONG NumberOfPages;
PMMPFN Pfn1;
KIRQL OldIrql;
DPRINT("Freeing MDL: %p\n", Mdl);
//
@ -229,7 +230,12 @@ MmFreePagesFromMdl(IN PMDL Mdl)
//
Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
//
// Acquire PFN lock
//
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
//
// Loop all the MDL pages
//
@ -268,7 +274,12 @@ MmFreePagesFromMdl(IN PMDL Mdl)
//
*Pages++ = -1;
} while (--NumberOfPages != 0);
//
// Release the lock
//
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
//
// Remove the pages locked flag
//

View file

@ -141,6 +141,7 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
PMMPFN Pfn1;
PVOID BaseVa;
PMMFREE_POOL_ENTRY FreeEntry;
PKSPIN_LOCK_QUEUE LockQueue;
//
// Figure out how big the allocation is in pages
@ -285,7 +286,8 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
//
// Lock the PFN database too
//
//KeAcquireQueuedSpinLockAtDpcLevel(LockQueuePfnLock);
LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
//
// Loop the pages
@ -326,7 +328,7 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
//
// Release the PFN and nonpaged pool lock
//
//KeReleaseQueuedSpinLockFromDpcLevel(LockQueuePfnLock);
KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
//

View file

@ -258,6 +258,7 @@ MmNotPresentFaultVirtualMemory(PMMSUPPORT AddressSpace,
PMM_REGION Region;
PMM_PAGEOP PageOp;
PEPROCESS Process = MmGetAddressSpaceOwner(AddressSpace);
KIRQL OldIrql;
/*
* There is a window between taking the page fault and locking the
@ -268,7 +269,9 @@ MmNotPresentFaultVirtualMemory(PMMSUPPORT AddressSpace,
{
if (Locked)
{
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MmLockPage(MmGetPfnForProcess(NULL, Address));
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
return(STATUS_SUCCESS);
}
@ -362,7 +365,9 @@ MmNotPresentFaultVirtualMemory(PMMSUPPORT AddressSpace,
MmLockAddressSpace(AddressSpace);
if (Locked)
{
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MmLockPage(MmGetPfnForProcess(NULL, Address));
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmReleasePageOp(PageOp);
@ -437,7 +442,9 @@ MmNotPresentFaultVirtualMemory(PMMSUPPORT AddressSpace,
*/
if (Locked)
{
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MmLockPage(Page);
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);

View file

@ -105,7 +105,7 @@ MmReleasePageMemoryConsumer(ULONG Consumer, PFN_TYPE Page)
{
PMM_ALLOCATION_REQUEST Request;
PLIST_ENTRY Entry;
KIRQL oldIrql;
KIRQL OldIrql;
if (Page == 0)
{
@ -113,20 +113,22 @@ MmReleasePageMemoryConsumer(ULONG Consumer, PFN_TYPE Page)
KeBugCheck(MEMORY_MANAGEMENT);
}
KeAcquireSpinLock(&AllocationListLock, &oldIrql);
KeAcquireSpinLock(&AllocationListLock, &OldIrql);
if (MmGetReferenceCountPage(Page) == 1)
{
(void)InterlockedDecrementUL(&MiMemoryConsumers[Consumer].PagesUsed);
if (IsListEmpty(&AllocationListHead) || MmStats.NrFreePages < MiMinimumAvailablePages)
{
KeReleaseSpinLock(&AllocationListLock, oldIrql);
KeReleaseSpinLock(&AllocationListLock, OldIrql);
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MmDereferencePage(Page);
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
else
{
Entry = RemoveHeadList(&AllocationListHead);
Request = CONTAINING_RECORD(Entry, MM_ALLOCATION_REQUEST, ListEntry);
KeReleaseSpinLock(&AllocationListLock, oldIrql);
KeReleaseSpinLock(&AllocationListLock, OldIrql);
if(Consumer == MC_USER) MmRemoveLRUUserPage(Page);
MiZeroPage(Page);
Request->Page = Page;
@ -135,9 +137,11 @@ MmReleasePageMemoryConsumer(ULONG Consumer, PFN_TYPE Page)
}
else
{
KeReleaseSpinLock(&AllocationListLock, oldIrql);
KeReleaseSpinLock(&AllocationListLock, OldIrql);
if(Consumer == MC_USER) MmRemoveLRUUserPage(Page);
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MmDereferencePage(Page);
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
return(STATUS_SUCCESS);
@ -235,7 +239,7 @@ MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
{
ULONG OldUsed;
PFN_TYPE Page;
KIRQL oldIrql;
KIRQL OldIrql;
/*
* Make sure we don't exceed our individual target.
@ -257,7 +261,9 @@ MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
*/
if ((Consumer == MC_NPPOOL) || (Consumer == MC_SYSTEM) || MiIsBalancerThread())
{
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
Page = MmAllocPage(Consumer, 0);
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
if (Page == 0)
{
KeBugCheck(NO_PAGES_AVAILABLE);
@ -290,14 +296,14 @@ MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
KeInitializeEvent(&Request.Event, NotificationEvent, FALSE);
(void)InterlockedIncrementUL(&MiPagesRequired);
KeAcquireSpinLock(&AllocationListLock, &oldIrql);
KeAcquireSpinLock(&AllocationListLock, &OldIrql);
if (MiBalancerThreadHandle != NULL)
{
KeSetEvent(&MiBalancerEvent, IO_NO_INCREMENT, FALSE);
}
InsertTailList(&AllocationListHead, &Request.ListEntry);
KeReleaseSpinLock(&AllocationListLock, oldIrql);
KeReleaseSpinLock(&AllocationListLock, OldIrql);
KeWaitForSingleObject(&Request.Event,
0,
@ -321,7 +327,9 @@ MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
/*
* Actually allocate the page.
*/
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
Page = MmAllocPage(Consumer, 0);
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
if (Page == 0)
{
KeBugCheck(NO_PAGES_AVAILABLE);

View file

@ -830,20 +830,17 @@ MmGetSavedSwapEntryPage(PFN_TYPE Pfn)
VOID
NTAPI
MmReferencePageUnsafe(PFN_TYPE Pfn)
MmReferencePage(PFN_TYPE Pfn)
{
KIRQL oldIrql;
PPHYSICAL_PAGE Page;
DPRINT("MmReferencePageUnsafe(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
if (Pfn == 0 || Pfn > MmHighestPhysicalPage)
{
return;
}
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
Page = MiGetPfnEntry(Pfn);
ASSERT(Page);
if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
@ -853,16 +850,6 @@ MmReferencePageUnsafe(PFN_TYPE Pfn)
}
Page->ReferenceCount++;
KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
}
VOID
NTAPI
MmReferencePage(PFN_TYPE Pfn)
{
DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
MmReferencePageUnsafe(Pfn);
}
ULONG
@ -904,13 +891,10 @@ VOID
NTAPI
MmDereferencePage(PFN_TYPE Pfn)
{
KIRQL oldIrql;
PPHYSICAL_PAGE Page;
DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
Page = MiGetPfnEntry(Pfn);
ASSERT(Page);
@ -962,7 +946,6 @@ MmDereferencePage(PFN_TYPE Pfn)
KeSetEvent(&ZeroPageThreadEvent, IO_NO_INCREMENT, FALSE);
}
}
KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
}
ULONG
@ -993,14 +976,11 @@ MmGetLockCountPage(PFN_TYPE Pfn)
VOID
NTAPI
MmLockPageUnsafe(PFN_TYPE Pfn)
MmLockPage(PFN_TYPE Pfn)
{
KIRQL oldIrql;
PPHYSICAL_PAGE Page;
DPRINT("MmLockPageUnsafe(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
DPRINT("MmLockPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
Page = MiGetPfnEntry(Pfn);
ASSERT(Page);
@ -1011,29 +991,16 @@ MmLockPageUnsafe(PFN_TYPE Pfn)
}
Page->LockCount++;
KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
}
VOID
NTAPI
MmLockPage(PFN_TYPE Pfn)
{
DPRINT("MmLockPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
MmLockPageUnsafe(Pfn);
}
VOID
NTAPI
MmUnlockPage(PFN_TYPE Pfn)
{
KIRQL oldIrql;
PPHYSICAL_PAGE Page;
DPRINT("MmUnlockPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
Page = MiGetPfnEntry(Pfn);
ASSERT(Page);
if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
@ -1043,7 +1010,6 @@ MmUnlockPage(PFN_TYPE Pfn)
}
Page->LockCount--;
KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
}
PFN_TYPE
@ -1053,12 +1019,10 @@ MmAllocPage(ULONG Consumer, SWAPENTRY SwapEntry)
PFN_TYPE PfnOffset;
PLIST_ENTRY ListEntry;
PPHYSICAL_PAGE PageDescriptor;
KIRQL oldIrql;
BOOLEAN NeedClear = FALSE;
DPRINT("MmAllocPage()\n");
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
if (IsListEmpty(&FreeZeroedPageListHead))
{
if (IsListEmpty(&FreeUnzeroedPageListHead))
@ -1070,7 +1034,6 @@ MmAllocPage(ULONG Consumer, SWAPENTRY SwapEntry)
}
DPRINT1("MmAllocPage(): Out of memory\n");
KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
return 0;
}
ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
@ -1106,8 +1069,6 @@ MmAllocPage(ULONG Consumer, SWAPENTRY SwapEntry)
MmStats.NrSystemPages++;
MmStats.NrFreePages--;
KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
PfnOffset = PageDescriptor - MmPfnDatabase;
if ((NeedClear) && (Consumer != MC_SYSTEM))
{

View file

@ -288,6 +288,8 @@ MmCommitPagedPoolAddress(PVOID Address, BOOLEAN Locked)
{
NTSTATUS Status;
PFN_TYPE AllocatedPage;
KIRQL OldIrql;
Status = MmRequestPageMemoryConsumer(MC_PPOOL, FALSE, &AllocatedPage);
if (!NT_SUCCESS(Status))
{
@ -303,7 +305,9 @@ MmCommitPagedPoolAddress(PVOID Address, BOOLEAN Locked)
1);
if (Locked)
{
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MmLockPage(AllocatedPage);
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
return(Status);
}

View file

@ -771,6 +771,7 @@ MmNotPresentFaultSectionView(PMMSUPPORT AddressSpace,
PMM_REGION Region;
BOOLEAN HasSwapEntry;
PEPROCESS Process = MmGetAddressSpaceOwner(AddressSpace);
KIRQL OldIrql;
/*
* There is a window between taking the page fault and locking the
@ -781,7 +782,9 @@ MmNotPresentFaultSectionView(PMMSUPPORT AddressSpace,
{
if (Locked)
{
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MmLockPage(MmGetPfnForProcess(Process, Address));
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
return(STATUS_SUCCESS);
}
@ -908,7 +911,9 @@ MmNotPresentFaultSectionView(PMMSUPPORT AddressSpace,
}
if (Locked)
{
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MmLockPage(Page);
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
MmUnlockSectionSegment(Segment);
PageOp->Status = STATUS_SUCCESS;
@ -978,7 +983,9 @@ MmNotPresentFaultSectionView(PMMSUPPORT AddressSpace,
*/
if (Locked)
{
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MmLockPage(Page);
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
PageOp->Status = STATUS_SUCCESS;
MmspCompleteAndReleasePageOp(PageOp);
@ -1013,7 +1020,9 @@ MmNotPresentFaultSectionView(PMMSUPPORT AddressSpace,
*/
if (Locked)
{
MmLockPageUnsafe(Page);
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MmLockPage(Page);
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
/*
@ -1056,7 +1065,9 @@ MmNotPresentFaultSectionView(PMMSUPPORT AddressSpace,
MmInsertRmap(Page, Process, (PVOID)PAddress);
if (Locked)
{
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MmLockPage(Page);
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
/*
@ -1156,7 +1167,9 @@ MmNotPresentFaultSectionView(PMMSUPPORT AddressSpace,
if (Locked)
{
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MmLockPage(Page);
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
PageOp->Status = STATUS_SUCCESS;
MmspCompleteAndReleasePageOp(PageOp);
@ -1230,7 +1243,9 @@ MmNotPresentFaultSectionView(PMMSUPPORT AddressSpace,
MmInsertRmap(Page, Process, (PVOID)PAddress);
if (Locked)
{
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MmLockPage(Page);
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
PageOp->Status = STATUS_SUCCESS;
MmspCompleteAndReleasePageOp(PageOp);
@ -1262,7 +1277,9 @@ MmNotPresentFaultSectionView(PMMSUPPORT AddressSpace,
MmInsertRmap(Page, Process, (PVOID)PAddress);
if (Locked)
{
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MmLockPage(Page);
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
PageOp->Status = STATUS_SUCCESS;
MmspCompleteAndReleasePageOp(PageOp);
@ -1289,6 +1306,7 @@ MmAccessFaultSectionView(PMMSUPPORT AddressSpace,
PMM_REGION Region;
ULONG Entry;
PEPROCESS Process = MmGetAddressSpaceOwner(AddressSpace);
KIRQL OldIrql;
DPRINT("MmAccessFaultSectionView(%x, %x, %x, %x)\n", AddressSpace, MemoryArea, Address, Locked);
@ -1429,8 +1447,10 @@ MmAccessFaultSectionView(PMMSUPPORT AddressSpace,
}
if (Locked)
{
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MmLockPage(NewPage);
MmUnlockPage(OldPage);
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
/*
@ -1511,6 +1531,7 @@ MmPageOutSectionView(PMMSUPPORT AddressSpace,
BOOLEAN DirectMapped;
BOOLEAN IsImageSection;
PEPROCESS Process = MmGetAddressSpaceOwner(AddressSpace);
KIRQL OldIrql;
Address = (PVOID)PAGE_ROUND_DOWN(Address);
@ -1599,7 +1620,9 @@ MmPageOutSectionView(PMMSUPPORT AddressSpace,
}
else
{
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MmReferencePage(Page);
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
MmDeleteAllRmaps(Page, (PVOID)&Context, MmPageOutDeleteMapping);