- Implement a simple nonpaged pool page allocator and deallocator:

- Not actually used yet!
  - No support for expansion nonpaged pool yet.
  - No major optimizations.
    - On free, we simply do the sane thing of trying to combine the next free block with the one we're about to create.
      - We'll also check whether the previous allocation was a free block, and if so we'll merge ourselves (and, in the best case, whoever is following us) into that block instead (see the simplified sketch after this list).
  - No debug checks/validation.
- Write a couple of lines of code to allocate and free up to 4096 pages (it will stop when it runs out).
  - This is to properly exercise this code on everyone's machines to make sure it works fine (it adds a little delay to booting, but only temporarily).
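
As a rough illustration of the merge policy described above, here is a minimal, self-contained sketch in plain C. It is not the kernel code from the diff below; the page-flag arrays and function names are made up for the example, loosely mirroring the StartOfAllocation/EndOfAllocation bits and the free-entry size the real code keeps. Freeing first tries to absorb the free run that follows the released pages, then tries to fold everything into the free run that precedes them:

#include <stdio.h>

#define POOL_PAGES 16

static int StartOfAlloc[POOL_PAGES]; /* 1 on the first page of an allocation         */
static int EndOfAlloc[POOL_PAGES];   /* 1 on the last page of an allocation          */
static int FreeSize[POOL_PAGES];     /* length of the free run starting at this page */

/* Carve Pages from the tail of the first free run that is big enough,
   like the allocator in the diff below; returns -1 when out of space. */
static int AllocPages(int Pages)
{
    for (int i = 0; i < POOL_PAGES; i++)
    {
        if (FreeSize[i] >= Pages)
        {
            FreeSize[i] -= Pages;
            int Start = i + FreeSize[i];
            StartOfAlloc[Start] = 1;
            EndOfAlloc[Start + Pages - 1] = 1;
            return Start;
        }
    }
    return -1;
}

/* Free an allocation: merge forward with a following free run, then merge
   backward into a preceding one, as the commit message describes. */
static void FreeAlloc(int Start, int Pages)
{
    int Total = Pages, NewStart = Start;

    StartOfAlloc[Start] = 0;
    EndOfAlloc[Start + Pages - 1] = 0;

    /* Forward merge: absorb the free run that begins right after us */
    int Next = Start + Pages;
    if (Next < POOL_PAGES && !StartOfAlloc[Next])
    {
        Total += FreeSize[Next];
        FreeSize[Next] = 0;
    }

    /* Backward merge: fold everything into the free run ending just before us */
    if (Start > 0 && !EndOfAlloc[Start - 1])
    {
        int Prev = Start - 1;
        while (FreeSize[Prev] == 0) Prev--; /* walk back to that run's first page */
        Total += FreeSize[Prev];
        NewStart = Prev;
    }
    FreeSize[NewStart] = Total;
}

int main(void)
{
    FreeSize[0] = POOL_PAGES;       /* the whole pool starts out as one free run  */
    int a = AllocPages(3), b = AllocPages(2), c = AllocPages(4);
    FreeAlloc(b, 2);                /* no free neighbors yet: becomes its own run */
    FreeAlloc(a, 3);                /* merges backward into the run left by b     */
    FreeAlloc(c, 4);                /* merges forward into that run, then backward
                                       into the original one                      */
    printf("free run at page 0 spans %d pages\n", FreeSize[0]);
    return 0;
}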

svn path=/trunk/; revision=41859
ReactOS Portable Systems Group 2009-07-11 06:46:39 +00:00
parent 08df2017e3
commit 2aabdcc5fd
2 changed files with 411 additions and 0 deletions

@@ -714,6 +714,26 @@ MmArmInitSystem(IN ULONG Phase,
// Initialize the nonpaged pool
//
InitializePool(NonPagedPool, 0);
//
// Do a little test of the nonpaged pool allocator
//
if (1)
{
ULONG i = 0;
PVOID Buffers[4096];
while (TRUE)
{
Buffers[i] = MiAllocatePoolPages(NonPagedPool, PAGE_SIZE);
if (!Buffers[i]) break;
i++;
}
while (i--)
{
MiFreePoolPages(Buffers[i]);
}
}
}
else
{

@@ -125,4 +125,395 @@ MiInitializeArmPool(VOID)
NonPagedPoolExpansion);
}
PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
IN SIZE_T SizeInBytes)
{
PFN_NUMBER SizeInPages;
ULONG i;
KIRQL OldIrql;
PLIST_ENTRY NextEntry, NextHead, LastHead;
PMMPTE PointerPte;
PMMPFN Pfn1;
PVOID BaseVa;
PMMFREE_POOL_ENTRY FreeEntry;
//
// Figure out how big the allocation is in pages
//
SizeInPages = BYTES_TO_PAGES(SizeInBytes);
//
// Allocations of less than 4 pages go into their individual buckets
//
i = SizeInPages - 1;
if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
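// (MI_MAX_FREE_PAGE_LISTS is presumably 4 here, matching the "less than 4 pages"
//  note in the commit message: lists 0/1/2 hold 1-, 2- and 3-page runs, and the
//  last list collects everything larger)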
//
// Loop through all the free page lists based on the page index
//
NextHead = &MmNonPagedPoolFreeListHead[i];
LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
//
// Acquire the nonpaged pool lock
//
OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
do
{
//
// Now loop through all the free page entries in this given list
//
NextEntry = NextHead->Flink;
while (NextEntry != NextHead)
{
//
// Grab the entry and see if it can handle our allocation
//
FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
if (FreeEntry->Size >= SizeInPages)
{
//
// It does, so consume the pages from here
//
FreeEntry->Size -= SizeInPages;
//
// The allocation will begin in this free page area
//
BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
(FreeEntry->Size << PAGE_SHIFT));
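// (the allocation is carved from the tail of the free run, so the free entry
//  header at the start of the run keeps describing whatever pages remain)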
//
// This is not a free page segment anymore
//
RemoveEntryList(&FreeEntry->List);
//
// However, check if it's still got space left
//
if (FreeEntry->Size != 0)
{
//
// Insert it back into a different list, based on its pages
//
i = FreeEntry->Size - 1;
if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
InsertTailList (&MmNonPagedPoolFreeListHead[i],
&FreeEntry->List);
}
//
// Grab the PTE for this allocation
//
PointerPte = MiAddressToPte(BaseVa);
ASSERT(PointerPte->u.Hard.Valid == 1);
//
// Grab the PFN entry for the first page of the allocation
//
Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
//
// Now mark it as the beginning of an allocation
//
ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
Pfn1->u3.e1.StartOfAllocation = 1;
//
// Check if the allocation is larger than one page
//
if (SizeInPages != 1)
{
//
// Navigate to the last PFN entry and PTE
//
PointerPte += SizeInPages - 1;
ASSERT(PointerPte->u.Hard.Valid == 1);
Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
}
//
// Mark this PFN as the last (might be the same as the first)
//
ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
Pfn1->u3.e1.EndOfAllocation = 1;
//
// Release the nonpaged pool lock, and return the allocation
//
KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
return BaseVa;
}
//
// Try the next free page entry
//
NextEntry = FreeEntry->List.Flink;
}
} while (++NextHead < LastHead);
//
// If we got here, we're out of space.
// Start by releasing the lock
//
KeReleaseQueuedSpinLock (LockQueueMmNonPagedPoolLock, OldIrql);
//
// We should now go into expansion nonpaged pool
//
DPRINT1("Out of NP Pool\n");
return NULL;
}
ULONG
NTAPI
MiFreePoolPages(IN PVOID StartingVa)
{
PMMPTE PointerPte, StartPte;
PMMPFN Pfn1, StartPfn;
PFN_NUMBER FreePages, NumberOfPages;
KIRQL OldIrql;
PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
ULONG i;
//
// Get the first PTE and its corresponding PFN entry
//
StartPte = PointerPte = MiAddressToPte(StartingVa);
StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
//
// Loop until we find the last PTE
//
while (Pfn1->u3.e1.EndOfAllocation == 0)
{
//
// Keep going
//
PointerPte++;
Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
}
//
// Now we know how many pages we have
//
NumberOfPages = PointerPte - StartPte + 1;
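// (PTEs that map consecutive virtual pages are themselves consecutive, so the
//  PTE difference gives the allocation's size in pages)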
//
// Acquire the nonpaged pool lock
//
OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
//
// Mark the first and last PTEs as not part of an allocation anymore
//
StartPfn->u3.e1.StartOfAllocation = 0;
Pfn1->u3.e1.EndOfAllocation = 0;
//
// Assume we will free as many pages as the allocation was
//
FreePages = NumberOfPages;
//
// Peek one page past the end of the allocation
//
PointerPte++;
//
// Guard against going past initial nonpaged pool
//
if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame)
{
//
// This page is on the outskirts of initial nonpaged pool, so ignore it
//
DPRINT1("End of initial frame\n");
Pfn1 = NULL;
}
else
{
//
// Otherwise, our entire allocation must've fit within the initial non
// paged pool, or the expansion nonpaged pool, so get the PFN entry of
// the next allocation
//
ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);
if (PointerPte->u.Hard.Valid == 1)
{
//
// It's either expansion or initial: get the PFN entry
//
Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
}
else
{
//
// This means we've reached the guard page that protects the end of
// the expansion nonpaged pool
//
Pfn1 = NULL;
}
}
//
// Check if an allocation actually starts on the page that follows ours
//
if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
{
//
// It doesn't, so that page begins a free block: locate its free entry descriptor
//
FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
(NumberOfPages << PAGE_SHIFT));
ASSERT(FreeEntry->Owner == FreeEntry);
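// (every page of a free block points back to the block's first page through
//  Owner, so the entry at the head of the following block must point at itself)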
//
// Consume this entry's pages, and remove it from its free list
//
FreePages += FreeEntry->Size;
RemoveEntryList (&FreeEntry->List);
}
//
// Now get the official free entry we'll create for the caller's allocation
//
FreeEntry = StartingVa;
//
// Check if our allocation starts at the very first page
//
if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame)
{
//
// Then we can't do anything or we'll risk underflowing
//
DPRINT1("Start of of initial frame\n");
Pfn1 = NULL;
}
else
{
//
// Otherwise, get the PTE for the page right before our allocation
//
PointerPte -= NumberOfPages + 1;
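// (PointerPte was left one page past the end of our allocation above, so stepping
//  back NumberOfPages + 1 lands on the page just before the start)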
if (PointerPte->u.Hard.Valid == 1)
{
//
// It's either expansion or initial nonpaged pool, get the PFN entry
//
Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
}
else
{
//
// We must've reached the guard page, so don't risk touching it
//
Pfn1 = NULL;
}
}
//
// Check if there is a valid PFN entry for the page before the allocation
// and then check if this page was actually the end of an allocation.
// If it wasn't, then we know for sure it's a free page
//
if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
{
//
// Get the free entry descriptor for that given page range
//
FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
FreeEntry = FreeEntry->Owner;
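// (the page just before us may sit anywhere inside the preceding free block;
//  Owner takes us back to that block's head entry)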
//
// Check if the entry is small enough to be indexed on a free list
// If it is, we'll want to re-insert it, since we're about to
// collapse our pages on top of it, which will change its count
//
if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1))
{
//
// Remove the list from where it is now
//
RemoveEntryList(&FreeEntry->List);
//
// Update its size
//
FreeEntry->Size += FreePages;
//
// And now find the new appropriate list to place it in
//
i = (ULONG)(FreeEntry->Size - 1);
if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
//
// Do it
//
InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
}
else
{
//
// Otherwise, just combine our free pages into this entry
//
FreeEntry->Size += FreePages;
}
}
//
// Check if we were unable to merge backward, in which case we become a free entry of our own
//
if (FreeEntry == StartingVa)
{
//
// Well, now we are a free entry. At worst we just have our newly freed
// pages, at best we have our pages plus whatever entry came after us
//
FreeEntry->Size = FreePages;
//
// Find the appropriate list we should be on
//
i = FreeEntry->Size - 1;
if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
//
// And insert us
//
InsertTailList (&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
}
//
// Just a sanity check
//
ASSERT(FreePages != 0);
//
// Loop over all the pages we just freed (plus any following block we absorbed).
// They all become part of the free entry and must point back at it.
//
NextEntry = StartingVa;
LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
do
{
//
// Link back to the parent free entry, and keep going
//
NextEntry->Owner = FreeEntry;
NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
} while (NextEntry != LastEntry);
//
// We're done, release the lock and let the caller know how much we freed
//
KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
return NumberOfPages;
}
/* EOF */