- Reimplement kernel stack allocation (MmCreateKernelStack, MmDeleteKernelStack, MmGrowKernelStack, MmGrowKernelStackEx):

- Use System PTEs.
  - Allocate 12KB stacks with a 4KB guard page, and expand them up to 60KB as required.
    - Previous implementation always allocated 60KB, without any guard page.
  - The result is that on a minimal ReactOS install, simply booting up now requires on average 0.5MB less physical memory than before.
  - And once again, optimizations to the system PTE allocation code should significantly improve performance.
  - Should also analyze benefits of using a dead stack slist as done on Windows. (Assembla Ticket #39).


svn path=/trunk/; revision=41636
This commit is contained in:
ReactOS Portable Systems Group 2009-06-27 08:41:45 +00:00
parent 5a526879a4
commit ddedfd132a
3 changed files with 286 additions and 132 deletions

View file

@ -0,0 +1,285 @@
/*
* PROJECT: ReactOS Kernel
* LICENSE: BSD - See COPYING.ARM in the top level directory
* FILE: ntoskrnl/mm/ARM3/procsup.c
* PURPOSE: ARM Memory Manager Process Related Management
* PROGRAMMERS: ReactOS Portable Systems Group
*/
/* INCLUDES *******************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::PROCSUP"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
ULONG PagesForStacks = 0;
/* PRIVATE FUNCTIONS **********************************************************/
VOID
NTAPI
MmDeleteKernelStack(IN PVOID StackBase,
IN BOOLEAN GuiStack)
{
PMMPTE Pte;
PFN_NUMBER PageCount;
ULONG Index;
//
// StackBase sits just above the topmost stack page; step down one PTE
// to land on that topmost page
//
Pte = MiAddressToPte(StackBase) - 1;
//
// Number of stack pages in the reservation (guard page not included)
//
PageCount = BYTES_TO_PAGES(GuiStack ?
KERNEL_LARGE_STACK_SIZE : KERNEL_STACK_SIZE);
//
// Walk downward over every stack PTE, freeing whatever was committed
//
for (Index = 0; Index < PageCount; Index++)
{
if (Pte->u.Hard.Valid == 1)
{
//
// Committed page: hand it back to the nonpaged pool consumer
// and update the global stack-page accounting
//
MmReleasePageMemoryConsumer(MC_NPPOOL, PFN_FROM_PTE(Pte));
PagesForStacks--;
}
Pte--;
}
//
// Pte now points at the guard PTE, which must never have been mapped
//
ASSERT(Pte->u.Hard.Valid == 0);
//
// Return the whole reservation: stack PTEs plus the guard PTE
//
MiReleaseSystemPtes(Pte, PageCount + 1, SystemPteSpace);
}
PVOID
NTAPI
MmCreateKernelStack(IN BOOLEAN GuiStack,
IN UCHAR Node)
{
PFN_NUMBER StackPtes, StackPages;
PMMPTE PointerPte, StackPte;
PVOID BaseAddress;
MMPTE TempPte;
KIRQL OldIrql;
PFN_NUMBER PageFrameIndex;
ULONG i;
//
// Decide how many PTEs to reserve (StackPtes) and how many pages to
// commit immediately (StackPages)
//
if (GuiStack)
{
//
// GUI threads get the large stack: reserve KERNEL_LARGE_STACK_SIZE worth
// of PTEs, but commit only KERNEL_LARGE_STACK_COMMIT up front; the rest
// is committed on demand through MmGrowKernelStack(Ex)
//
StackPtes = BYTES_TO_PAGES(KERNEL_LARGE_STACK_SIZE);
StackPages = BYTES_TO_PAGES(KERNEL_LARGE_STACK_COMMIT);
}
else
{
//
// Regular threads get a fully committed KERNEL_STACK_SIZE stack
//
StackPtes = BYTES_TO_PAGES(KERNEL_STACK_SIZE);
StackPages = StackPtes;
}
//
// Reserve system PTEs for the stack plus one extra for the guard page;
// the guard PTE is the lowest one and is never made valid
//
StackPte = MiReserveSystemPtes(StackPtes + 1, SystemPteSpace);
if (!StackPte) return NULL;
//
// The stack base is the address just above the highest reserved PTE;
// kernel stacks grow downward from there
//
BaseAddress = MiPteToAddress(StackPte + StackPtes + 1);
//
// Position PointerPte one below the first PTE to commit: for GUI stacks
// only the topmost KERNEL_LARGE_STACK_COMMIT region is committed now
// (the loop below pre-increments before writing)
//
PointerPte = StackPte;
if (GuiStack) PointerPte += BYTES_TO_PAGES(KERNEL_LARGE_STACK_SIZE -
KERNEL_LARGE_STACK_COMMIT);
//
// Template PTE for stack pages: non-global, marked dirty, PFN filled in
// per page below
//
TempPte = HyperTemplatePte;
TempPte.u.Hard.Global = FALSE;
TempPte.u.Hard.PageFrameNumber = 0;
TempPte.u.Hard.Dirty = TRUE;
//
// Page allocation and PTE writes are done under the PFN database lock
//
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
//
// Commit each stack page
//
for (i = 0; i < StackPages; i++)
{
//
// Advance to the next PTE; it must still be unmapped
//
PointerPte++;
ASSERT(PointerPte->u.Hard.Valid == 0);
//
// Grab a physical page from the nonpaged pool consumer
// NOTE(review): the MmAllocPage return value is not checked — confirm
// it cannot return an invalid PFN under memory pressure
//
PageFrameIndex = MmAllocPage(MC_NPPOOL, 0);
TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
//
// Make the PTE valid and account for the committed page
//
*PointerPte = TempPte;
PagesForStacks++;
}
//
// Release the PFN lock
//
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
//
// Return the (downward-growing) stack base
//
return BaseAddress;
}
NTSTATUS
NTAPI
MmGrowKernelStackEx(IN PVOID StackPointer,
IN ULONG GrowSize)
{
PKTHREAD Thread = KeGetCurrentThread();
PMMPTE LimitPte, NewLimitPte, LastPte;
KIRQL OldIrql;
MMPTE TempPte;
PFN_NUMBER PageFrameIndex;
//
// Commits additional pages of the current thread's large kernel stack so
// that it reaches down to (StackPointer - GrowSize). Returns
// STATUS_SUCCESS if the stack already covers the request or after the
// pages have been committed, STATUS_STACK_OVERFLOW if the request would
// exceed the reserved stack region.
//
// Sanity check: the committed span must fit within the reservation
// (stack pages plus the guard page)
//
ASSERT(((ULONG_PTR)Thread->StackBase - (ULONG_PTR)Thread->StackLimit) <=
(KERNEL_LARGE_STACK_SIZE + PAGE_SIZE));
//
// PTE of the current (lowest committed) stack page
//
LimitPte = MiAddressToPte(Thread->StackLimit);
ASSERT(LimitPte->u.Hard.Valid == 1);
//
// PTE of the requested new limit; nothing to do if it is already covered
//
NewLimitPte = MiAddressToPte((PVOID)((ULONG_PTR)StackPointer - GrowSize));
if (NewLimitPte == LimitPte) return STATUS_SUCCESS;
//
// Refuse to grow past the bottom of the reserved stack region
//
LastPte = MiAddressToPte((PVOID)((ULONG_PTR)Thread->StackBase -
KERNEL_LARGE_STACK_SIZE));
if (NewLimitPte < LastPte)
{
//
// Sorry!
//
DPRINT1("Thread wants too much stack\n");
return STATUS_STACK_OVERFLOW;
}
//
// Step down to the first uncommitted PTE
//
LimitPte--;
//
// Template PTE for the new stack pages: non-global, marked dirty, PFN
// filled in per page below
//
TempPte = HyperTemplatePte;
TempPte.u.Hard.Global = FALSE;
TempPte.u.Hard.PageFrameNumber = 0;
TempPte.u.Hard.Dirty = TRUE;
//
// Page allocation and PTE writes are done under the PFN database lock
//
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
//
// Commit every page from the old limit down to the new one
//
while (LimitPte >= NewLimitPte)
{
//
// The PTE must still be unmapped
//
ASSERT(LimitPte->u.Hard.Valid == 0);
//
// Grab a physical page from the nonpaged pool consumer
// NOTE(review): the MmAllocPage return value is not checked — confirm
// it cannot return an invalid PFN under memory pressure
//
PageFrameIndex = MmAllocPage(MC_NPPOOL, 0);
TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
//
// Make the PTE valid; count the page so MmDeleteKernelStack's
// per-page decrement of PagesForStacks balances out
//
*LimitPte-- = TempPte;
PagesForStacks++;
}
//
// Release the PFN lock
//
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
//
// Publish the new, lower stack limit
//
Thread->StackLimit = (ULONG_PTR)MiPteToAddress(NewLimitPte);
return STATUS_SUCCESS;
}
NTSTATUS
NTAPI
MmGrowKernelStack(IN PVOID StackPointer)
{
//
// Thin wrapper: grow by the default commit granularity
//
return MmGrowKernelStackEx(StackPointer, KERNEL_LARGE_STACK_COMMIT);
}
/* EOF */

View file

@ -126,38 +126,6 @@ MiCreatePebOrTeb(PEPROCESS Process,
return RVA(AllocatedBase, PAGE_SIZE);
}
VOID
MiFreeStackPage(PVOID Context,
MEMORY_AREA* MemoryArea,
PVOID Address,
PFN_TYPE Page,
SWAPENTRY SwapEntry,
BOOLEAN Dirty)
{
/* Per-page callback passed to MmFreeMemoryAreaByPtr when a kernel stack
   memory area is destroyed: releases one physical stack page. Kernel
   stacks are never paged out, so a non-zero swap entry is a bug. */
ASSERT(SwapEntry == 0);
/* Page can be 0 — presumably for a page that was never committed in the
   area; verify against MmFreeMemoryAreaByPtr's contract */
if (Page) MmReleasePageMemoryConsumer(MC_NPPOOL, Page);
}
VOID
NTAPI
MmDeleteKernelStack(PVOID StackBase,
BOOLEAN GuiStack)
{
/* Tear down a kernel stack by freeing its memory area. StackBase is the
   top of the stack; the area starts StackSize bytes below it (kernel
   stacks grow downward). Individual pages are released through the
   MiFreeStackPage callback. */
ULONG StackSize = GuiStack ? KERNEL_LARGE_STACK_SIZE : KERNEL_STACK_SIZE;
/* Lock the Address Space */
MmLockAddressSpace(MmGetKernelAddressSpace());
/* Delete the Stack */
MmFreeMemoryAreaByPtr(MmGetKernelAddressSpace(),
(PVOID)((ULONG_PTR)StackBase - StackSize),
MiFreeStackPage,
NULL);
/* Unlock the Address Space */
MmUnlockAddressSpace(MmGetKernelAddressSpace());
}
VOID
NTAPI
MmDeleteTeb(PEPROCESS Process,
@ -180,106 +148,6 @@ MmDeleteTeb(PEPROCESS Process,
MmUnlockAddressSpace(ProcessAddressSpace);
}
PVOID
NTAPI
MmCreateKernelStack(BOOLEAN GuiStack,
UCHAR Node)
{
/* Allocate a kernel stack as a MEMORY_AREA in the kernel address space
   and back it fully with physical pages. Returns the stack BASE (the
   address just above the highest page — kernel stacks grow downward).
   Bugchecks on failure rather than returning NULL. */
PMEMORY_AREA StackArea;
ULONG i;
PHYSICAL_ADDRESS BoundaryAddressMultiple;
ULONG StackSize = GuiStack ? KERNEL_LARGE_STACK_SIZE : KERNEL_STACK_SIZE;
PFN_TYPE Page[KERNEL_LARGE_STACK_SIZE / PAGE_SIZE];
PVOID KernelStack = NULL;
NTSTATUS Status;
/* Initialize the Boundary Address */
BoundaryAddressMultiple.QuadPart = 0;
/* Lock the Kernel Address Space */
MmLockAddressSpace(MmGetKernelAddressSpace());
/* Create a MAREA for the Kernel Stack */
Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
MEMORY_AREA_KERNEL_STACK,
&KernelStack,
StackSize,
PAGE_READWRITE,
&StackArea,
FALSE,
0,
BoundaryAddressMultiple);
/* Unlock the Address Space */
MmUnlockAddressSpace(MmGetKernelAddressSpace());
/* Check for Success */
if (!NT_SUCCESS(Status))
{
DPRINT1("Failed to create thread stack\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
/*
 * Mark the Stack in use.
 * Note: Currently we mark all 60KB in use for a GUI Thread.
 * We should only do this inside MmGrowKernelStack. TODO!
 * NOTE(review): the per-iteration Status is overwritten each pass and
 * never checked — an allocation failure here would go unnoticed until
 * the mapping below fails.
 */
for (i = 0; i < (StackSize / PAGE_SIZE); i++)
{
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Page[i]);
}
/* Create a Virtual Mapping for it */
Status = MmCreateVirtualMapping(NULL,
KernelStack,
PAGE_READWRITE,
Page,
StackSize / PAGE_SIZE);
/* Check for success */
if (!NT_SUCCESS(Status))
{
DPRINT1("Could not create Virtual Mapping for Kernel Stack\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
/* Return the stack base */
return (PVOID)((ULONG_PTR)KernelStack +
(GuiStack ? KERNEL_LARGE_STACK_SIZE : KERNEL_STACK_SIZE));
}
/*
 * @implemented
 *
 * Lower the current thread's kernel stack limit by KERNEL_STACK_SIZE,
 * provided the reserved large-stack region still has room. No pages are
 * actually committed here — the full reservation was already backed in
 * MmCreateKernelStack (see the TODO there); only the limit moves.
 */
NTSTATUS
NTAPI
MmGrowKernelStack(PVOID StackPointer)
{
PETHREAD Thread = PsGetCurrentThread();
/* Make sure the stack did not overflow */
ASSERT(((PCHAR)Thread->Tcb.StackBase - (PCHAR)Thread->Tcb.StackLimit) <=
(KERNEL_LARGE_STACK_SIZE + PAGE_SIZE));
/* Check if we have reserved space for our grow */
if ((PCHAR)Thread->Tcb.StackBase - (PCHAR)Thread->Tcb.StackLimit +
KERNEL_STACK_SIZE > KERNEL_LARGE_STACK_SIZE)
{
return STATUS_STACK_OVERFLOW;
}
/*
 * We'll give you three more pages.
 * NOTE: See note in MmCreateKernelStack. These pages are already being reserved.
 * It would be more efficient to only grow them (commit them) here.
 */
Thread->Tcb.StackLimit -= KERNEL_STACK_SIZE;
/* Return success */
return STATUS_SUCCESS;
}
NTSTATUS
NTAPI
MmCreatePeb(PEPROCESS Process)

View file

@ -364,6 +364,7 @@
<file>init.c</file>
<file>iosup.c</file>
<file>pool.c</file>
<file>procsup.c</file>
<file>syspte.c</file>
</directory>
<file>anonmem.c</file>