/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/procsup.c
 * PURPOSE:         ARM Memory Manager Process Related Management
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */
/* INCLUDES *******************************************************************/
|
|
|
|
|
|
|
|
#include <ntoskrnl.h>
|
|
|
|
#define NDEBUG
|
|
|
|
#include <debug.h>
|
|
|
|
|
|
|
|
#line 15 "ARM³::PROCSUP"
|
|
|
|
#define MODULE_INVOLVED_IN_ARM3
|
|
|
|
#include "../ARM3/miarm.h"
|
|
|
|
|
2009-10-15 05:56:41 +00:00
|
|
|
extern MM_SYSTEMSIZE MmSystemSize;
|
|
|
|
|
2009-06-27 08:41:45 +00:00
|
|
|
/* PRIVATE FUNCTIONS **********************************************************/
|
|
|
|
|
[NTOS]: MiRosTakeOverPebTebRanges now creates a small ~1MB ARM3 memory range on top of the ReactOS per-process VA. This does a couple of things: First of all, it changes the default PEB address to another static address. Still not dynamic like it will be soon, but at least it changes it a bit so we can test if anything breaks due to that. It also likewise changes the addresses of the TEBs (Shifted down by 1MB, basically). Finally, it blocks off that part of address space, which nobody should be using now, to see if anyone does indeed touch it.
[NTOS]: Knowing if this change causes issues will help later in determining regressions due to TEB/PEBs mapped as VADs by ARM3, and regressions simply due to the change in VA layout.
[NTOS]: When implemented, the VAD mapping for PEB/TEB will only use that ~1MB, which yes, will limit ReactOS processes to each have only 256 threads. That is obviously a temporary limitation, one I doubt we'll even hit, but I'm putting it out here so you know.
svn path=/trunk/; revision=48176
2010-07-22 03:22:43 +00:00
|
|
|
VOID
|
|
|
|
NTAPI
|
|
|
|
MiRosTakeOverPebTebRanges(IN PEPROCESS Process)
|
|
|
|
{
|
|
|
|
NTSTATUS Status;
|
|
|
|
PMEMORY_AREA MemoryArea;
|
|
|
|
PHYSICAL_ADDRESS BoundaryAddressMultiple;
|
|
|
|
PVOID AllocatedBase = (PVOID)MI_LOWEST_VAD_ADDRESS;
|
|
|
|
BoundaryAddressMultiple.QuadPart = 0;
|
|
|
|
|
|
|
|
Status = MmCreateMemoryArea(&Process->Vm,
|
|
|
|
MEMORY_AREA_OWNED_BY_ARM3,
|
|
|
|
&AllocatedBase,
|
2010-07-22 20:54:37 +00:00
|
|
|
((ULONG_PTR)MM_HIGHEST_USER_ADDRESS - 1) -
|
[NTOS]: MiRosTakeOverPebTebRanges now creates a small ~1MB ARM3 memory range on top of the ReactOS per-process VA. This does a couple of things: First of all, it changes the default PEB address to another static address. Still not dynamic like it will be soon, but at least it changes it a bit so we can test if anything breaks due to that. It also likewise changes the addresses of the TEBs (Shifted down by 1MB, basically). Finally, it blocks off that part of address space, which nobody should be using now, to see if anyone does indeed touch it.
[NTOS]: Knowing if this change causes issues will help later in determining regressions due to TEB/PEBs mapped as VADs by ARM3, and regressions simply due to the change in VA layout.
[NTOS]: When implemented, the VAD mapping for PEB/TEB will only use that ~1MB, which yes, will limit ReactOS processes to each have only 256 threads. That is obviously a temporary limitation, one I doubt we'll even hit, but I'm putting it out here so you know.
svn path=/trunk/; revision=48176
2010-07-22 03:22:43 +00:00
|
|
|
(ULONG_PTR)MI_LOWEST_VAD_ADDRESS,
|
|
|
|
PAGE_READWRITE,
|
|
|
|
&MemoryArea,
|
|
|
|
TRUE,
|
|
|
|
0,
|
|
|
|
BoundaryAddressMultiple);
|
|
|
|
ASSERT(NT_SUCCESS(Status));
|
|
|
|
}
|
|
|
|
|
2010-07-22 19:08:45 +00:00
|
|
|
NTSTATUS
|
|
|
|
NTAPI
|
|
|
|
MiCreatePebOrTeb(IN PEPROCESS Process,
|
|
|
|
IN ULONG Size,
|
|
|
|
OUT PULONG_PTR Base)
|
|
|
|
{
|
|
|
|
PETHREAD Thread = PsGetCurrentThread();
|
|
|
|
PMMVAD_LONG Vad;
|
|
|
|
NTSTATUS Status;
|
|
|
|
ULONG RandomCoeff;
|
|
|
|
ULONG_PTR StartAddress, EndAddress;
|
|
|
|
LARGE_INTEGER CurrentTime;
|
|
|
|
|
|
|
|
/* Allocate a VAD */
|
|
|
|
Vad = ExAllocatePoolWithTag(NonPagedPool, sizeof(MMVAD_LONG), 'ldaV');
|
|
|
|
if (!Vad) return STATUS_NO_MEMORY;
|
|
|
|
|
|
|
|
/* Setup the primary flags with the size, and make it commited, private, RW */
|
|
|
|
Vad->u.LongFlags = 0;
|
|
|
|
Vad->u.VadFlags.CommitCharge = BYTES_TO_PAGES(Size);
|
|
|
|
Vad->u.VadFlags.MemCommit = TRUE;
|
|
|
|
Vad->u.VadFlags.PrivateMemory = TRUE;
|
|
|
|
Vad->u.VadFlags.Protection = MM_READWRITE;
|
|
|
|
Vad->u.VadFlags.NoChange = TRUE;
|
|
|
|
|
|
|
|
/* Setup the secondary flags to make it a secured, writable, long VAD */
|
|
|
|
Vad->u2.LongFlags2 = 0;
|
|
|
|
Vad->u2.VadFlags2.OneSecured = TRUE;
|
|
|
|
Vad->u2.VadFlags2.LongVad = TRUE;
|
|
|
|
Vad->u2.VadFlags2.ReadOnly = FALSE;
|
|
|
|
|
|
|
|
/* Lock the process address space */
|
|
|
|
KeAcquireGuardedMutex(&Process->AddressCreationLock);
|
|
|
|
|
|
|
|
/* Check if this is a PEB creation */
|
|
|
|
if (Size == sizeof(PEB))
|
|
|
|
{
|
|
|
|
/* Start at the highest valid address */
|
|
|
|
StartAddress = (ULONG_PTR)MM_HIGHEST_VAD_ADDRESS + 1;
|
|
|
|
|
|
|
|
/* Select the random coefficient */
|
|
|
|
KeQueryTickCount(&CurrentTime);
|
|
|
|
CurrentTime.LowPart &= ((64 * _1KB) >> PAGE_SHIFT) - 1;
|
|
|
|
if (CurrentTime.LowPart <= 1) CurrentTime.LowPart = 2;
|
|
|
|
RandomCoeff = CurrentTime.LowPart << PAGE_SHIFT;
|
|
|
|
|
|
|
|
/* Select the highest valid address minus the random coefficient */
|
|
|
|
StartAddress -= RandomCoeff;
|
|
|
|
EndAddress = StartAddress + ROUND_TO_PAGES(Size) - 1;
|
|
|
|
|
|
|
|
/* See if this VA range can be obtained */
|
|
|
|
if (!MiCheckForConflictingNode(StartAddress >> PAGE_SHIFT,
|
|
|
|
EndAddress >> PAGE_SHIFT,
|
|
|
|
&Process->VadRoot))
|
|
|
|
{
|
|
|
|
/* No conflict, use this address */
|
|
|
|
*Base = StartAddress;
|
|
|
|
goto AfterFound;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* For TEBs, or if a PEB location couldn't be found, scan the VAD root */
|
|
|
|
Status = MiFindEmptyAddressRangeDownTree(ROUND_TO_PAGES(Size),
|
|
|
|
(ULONG_PTR)MM_HIGHEST_VAD_ADDRESS + 1,
|
|
|
|
PAGE_SIZE,
|
|
|
|
&Process->VadRoot,
|
|
|
|
Base);
|
|
|
|
ASSERT(NT_SUCCESS(Status));
|
|
|
|
|
|
|
|
AfterFound:
|
|
|
|
/* Validate that it came from the VAD ranges */
|
|
|
|
ASSERT(*Base >= (ULONG_PTR)MI_LOWEST_VAD_ADDRESS);
|
|
|
|
|
|
|
|
/* Build the rest of the VAD now */
|
|
|
|
Vad->StartingVpn = (*Base) >> PAGE_SHIFT;
|
|
|
|
Vad->EndingVpn = ((*Base) + Size - 1) >> PAGE_SHIFT;
|
|
|
|
Vad->u3.Secured.StartVpn = *Base;
|
|
|
|
Vad->u3.Secured.EndVpn = (Vad->EndingVpn << PAGE_SHIFT) | (PAGE_SIZE - 1);
|
|
|
|
|
|
|
|
/* FIXME: Should setup VAD bitmap */
|
|
|
|
Status = STATUS_SUCCESS;
|
|
|
|
|
|
|
|
/* Pretend as if we own the working set */
|
|
|
|
MiLockProcessWorkingSet(Process, Thread);
|
|
|
|
|
|
|
|
/* Insert the VAD */
|
|
|
|
ASSERT(Vad->EndingVpn >= Vad->StartingVpn);
|
|
|
|
Process->VadRoot.NodeHint = Vad;
|
|
|
|
MiInsertNode((PVOID)Vad, &Process->VadRoot);
|
|
|
|
|
|
|
|
/* Release the working set */
|
|
|
|
MiUnlockProcessWorkingSet(Process, Thread);
|
|
|
|
|
|
|
|
/* Release the address space lock */
|
|
|
|
KeReleaseGuardedMutex(&Process->AddressCreationLock);
|
|
|
|
|
|
|
|
/* Return the status */
|
|
|
|
DPRINT("Allocated PEB/TEB at: 0x%p for %16s\n", *Base, Process->ImageFileName);
|
|
|
|
return Status;
|
|
|
|
}
|
|
|
|
|
|
|
|
VOID
|
|
|
|
NTAPI
|
|
|
|
MmDeleteTeb(IN PEPROCESS Process,
|
|
|
|
IN PTEB Teb)
|
|
|
|
{
|
2010-07-24 16:28:51 +00:00
|
|
|
ULONG_PTR TebEnd;
|
|
|
|
PETHREAD Thread = PsGetCurrentThread();
|
|
|
|
PMMVAD Vad;
|
|
|
|
PMM_AVL_TABLE VadTree = &Process->VadRoot;
|
|
|
|
DPRINT("Deleting TEB: %p in %16s\n", Teb, Process->ImageFileName);
|
|
|
|
|
|
|
|
/* TEB is one page */
|
|
|
|
TebEnd = (ULONG_PTR)Teb + ROUND_TO_PAGES(sizeof(TEB)) - 1;
|
|
|
|
|
|
|
|
/* Attach to the process */
|
|
|
|
KeAttachProcess(&Process->Pcb);
|
|
|
|
|
|
|
|
/* Lock the process address space */
|
|
|
|
KeAcquireGuardedMutex(&Process->AddressCreationLock);
|
|
|
|
|
|
|
|
/* Find the VAD, make sure it's a TEB VAD */
|
|
|
|
Vad = MiLocateAddress(Teb);
|
|
|
|
DPRINT("Removing node for VAD: %lx %lx\n", Vad->StartingVpn, Vad->EndingVpn);
|
|
|
|
ASSERT(Vad != NULL);
|
|
|
|
if (Vad->StartingVpn != ((ULONG_PTR)Teb >> PAGE_SHIFT))
|
|
|
|
{
|
|
|
|
/* Bug in the AVL code? */
|
|
|
|
DPRINT1("Corrupted VAD!\n");
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Sanity checks for a valid TEB VAD */
|
|
|
|
ASSERT((Vad->StartingVpn == ((ULONG_PTR)Teb >> PAGE_SHIFT) &&
|
|
|
|
(Vad->EndingVpn == (TebEnd >> PAGE_SHIFT))));
|
|
|
|
ASSERT(Vad->u.VadFlags.NoChange == TRUE);
|
|
|
|
ASSERT(Vad->u2.VadFlags2.MultipleSecured == FALSE);
|
|
|
|
|
|
|
|
/* Lock the working set */
|
|
|
|
MiLockProcessWorkingSet(Process, Thread);
|
|
|
|
|
|
|
|
/* Remove this VAD from the tree */
|
|
|
|
ASSERT(VadTree->NumberGenericTableElements >= 1);
|
|
|
|
MiRemoveNode((PMMADDRESS_NODE)Vad, VadTree);
|
|
|
|
|
|
|
|
/* Release the working set */
|
|
|
|
MiUnlockProcessWorkingSet(Process, Thread);
|
|
|
|
|
|
|
|
/* Remove the VAD */
|
|
|
|
ExFreePool(Vad);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Release the address space lock */
|
|
|
|
KeReleaseGuardedMutex(&Process->AddressCreationLock);
|
|
|
|
|
|
|
|
/* Detach */
|
|
|
|
KeDetachProcess();
|
2010-07-22 19:08:45 +00:00
|
|
|
}
|
|
|
|
|
2009-06-27 08:41:45 +00:00
|
|
|
VOID
|
|
|
|
NTAPI
|
|
|
|
MmDeleteKernelStack(IN PVOID StackBase,
|
|
|
|
IN BOOLEAN GuiStack)
|
|
|
|
{
|
|
|
|
PMMPTE PointerPte;
|
2010-05-29 18:33:50 +00:00
|
|
|
PFN_NUMBER StackPages, PageFrameNumber;//, PageTableFrameNumber;
|
|
|
|
PMMPFN Pfn1;//, Pfn2;
|
2009-06-27 08:41:45 +00:00
|
|
|
ULONG i;
|
2010-05-29 18:33:50 +00:00
|
|
|
KIRQL OldIrql;
|
2009-06-27 08:41:45 +00:00
|
|
|
|
|
|
|
//
|
|
|
|
// This should be the guard page, so decrement by one
|
|
|
|
//
|
|
|
|
PointerPte = MiAddressToPte(StackBase);
|
|
|
|
PointerPte--;
|
|
|
|
|
|
|
|
//
|
|
|
|
// Calculate pages used
|
|
|
|
//
|
|
|
|
StackPages = BYTES_TO_PAGES(GuiStack ?
|
|
|
|
KERNEL_LARGE_STACK_SIZE : KERNEL_STACK_SIZE);
|
|
|
|
|
2010-05-29 18:33:50 +00:00
|
|
|
/* Acquire the PFN lock */
|
|
|
|
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
|
|
|
|
|
2009-06-27 08:41:45 +00:00
|
|
|
//
|
|
|
|
// Loop them
|
|
|
|
//
|
|
|
|
for (i = 0; i < StackPages; i++)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Check if this is a valid PTE
|
|
|
|
//
|
|
|
|
if (PointerPte->u.Hard.Valid == 1)
|
|
|
|
{
|
2010-05-29 18:33:50 +00:00
|
|
|
/* Get the PTE's page */
|
|
|
|
PageFrameNumber = PFN_FROM_PTE(PointerPte);
|
|
|
|
Pfn1 = MiGetPfnEntry(PageFrameNumber);
|
|
|
|
#if 0 // ARM3 might not own the page table, so don't take this risk. Leak it instead!
|
|
|
|
/* Now get the page of the page table mapping it */
|
|
|
|
PageTableFrameNumber = Pfn1->u4.PteFrame;
|
|
|
|
Pfn2 = MiGetPfnEntry(PageTableFrameNumber);
|
|
|
|
|
|
|
|
/* Remove a shared reference, since the page is going away */
|
|
|
|
MiDecrementShareCount(Pfn2, PageTableFrameNumber);
|
|
|
|
#endif
|
|
|
|
/* Set the special pending delete marker */
|
|
|
|
Pfn1->PteAddress = (PMMPTE)((ULONG_PTR)Pfn1->PteAddress | 1);
|
|
|
|
|
|
|
|
/* And now delete the actual stack page */
|
|
|
|
MiDecrementShareCount(Pfn1, PageFrameNumber);
|
2009-06-27 08:41:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Next one
|
|
|
|
//
|
|
|
|
PointerPte--;
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// We should be at the guard page now
|
|
|
|
//
|
|
|
|
ASSERT(PointerPte->u.Hard.Valid == 0);
|
|
|
|
|
2010-05-29 18:33:50 +00:00
|
|
|
/* Release the PFN lock */
|
|
|
|
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
|
|
|
|
|
2009-06-27 08:41:45 +00:00
|
|
|
//
|
|
|
|
// Release the PTEs
|
|
|
|
//
|
|
|
|
MiReleaseSystemPtes(PointerPte, StackPages + 1, SystemPteSpace);
|
|
|
|
}
|
|
|
|
|
|
|
|
PVOID
|
|
|
|
NTAPI
|
|
|
|
MmCreateKernelStack(IN BOOLEAN GuiStack,
|
|
|
|
IN UCHAR Node)
|
|
|
|
{
|
|
|
|
PFN_NUMBER StackPtes, StackPages;
|
|
|
|
PMMPTE PointerPte, StackPte;
|
|
|
|
PVOID BaseAddress;
|
Testers: Please test this build.
[NTOS]: Implement a MI_MAKE_HARDWARE_PTE macro for the generation of valid kernel PTEs instead of always taking the ValidKernelPte and changing its flags. This macro will take into account the protection mask (up until now ignored) and use the array previously implemented to determine the correct hardware PTE settings. Assertions are also added to validate correct usage of the macro, and later revisions will fill out NT-specific fields to help deal with transition PTEs, page faults, etc.
[NTOS]: Make the stack code the first user of this macro, for the stack PTEs. Good testing base as we create kernel stacks very often.
[NTOS]: The NT MM ABI specifies that in between the allocation of a new PTE and its initialization as a valid PFN, the PTE entry should be an invalid PTE, and should only be marked valid after the PFN has been initialized. For stack PTEs, do this -- first allocating the page, making it invalid, then initializing the PFN, and then writing the valid page.
svn path=/trunk/; revision=47571
2010-06-04 17:49:36 +00:00
|
|
|
MMPTE TempPte, InvalidPte;
|
2009-06-27 08:41:45 +00:00
|
|
|
KIRQL OldIrql;
|
|
|
|
PFN_NUMBER PageFrameIndex;
|
|
|
|
ULONG i;
|
|
|
|
|
|
|
|
//
|
|
|
|
// Calculate pages needed
|
|
|
|
//
|
|
|
|
if (GuiStack)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// We'll allocate 64KB stack, but only commit 12K
|
|
|
|
//
|
|
|
|
StackPtes = BYTES_TO_PAGES(KERNEL_LARGE_STACK_SIZE);
|
|
|
|
StackPages = BYTES_TO_PAGES(KERNEL_LARGE_STACK_COMMIT);
|
|
|
|
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// We'll allocate 12K and that's it
|
|
|
|
//
|
|
|
|
StackPtes = BYTES_TO_PAGES(KERNEL_STACK_SIZE);
|
|
|
|
StackPages = StackPtes;
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Reserve stack pages, plus a guard page
|
|
|
|
//
|
|
|
|
StackPte = MiReserveSystemPtes(StackPtes + 1, SystemPteSpace);
|
|
|
|
if (!StackPte) return NULL;
|
|
|
|
|
|
|
|
//
|
|
|
|
// Get the stack address
|
|
|
|
//
|
|
|
|
BaseAddress = MiPteToAddress(StackPte + StackPtes + 1);
|
|
|
|
|
|
|
|
//
|
|
|
|
// Select the right PTE address where we actually start committing pages
|
|
|
|
//
|
|
|
|
PointerPte = StackPte;
|
|
|
|
if (GuiStack) PointerPte += BYTES_TO_PAGES(KERNEL_LARGE_STACK_SIZE -
|
|
|
|
KERNEL_LARGE_STACK_COMMIT);
|
|
|
|
|
Testers: Please test this build.
[NTOS]: Implement a MI_MAKE_HARDWARE_PTE macro for the generation of valid kernel PTEs instead of always taking the ValidKernelPte and changing its flags. This macro will take into account the protection mask (up until now ignored) and use the array previously implemented to determine the correct hardware PTE settings. Assertions are also added to validate correct usage of the macro, and later revisions will fill out NT-specific fields to help deal with transition PTEs, page faults, etc.
[NTOS]: Make the stack code the first user of this macro, for the stack PTEs. Good testing base as we create kernel stacks very often.
[NTOS]: The NT MM ABI specifies that in between the allocation of a new PTE and its initialization as a valid PFN, the PTE entry should be an invalid PTE, and should only be marked valid after the PFN has been initialized. For stack PTEs, do this -- first allocating the page, making it invalid, then initializing the PFN, and then writing the valid page.
svn path=/trunk/; revision=47571
2010-06-04 17:49:36 +00:00
|
|
|
|
|
|
|
/* Setup the temporary invalid PTE */
|
|
|
|
MI_MAKE_SOFTWARE_PTE(&InvalidPte, MM_NOACCESS);
|
|
|
|
|
|
|
|
/* Setup the template stack PTE */
|
2010-07-22 02:20:27 +00:00
|
|
|
MI_MAKE_HARDWARE_PTE_KERNEL(&TempPte, PointerPte + 1, MM_READWRITE, 0);
|
2009-06-27 08:41:45 +00:00
|
|
|
|
|
|
|
//
|
|
|
|
// Acquire the PFN DB lock
|
|
|
|
//
|
|
|
|
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
|
|
|
|
|
|
|
|
//
|
|
|
|
// Loop each stack page
|
|
|
|
//
|
|
|
|
for (i = 0; i < StackPages; i++)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Next PTE
|
|
|
|
//
|
|
|
|
PointerPte++;
|
|
|
|
|
Testers: Please test this build.
[NTOS]: Implement a MI_MAKE_HARDWARE_PTE macro for the generation of valid kernel PTEs instead of always taking the ValidKernelPte and changing its flags. This macro will take into account the protection mask (up until now ignored) and use the array previously implemented to determine the correct hardware PTE settings. Assertions are also added to validate correct usage of the macro, and later revisions will fill out NT-specific fields to help deal with transition PTEs, page faults, etc.
[NTOS]: Make the stack code the first user of this macro, for the stack PTEs. Good testing base as we create kernel stacks very often.
[NTOS]: The NT MM ABI specifies that in between the allocation of a new PTE and its initialization as a valid PFN, the PTE entry should be an invalid PTE, and should only be marked valid after the PFN has been initialized. For stack PTEs, do this -- first allocating the page, making it invalid, then initializing the PFN, and then writing the valid page.
svn path=/trunk/; revision=47571
2010-06-04 17:49:36 +00:00
|
|
|
/* Get a page and write the current invalid PTE */
|
2010-05-29 18:33:50 +00:00
|
|
|
PageFrameIndex = MiRemoveAnyPage(0);
|
2010-06-06 18:45:46 +00:00
|
|
|
MI_WRITE_INVALID_PTE(PointerPte, InvalidPte);
|
|
|
|
|
2010-05-29 18:33:50 +00:00
|
|
|
/* Initialize the PFN entry for this page */
|
|
|
|
MiInitializePfn(PageFrameIndex, PointerPte, 1);
|
|
|
|
|
|
|
|
/* Write the valid PTE */
|
|
|
|
TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
|
2010-06-06 18:45:46 +00:00
|
|
|
MI_WRITE_VALID_PTE(PointerPte, TempPte);
|
2009-06-27 08:41:45 +00:00
|
|
|
}
|
2010-01-02 20:41:04 +00:00
|
|
|
|
|
|
|
// Bug #4835
|
2010-01-02 20:49:59 +00:00
|
|
|
(VOID)InterlockedExchangeAddUL(&MiMemoryConsumers[MC_NPPOOL].PagesUsed, StackPages);
|
2010-01-02 20:41:04 +00:00
|
|
|
|
2009-06-27 08:41:45 +00:00
|
|
|
//
|
|
|
|
// Release the PFN lock
|
|
|
|
//
|
|
|
|
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
|
|
|
|
|
|
|
|
//
|
|
|
|
// Return the stack address
|
|
|
|
//
|
|
|
|
return BaseAddress;
|
|
|
|
}
|
|
|
|
|
|
|
|
NTSTATUS
|
|
|
|
NTAPI
|
|
|
|
MmGrowKernelStackEx(IN PVOID StackPointer,
|
|
|
|
IN ULONG GrowSize)
|
|
|
|
{
|
|
|
|
PKTHREAD Thread = KeGetCurrentThread();
|
|
|
|
PMMPTE LimitPte, NewLimitPte, LastPte;
|
|
|
|
PFN_NUMBER StackPages;
|
|
|
|
KIRQL OldIrql;
|
Testers: Please test this build.
[NTOS]: Implement a MI_MAKE_HARDWARE_PTE macro for the generation of valid kernel PTEs instead of always taking the ValidKernelPte and changing its flags. This macro will take into account the protection mask (up until now ignored) and use the array previously implemented to determine the correct hardware PTE settings. Assertions are also added to validate correct usage of the macro, and later revisions will fill out NT-specific fields to help deal with transition PTEs, page faults, etc.
[NTOS]: Make the stack code the first user of this macro, for the stack PTEs. Good testing base as we create kernel stacks very often.
[NTOS]: The NT MM ABI specifies that in between the allocation of a new PTE and its initialization as a valid PFN, the PTE entry should be an invalid PTE, and should only be marked valid after the PFN has been initialized. For stack PTEs, do this -- first allocating the page, making it invalid, then initializing the PFN, and then writing the valid page.
svn path=/trunk/; revision=47571
2010-06-04 17:49:36 +00:00
|
|
|
MMPTE TempPte, InvalidPte;
|
2009-06-27 08:41:45 +00:00
|
|
|
PFN_NUMBER PageFrameIndex;
|
|
|
|
|
|
|
|
//
|
|
|
|
// Make sure the stack did not overflow
|
|
|
|
//
|
|
|
|
ASSERT(((ULONG_PTR)Thread->StackBase - (ULONG_PTR)Thread->StackLimit) <=
|
|
|
|
(KERNEL_LARGE_STACK_SIZE + PAGE_SIZE));
|
|
|
|
|
|
|
|
//
|
|
|
|
// Get the current stack limit
|
|
|
|
//
|
|
|
|
LimitPte = MiAddressToPte(Thread->StackLimit);
|
|
|
|
ASSERT(LimitPte->u.Hard.Valid == 1);
|
|
|
|
|
|
|
|
//
|
|
|
|
// Get the new one and make sure this isn't a retarded request
|
|
|
|
//
|
|
|
|
NewLimitPte = MiAddressToPte((PVOID)((ULONG_PTR)StackPointer - GrowSize));
|
|
|
|
if (NewLimitPte == LimitPte) return STATUS_SUCCESS;
|
|
|
|
|
|
|
|
//
|
|
|
|
// Now make sure you're not going past the reserved space
|
|
|
|
//
|
|
|
|
LastPte = MiAddressToPte((PVOID)((ULONG_PTR)Thread->StackBase -
|
|
|
|
KERNEL_LARGE_STACK_SIZE));
|
|
|
|
if (NewLimitPte < LastPte)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Sorry!
|
|
|
|
//
|
|
|
|
DPRINT1("Thread wants too much stack\n");
|
|
|
|
return STATUS_STACK_OVERFLOW;
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Calculate the number of new pages
|
|
|
|
//
|
|
|
|
LimitPte--;
|
|
|
|
StackPages = (LimitPte - NewLimitPte + 1);
|
|
|
|
|
Testers: Please test this build.
[NTOS]: Implement a MI_MAKE_HARDWARE_PTE macro for the generation of valid kernel PTEs instead of always taking the ValidKernelPte and changing its flags. This macro will take into account the protection mask (up until now ignored) and use the array previously implemented to determine the correct hardware PTE settings. Assertions are also added to validate correct usage of the macro, and later revisions will fill out NT-specific fields to help deal with transition PTEs, page faults, etc.
[NTOS]: Make the stack code the first user of this macro, for the stack PTEs. Good testing base as we create kernel stacks very often.
[NTOS]: The NT MM ABI specifies that in between the allocation of a new PTE and its initialization as a valid PFN, the PTE entry should be an invalid PTE, and should only be marked valid after the PFN has been initialized. For stack PTEs, do this -- first allocating the page, making it invalid, then initializing the PFN, and then writing the valid page.
svn path=/trunk/; revision=47571
2010-06-04 17:49:36 +00:00
|
|
|
/* Setup the temporary invalid PTE */
|
|
|
|
MI_MAKE_SOFTWARE_PTE(&InvalidPte, MM_NOACCESS);
|
2009-06-27 08:41:45 +00:00
|
|
|
|
|
|
|
//
|
|
|
|
// Acquire the PFN DB lock
|
|
|
|
//
|
|
|
|
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
|
2010-05-12 22:47:46 +00:00
|
|
|
|
2009-06-27 08:41:45 +00:00
|
|
|
//
|
|
|
|
// Loop each stack page
|
|
|
|
//
|
|
|
|
while (LimitPte >= NewLimitPte)
|
|
|
|
{
|
Testers: Please test this build.
[NTOS]: Implement a MI_MAKE_HARDWARE_PTE macro for the generation of valid kernel PTEs instead of always taking the ValidKernelPte and changing its flags. This macro will take into account the protection mask (up until now ignored) and use the array previously implemented to determine the correct hardware PTE settings. Assertions are also added to validate correct usage of the macro, and later revisions will fill out NT-specific fields to help deal with transition PTEs, page faults, etc.
[NTOS]: Make the stack code the first user of this macro, for the stack PTEs. Good testing base as we create kernel stacks very often.
[NTOS]: The NT MM ABI specifies that in between the allocation of a new PTE and its initialization as a valid PFN, the PTE entry should be an invalid PTE, and should only be marked valid after the PFN has been initialized. For stack PTEs, do this -- first allocating the page, making it invalid, then initializing the PFN, and then writing the valid page.
svn path=/trunk/; revision=47571
2010-06-04 17:49:36 +00:00
|
|
|
/* Get a page and write the current invalid PTE */
|
2010-05-29 18:33:50 +00:00
|
|
|
PageFrameIndex = MiRemoveAnyPage(0);
|
2010-06-06 18:45:46 +00:00
|
|
|
MI_WRITE_INVALID_PTE(LimitPte, InvalidPte);
|
|
|
|
|
2010-05-29 18:33:50 +00:00
|
|
|
/* Initialize the PFN entry for this page */
|
|
|
|
MiInitializePfn(PageFrameIndex, LimitPte, 1);
|
2009-06-27 08:41:45 +00:00
|
|
|
|
Testers: Please test this build.
[NTOS]: Implement a MI_MAKE_HARDWARE_PTE macro for the generation of valid kernel PTEs instead of always taking the ValidKernelPte and changing its flags. This macro will take into account the protection mask (up until now ignored) and use the array previously implemented to determine the correct hardware PTE settings. Assertions are also added to validate correct usage of the macro, and later revisions will fill out NT-specific fields to help deal with transition PTEs, page faults, etc.
[NTOS]: Make the stack code the first user of this macro, for the stack PTEs. Good testing base as we create kernel stacks very often.
[NTOS]: The NT MM ABI specifies that in between the allocation of a new PTE and its initialization as a valid PFN, the PTE entry should be an invalid PTE, and should only be marked valid after the PFN has been initialized. For stack PTEs, do this -- first allocating the page, making it invalid, then initializing the PFN, and then writing the valid page.
svn path=/trunk/; revision=47571
2010-06-04 17:49:36 +00:00
|
|
|
/* Setup the template stack PTE */
|
2010-07-22 02:20:27 +00:00
|
|
|
MI_MAKE_HARDWARE_PTE_KERNEL(&TempPte, LimitPte, MM_READWRITE, PageFrameIndex);
|
Testers: Please test this build.
[NTOS]: Implement a MI_MAKE_HARDWARE_PTE macro for the generation of valid kernel PTEs instead of always taking the ValidKernelPte and changing its flags. This macro will take into account the protection mask (up until now ignored) and use the array previously implemented to determine the correct hardware PTE settings. Assertions are also added to validate correct usage of the macro, and later revisions will fill out NT-specific fields to help deal with transition PTEs, page faults, etc.
[NTOS]: Make the stack code the first user of this macro, for the stack PTEs. Good testing base as we create kernel stacks very often.
[NTOS]: The NT MM ABI specifies that in between the allocation of a new PTE and its initialization as a valid PFN, the PTE entry should be an invalid PTE, and should only be marked valid after the PFN has been initialized. For stack PTEs, do this -- first allocating the page, making it invalid, then initializing the PFN, and then writing the valid page.
svn path=/trunk/; revision=47571
2010-06-04 17:49:36 +00:00
|
|
|
|
2010-05-12 22:47:46 +00:00
|
|
|
/* Write the valid PTE */
|
2010-06-06 18:45:46 +00:00
|
|
|
MI_WRITE_VALID_PTE(LimitPte--, TempPte);
|
2009-06-27 08:41:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Release the PFN lock
|
|
|
|
//
|
|
|
|
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
|
|
|
|
|
|
|
|
//
|
|
|
|
// Set the new limit
|
|
|
|
//
|
|
|
|
Thread->StackLimit = (ULONG_PTR)MiPteToAddress(NewLimitPte);
|
|
|
|
return STATUS_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
NTSTATUS
|
|
|
|
NTAPI
|
|
|
|
MmGrowKernelStack(IN PVOID StackPointer)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Call the extended version
|
|
|
|
//
|
|
|
|
return MmGrowKernelStackEx(StackPointer, KERNEL_LARGE_STACK_COMMIT);
|
|
|
|
}
|
|
|
|
|
2009-10-15 05:56:41 +00:00
|
|
|
NTSTATUS
|
|
|
|
NTAPI
|
|
|
|
MmSetMemoryPriorityProcess(IN PEPROCESS Process,
|
|
|
|
IN UCHAR MemoryPriority)
|
|
|
|
{
|
|
|
|
UCHAR OldPriority;
|
|
|
|
|
|
|
|
//
|
|
|
|
// Check if we have less then 16MB of Physical Memory
|
|
|
|
//
|
|
|
|
if ((MmSystemSize == MmSmallSystem) &&
|
2009-10-18 13:55:44 +00:00
|
|
|
(MmNumberOfPhysicalPages < ((15 * 1024 * 1024) / PAGE_SIZE)))
|
2009-10-15 05:56:41 +00:00
|
|
|
{
|
|
|
|
//
|
|
|
|
// Always use background priority
|
|
|
|
//
|
|
|
|
MemoryPriority = MEMORY_PRIORITY_BACKGROUND;
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Save the old priority and update it
|
|
|
|
//
|
|
|
|
OldPriority = (UCHAR)Process->Vm.Flags.MemoryPriority;
|
|
|
|
Process->Vm.Flags.MemoryPriority = MemoryPriority;
|
|
|
|
|
|
|
|
//
|
|
|
|
// Return the old priority
|
|
|
|
//
|
|
|
|
return OldPriority;
|
|
|
|
}
|
|
|
|
|
|
|
|
LCID
|
|
|
|
NTAPI
|
|
|
|
MmGetSessionLocaleId(VOID)
|
|
|
|
{
|
|
|
|
PEPROCESS Process;
|
|
|
|
PAGED_CODE();
|
|
|
|
|
|
|
|
//
|
|
|
|
// Get the current process
|
|
|
|
//
|
|
|
|
Process = PsGetCurrentProcess();
|
|
|
|
|
|
|
|
//
|
|
|
|
// Check if it's the Session Leader
|
|
|
|
//
|
|
|
|
if (Process->Vm.Flags.SessionLeader)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Make sure it has a valid Session
|
|
|
|
//
|
|
|
|
if (Process->Session)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Get the Locale ID
|
|
|
|
//
|
|
|
|
#if ROS_HAS_SESSIONS
|
|
|
|
return ((PMM_SESSION_SPACE)Process->Session)->LocaleId;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Not a session leader, return the default
|
|
|
|
//
|
|
|
|
return PsDefaultThreadLocaleId;
|
|
|
|
}
|
|
|
|
|
|
|
|
NTSTATUS
|
|
|
|
NTAPI
|
|
|
|
MmCreatePeb(IN PEPROCESS Process,
|
|
|
|
IN PINITIAL_PEB InitialPeb,
|
|
|
|
OUT PPEB *BasePeb)
|
|
|
|
{
|
|
|
|
PPEB Peb = NULL;
|
|
|
|
LARGE_INTEGER SectionOffset;
|
|
|
|
SIZE_T ViewSize = 0;
|
|
|
|
PVOID TableBase = NULL;
|
|
|
|
PIMAGE_NT_HEADERS NtHeaders;
|
|
|
|
PIMAGE_LOAD_CONFIG_DIRECTORY ImageConfigData;
|
|
|
|
NTSTATUS Status;
|
|
|
|
USHORT Characteristics;
|
|
|
|
KAFFINITY ProcessAffinityMask = 0;
|
|
|
|
SectionOffset.QuadPart = (ULONGLONG)0;
|
|
|
|
*BasePeb = NULL;
|
|
|
|
|
|
|
|
//
|
|
|
|
// Attach to Process
|
|
|
|
//
|
|
|
|
KeAttachProcess(&Process->Pcb);
|
|
|
|
|
|
|
|
//
|
|
|
|
// Allocate the PEB
|
|
|
|
//
|
2010-07-22 19:08:45 +00:00
|
|
|
Status = MiCreatePebOrTeb(Process, sizeof(PEB), (PULONG_PTR)&Peb);
|
|
|
|
ASSERT(NT_SUCCESS(Status));
|
[NTOS]: MiRosTakeOverPebTebRanges now creates a small ~1MB ARM3 memory range on top of the ReactOS per-process VA. This does a couple of things: First of all, it changes the default PEB address to another static address. Still not dynamic like it will be soon, but at least it changes it a bit so we can test if anything breaks due to that. It also likewise changes the addresses of the TEBs (Shifted down by 1MB, basically). Finally, it blocks off that part of address space, which nobody should be using now, to see if anyone does indeed touch it.
[NTOS]: Knowing if this change causes issues will help later in determining regressions due to TEB/PEBs mapped as VADs by ARM3, and regressions simply due to the change in VA layout.
[NTOS]: When implemented, the VAD mapping for PEB/TEB will only use that ~1MB, which yes, will limit ReactOS processes to each have only 256 threads. That is obviously a temporary limitation, one I doubt we'll even hit, but I'm putting it out here so you know.
svn path=/trunk/; revision=48176
2010-07-22 03:22:43 +00:00
|
|
|
|
2009-10-15 05:56:41 +00:00
|
|
|
//
|
|
|
|
// Map NLS Tables
|
|
|
|
//
|
|
|
|
Status = MmMapViewOfSection(ExpNlsSectionPointer,
|
|
|
|
(PEPROCESS)Process,
|
|
|
|
&TableBase,
|
|
|
|
0,
|
|
|
|
0,
|
|
|
|
&SectionOffset,
|
|
|
|
&ViewSize,
|
|
|
|
ViewShare,
|
|
|
|
MEM_TOP_DOWN,
|
|
|
|
PAGE_READONLY);
|
|
|
|
if (!NT_SUCCESS(Status)) return Status;
|
|
|
|
|
|
|
|
//
|
|
|
|
// Use SEH in case we can't load the PEB
|
|
|
|
//
|
|
|
|
_SEH2_TRY
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Initialize the PEB
|
|
|
|
//
|
|
|
|
RtlZeroMemory(Peb, sizeof(PEB));
|
|
|
|
|
|
|
|
//
|
|
|
|
// Set up data
|
|
|
|
//
|
|
|
|
Peb->ImageBaseAddress = Process->SectionBaseAddress;
|
|
|
|
Peb->InheritedAddressSpace = InitialPeb->InheritedAddressSpace;
|
|
|
|
Peb->Mutant = InitialPeb->Mutant;
|
|
|
|
Peb->ImageUsesLargePages = InitialPeb->ImageUsesLargePages;
|
|
|
|
|
|
|
|
//
|
|
|
|
// NLS
|
|
|
|
//
|
|
|
|
Peb->AnsiCodePageData = (PCHAR)TableBase + ExpAnsiCodePageDataOffset;
|
|
|
|
Peb->OemCodePageData = (PCHAR)TableBase + ExpOemCodePageDataOffset;
|
|
|
|
Peb->UnicodeCaseTableData = (PCHAR)TableBase + ExpUnicodeCaseTableDataOffset;
|
|
|
|
|
|
|
|
//
|
|
|
|
// Default Version Data (could get changed below)
|
|
|
|
//
|
|
|
|
Peb->OSMajorVersion = NtMajorVersion;
|
|
|
|
Peb->OSMinorVersion = NtMinorVersion;
|
|
|
|
Peb->OSBuildNumber = (USHORT)(NtBuildNumber & 0x3FFF);
|
|
|
|
Peb->OSPlatformId = 2; /* VER_PLATFORM_WIN32_NT */
|
|
|
|
Peb->OSCSDVersion = (USHORT)CmNtCSDVersion;
|
|
|
|
|
|
|
|
//
|
|
|
|
// Heap and Debug Data
|
|
|
|
//
|
|
|
|
Peb->NumberOfProcessors = KeNumberProcessors;
|
|
|
|
Peb->BeingDebugged = (BOOLEAN)(Process->DebugPort != NULL ? TRUE : FALSE);
|
|
|
|
Peb->NtGlobalFlag = NtGlobalFlag;
|
|
|
|
/*Peb->HeapSegmentReserve = MmHeapSegmentReserve;
|
|
|
|
Peb->HeapSegmentCommit = MmHeapSegmentCommit;
|
|
|
|
Peb->HeapDeCommitTotalFreeThreshold = MmHeapDeCommitTotalFreeThreshold;
|
|
|
|
Peb->HeapDeCommitFreeBlockThreshold = MmHeapDeCommitFreeBlockThreshold;
|
|
|
|
Peb->CriticalSectionTimeout = MmCriticalSectionTimeout;
|
|
|
|
Peb->MinimumStackCommit = MmMinimumStackCommitInBytes;
|
|
|
|
*/
|
|
|
|
Peb->MaximumNumberOfHeaps = (PAGE_SIZE - sizeof(PEB)) / sizeof(PVOID);
|
|
|
|
Peb->ProcessHeaps = (PVOID*)(Peb + 1);
|
|
|
|
|
|
|
|
//
|
|
|
|
// Session ID
|
|
|
|
//
|
|
|
|
if (Process->Session) Peb->SessionId = 0; // MmGetSessionId(Process);
|
|
|
|
}
|
|
|
|
_SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Fail
|
|
|
|
//
|
|
|
|
KeDetachProcess();
|
|
|
|
_SEH2_YIELD(return _SEH2_GetExceptionCode());
|
|
|
|
}
|
|
|
|
_SEH2_END;
|
|
|
|
|
|
|
|
//
|
|
|
|
// Use SEH in case we can't load the image
|
|
|
|
//
|
|
|
|
_SEH2_TRY
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Get NT Headers
|
|
|
|
//
|
|
|
|
NtHeaders = RtlImageNtHeader(Peb->ImageBaseAddress);
|
|
|
|
Characteristics = NtHeaders->FileHeader.Characteristics;
|
|
|
|
}
|
|
|
|
_SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Fail
|
|
|
|
//
|
|
|
|
KeDetachProcess();
|
|
|
|
_SEH2_YIELD(return STATUS_INVALID_IMAGE_PROTECT);
|
|
|
|
}
|
|
|
|
_SEH2_END;
|
|
|
|
|
|
|
|
//
|
|
|
|
// Parse the headers
|
|
|
|
//
|
|
|
|
if (NtHeaders)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Use SEH in case we can't load the headers
|
|
|
|
//
|
|
|
|
_SEH2_TRY
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Get the Image Config Data too
|
|
|
|
//
|
|
|
|
ImageConfigData = RtlImageDirectoryEntryToData(Peb->ImageBaseAddress,
|
|
|
|
TRUE,
|
|
|
|
IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG,
|
2010-07-16 00:34:26 +00:00
|
|
|
(PULONG)&ViewSize);
|
2009-10-15 05:56:41 +00:00
|
|
|
if (ImageConfigData)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Probe it
|
|
|
|
//
|
|
|
|
ProbeForRead(ImageConfigData,
|
|
|
|
sizeof(IMAGE_LOAD_CONFIG_DIRECTORY),
|
|
|
|
sizeof(ULONG));
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Write subsystem data
|
|
|
|
//
|
2009-12-10 00:35:12 +00:00
|
|
|
Peb->ImageSubsystem = NtHeaders->OptionalHeader.Subsystem;
|
|
|
|
Peb->ImageSubsystemMajorVersion = NtHeaders->OptionalHeader.MajorSubsystemVersion;
|
|
|
|
Peb->ImageSubsystemMinorVersion = NtHeaders->OptionalHeader.MinorSubsystemVersion;
|
2009-10-15 05:56:41 +00:00
|
|
|
|
|
|
|
//
|
|
|
|
// Check for version data
|
|
|
|
//
|
|
|
|
if (NtHeaders->OptionalHeader.Win32VersionValue)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Extract values and write them
|
|
|
|
//
|
|
|
|
Peb->OSMajorVersion = NtHeaders->OptionalHeader.Win32VersionValue & 0xFF;
|
|
|
|
Peb->OSMinorVersion = (NtHeaders->OptionalHeader.Win32VersionValue >> 8) & 0xFF;
|
|
|
|
Peb->OSBuildNumber = (NtHeaders->OptionalHeader.Win32VersionValue >> 16) & 0x3FFF;
|
|
|
|
Peb->OSPlatformId = (NtHeaders->OptionalHeader.Win32VersionValue >> 30) ^ 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Process the image config data overrides if specified
|
|
|
|
//
|
|
|
|
if (ImageConfigData != NULL)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Process CSD version override
|
|
|
|
//
|
|
|
|
if (ImageConfigData->CSDVersion)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Set new data
|
|
|
|
//
|
|
|
|
Peb->OSCSDVersion = ImageConfigData->CSDVersion;
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Process affinity mask override
|
|
|
|
//
|
|
|
|
if (ImageConfigData->ProcessAffinityMask)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Set new data
|
|
|
|
//
|
|
|
|
ProcessAffinityMask = ImageConfigData->ProcessAffinityMask;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Check if this is a UP image
|
|
|
|
if (Characteristics & IMAGE_FILE_UP_SYSTEM_ONLY)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Force it to use CPU 0
|
|
|
|
//
|
|
|
|
Peb->ImageProcessAffinityMask = 0;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Whatever was configured
|
|
|
|
//
|
|
|
|
Peb->ImageProcessAffinityMask = ProcessAffinityMask;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
_SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Fail
|
|
|
|
//
|
|
|
|
KeDetachProcess();
|
|
|
|
_SEH2_YIELD(return STATUS_INVALID_IMAGE_PROTECT);
|
|
|
|
}
|
|
|
|
_SEH2_END;
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Detach from the Process
|
|
|
|
//
|
|
|
|
KeDetachProcess();
|
|
|
|
*BasePeb = Peb;
|
|
|
|
return STATUS_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
NTSTATUS
|
|
|
|
NTAPI
|
|
|
|
MmCreateTeb(IN PEPROCESS Process,
|
|
|
|
IN PCLIENT_ID ClientId,
|
|
|
|
IN PINITIAL_TEB InitialTeb,
|
|
|
|
OUT PTEB *BaseTeb)
|
|
|
|
{
|
|
|
|
PTEB Teb;
|
|
|
|
NTSTATUS Status = STATUS_SUCCESS;
|
|
|
|
*BaseTeb = NULL;
|
|
|
|
|
|
|
|
//
|
|
|
|
// Attach to Target
|
|
|
|
//
|
|
|
|
KeAttachProcess(&Process->Pcb);
|
|
|
|
|
|
|
|
//
|
|
|
|
// Allocate the TEB
|
|
|
|
//
|
2010-07-22 19:08:45 +00:00
|
|
|
Status = MiCreatePebOrTeb(Process, sizeof(TEB), (PULONG_PTR)&Teb);
|
|
|
|
ASSERT(NT_SUCCESS(Status));
|
2009-10-15 05:56:41 +00:00
|
|
|
|
|
|
|
//
|
|
|
|
// Use SEH in case we can't load the TEB
|
|
|
|
//
|
|
|
|
_SEH2_TRY
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Initialize the PEB
|
|
|
|
//
|
|
|
|
RtlZeroMemory(Teb, sizeof(TEB));
|
|
|
|
|
|
|
|
//
|
|
|
|
// Set TIB Data
|
|
|
|
//
|
2009-12-10 00:35:12 +00:00
|
|
|
Teb->NtTib.ExceptionList = EXCEPTION_CHAIN_END;
|
|
|
|
Teb->NtTib.Self = (PNT_TIB)Teb;
|
2009-10-15 05:56:41 +00:00
|
|
|
|
|
|
|
//
|
|
|
|
// Identify this as an OS/2 V3.0 ("Cruiser") TIB
|
|
|
|
//
|
2009-12-10 00:35:12 +00:00
|
|
|
Teb->NtTib.Version = 30 << 8;
|
2009-10-15 05:56:41 +00:00
|
|
|
|
|
|
|
//
|
|
|
|
// Set TEB Data
|
|
|
|
//
|
|
|
|
Teb->ClientId = *ClientId;
|
|
|
|
Teb->RealClientId = *ClientId;
|
|
|
|
Teb->ProcessEnvironmentBlock = Process->Peb;
|
|
|
|
Teb->CurrentLocale = PsDefaultThreadLocaleId;
|
|
|
|
|
|
|
|
//
|
|
|
|
// Check if we have a grandparent TEB
|
|
|
|
//
|
|
|
|
if ((InitialTeb->PreviousStackBase == NULL) &&
|
|
|
|
(InitialTeb->PreviousStackLimit == NULL))
|
|
|
|
{
|
|
|
|
//
|
2009-10-15 16:47:26 +00:00
|
|
|
// Use initial TEB values
|
2009-10-15 05:56:41 +00:00
|
|
|
//
|
2009-12-10 00:35:12 +00:00
|
|
|
Teb->NtTib.StackBase = InitialTeb->StackBase;
|
|
|
|
Teb->NtTib.StackLimit = InitialTeb->StackLimit;
|
2009-10-15 16:47:26 +00:00
|
|
|
Teb->DeallocationStack = InitialTeb->AllocatedStackBase;
|
2009-10-15 05:56:41 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
//
|
2009-10-15 16:47:26 +00:00
|
|
|
// Use grandparent TEB values
|
2009-10-15 05:56:41 +00:00
|
|
|
//
|
2009-12-10 00:35:12 +00:00
|
|
|
Teb->NtTib.StackBase = InitialTeb->PreviousStackBase;
|
|
|
|
Teb->NtTib.StackLimit = InitialTeb->PreviousStackLimit;
|
2009-10-15 05:56:41 +00:00
|
|
|
}
|
2009-10-15 16:47:26 +00:00
|
|
|
|
2009-10-15 05:56:41 +00:00
|
|
|
//
|
|
|
|
// Initialize the static unicode string
|
|
|
|
//
|
|
|
|
Teb->StaticUnicodeString.MaximumLength = sizeof(Teb->StaticUnicodeBuffer);
|
|
|
|
Teb->StaticUnicodeString.Buffer = Teb->StaticUnicodeBuffer;
|
|
|
|
}
|
|
|
|
_SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Get error code
|
|
|
|
//
|
|
|
|
Status = _SEH2_GetExceptionCode();
|
|
|
|
}
|
|
|
|
_SEH2_END;
|
|
|
|
|
|
|
|
//
|
|
|
|
// Return
|
|
|
|
//
|
|
|
|
KeDetachProcess();
|
|
|
|
*BaseTeb = Teb;
|
|
|
|
return Status;
|
|
|
|
}
|
|
|
|
|
2010-07-24 15:01:05 +00:00
|
|
|
/*
 * Phase-2 initialization of a process address space: fixes up PFN entries
 * for the page directory and hyperspace pages, reserves the ROS PEB/TEB
 * ranges, and (optionally) maps the image section and records the image
 * file name / audit name.
 *
 * NOTE(review): ProcessClone and Flags are not referenced in this body —
 * presumably reserved for fork/clone support; confirm against callers.
 */
NTSTATUS
NTAPI
MmInitializeProcessAddressSpace(IN PEPROCESS Process,
                                IN PEPROCESS ProcessClone OPTIONAL,
                                IN PVOID Section OPTIONAL,
                                IN OUT PULONG Flags,
                                IN POBJECT_NAME_INFORMATION *AuditName OPTIONAL)
{
    NTSTATUS Status = STATUS_SUCCESS;
    SIZE_T ViewSize = 0;
    PVOID ImageBase = 0;
    PROS_SECTION_OBJECT SectionObject = Section;
    PMMPTE PointerPte;
    KIRQL OldIrql;
    PMMPDE PointerPde;
    PFN_NUMBER PageFrameNumber;
    UNICODE_STRING FileName;
    PWCHAR Source;
    PCHAR Destination;
    USHORT Length = 0;

    /* We should have a PDE */
    ASSERT(Process->Pcb.DirectoryTableBase[0] != 0);
    ASSERT(Process->PdeUpdateNeeded == FALSE);

    /* Attach to the process so its address space is current */
    KeAttachProcess(&Process->Pcb);

    /* The address space should now be in phase 1 or 0 */
    ASSERT(Process->AddressSpaceInitialized <= 1);
    Process->AddressSpaceInitialized = 2;

    /* Initialize the Address Space lock */
    KeInitializeGuardedMutex(&Process->AddressCreationLock);
    Process->Vm.WorkingSetExpansionLinks.Flink = NULL;

    /* Initialize AVL tree (an empty tree has a self-parented root) */
    ASSERT(Process->VadRoot.NumberGenericTableElements == 0);
    Process->VadRoot.BalancedRoot.u1.Parent = &Process->VadRoot.BalancedRoot;

    /* Lock PFN database */
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Setup the PFN for the PDE base of this process */
    PointerPte = MiAddressToPte(PDE_BASE);
    PageFrameNumber = PFN_FROM_PTE(PointerPte);
    MiInitializePfn(PageFrameNumber, PointerPte, TRUE);

    /* Do the same for hyperspace */
    PointerPde = MiAddressToPde(HYPER_SPACE);
    PageFrameNumber = PFN_FROM_PTE(PointerPde);
    MiInitializePfn(PageFrameNumber, PointerPde, TRUE);

    /* Release PFN lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    /* Lock the VAD, ARM3-owned ranges away */
    MiRosTakeOverPebTebRanges(Process);

    /* Check if there's a Section Object */
    if (SectionObject)
    {
        /* Determine the image file name and save it to EPROCESS:
           scan backwards from the end until a path separator is found */
        FileName = SectionObject->FileObject->FileName;
        Source = (PWCHAR)((PCHAR)FileName.Buffer + FileName.Length);
        if (FileName.Buffer)
        {
            /* Loop the file name */
            while (Source > FileName.Buffer)
            {
                /* Make sure this isn't a backslash */
                if (*--Source == OBJ_NAME_PATH_SEPARATOR)
                {
                    /* If so, stop it here */
                    Source++;
                    break;
                }
                else
                {
                    /* Otherwise, keep going */
                    Length++;
                }
            }
        }

        /* Copy the name to the process, truncating to fit ImageFileName
           (narrowing each WCHAR to a single byte) */
        Destination = Process->ImageFileName;
        Length = min(Length, sizeof(Process->ImageFileName) - 1);
        while (Length--) *Destination++ = (UCHAR)*Source++;
        *Destination = ANSI_NULL;

        /* Check if caller wants an audit name */
        if (AuditName)
        {
            /* Setup the audit name */
            Status = SeInitializeProcessAuditName(SectionObject->FileObject,
                                                  FALSE,
                                                  AuditName);
            if (!NT_SUCCESS(Status))
            {
                /* Fail */
                KeDetachProcess();
                return Status;
            }
        }

        /* Map the section */
        Status = MmMapViewOfSection(Section,
                                    Process,
                                    (PVOID*)&ImageBase,
                                    0,
                                    0,
                                    NULL,
                                    &ViewSize,
                                    0,
                                    MEM_COMMIT,
                                    PAGE_READWRITE);

        /* Save the pointer (note: stored even if the map above failed;
           the failure status is still propagated to the caller) */
        Process->SectionBaseAddress = ImageBase;
    }

    /* Be nice and detach */
    KeDetachProcess();

    /* Return status to caller */
    return Status;
}
|
|
|
|
|
|
|
|
NTSTATUS
|
|
|
|
NTAPI
|
|
|
|
MmInitializeHandBuiltProcess(IN PEPROCESS Process,
|
|
|
|
IN PULONG_PTR DirectoryTableBase)
|
|
|
|
{
|
|
|
|
/* Share the directory base with the idle process */
|
|
|
|
DirectoryTableBase[0] = PsGetCurrentProcess()->Pcb.DirectoryTableBase[0];
|
|
|
|
DirectoryTableBase[1] = PsGetCurrentProcess()->Pcb.DirectoryTableBase[1];
|
|
|
|
|
|
|
|
/* Initialize the Addresss Space */
|
|
|
|
KeInitializeGuardedMutex(&Process->AddressCreationLock);
|
|
|
|
KeInitializeSpinLock(&Process->HyperSpaceLock);
|
|
|
|
Process->Vm.WorkingSetExpansionLinks.Flink = NULL;
|
|
|
|
ASSERT(Process->VadRoot.NumberGenericTableElements == 0);
|
|
|
|
Process->VadRoot.BalancedRoot.u1.Parent = &Process->VadRoot.BalancedRoot;
|
|
|
|
|
|
|
|
/* Done */
|
|
|
|
Process->HasAddressSpace = TRUE;//??
|
|
|
|
return STATUS_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Second-stage setup for a hand-built process: only reserves the
 * ReactOS-owned PEB/TEB VA ranges. Always succeeds.
 */
NTSTATUS
NTAPI
MmInitializeHandBuiltProcess2(IN PEPROCESS Process)
{
    /* Lock the VAD, ARM3-owned ranges away */
    MiRosTakeOverPebTebRanges(Process);
    return STATUS_SUCCESS;
}
|
|
|
|
|
2010-07-24 15:30:24 +00:00
|
|
|
/* FIXME: Evaluate ways to make this portable yet arch-specific */
/*
 * Builds the initial x86 address space for a new process: allocates and
 * zeroes a page directory and a hyperspace page, copies the kernel PDEs
 * into the new directory, and installs the hyperspace and recursive
 * (self-map) entries. Returns the physical directory bases via
 * DirectoryTableBase. Always returns TRUE.
 *
 * NOTE(review): MinWs is not referenced in this body — TODO confirm it is
 * intentionally ignored (minimum working set not enforced yet).
 */
BOOLEAN
NTAPI
MmCreateProcessAddressSpace(IN ULONG MinWs,
                            IN PEPROCESS Process,
                            OUT PULONG_PTR DirectoryTableBase)
{
    KIRQL OldIrql;
    PFN_NUMBER PdeIndex, HyperIndex;
    PMMPTE PointerPte;
    MMPTE TempPte, PdePte;
    ULONG PdeOffset;
    PMMPTE SystemTable;

    /* No page colors yet */
    Process->NextPageColor = 0;

    /* Setup the hyperspace lock */
    KeInitializeSpinLock(&Process->HyperSpaceLock);

    /* Lock PFN database */
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Get a page for the PDE (zeroing must happen outside the PFN lock) */
    PdeIndex = MiRemoveAnyPage(0);
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    MiZeroPhysicalPage(PdeIndex);
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Get a page for hyperspace */
    HyperIndex = MiRemoveAnyPage(0);
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    MiZeroPhysicalPage(HyperIndex);

    /* Switch to phase 1 initialization */
    ASSERT(Process->AddressSpaceInitialized == 0);
    Process->AddressSpaceInitialized = 1;

    /* Set the base directory pointers (physical addresses) */
    DirectoryTableBase[0] = PdeIndex << PAGE_SHIFT;
    DirectoryTableBase[1] = HyperIndex << PAGE_SHIFT;

    /* Make sure we don't already have a page directory setup */
    ASSERT(Process->Pcb.DirectoryTableBase[0] == 0);

    /* Insert us into the Mm process list */
    InsertTailList(&MmProcessList, &Process->MmProcessLinks);

    /* Get a PTE to temporarily map the new page directory into system space */
    PointerPte = MiReserveSystemPtes(1, SystemPteSpace);
    ASSERT(PointerPte != NULL);

    /* Build it */
    MI_MAKE_HARDWARE_PTE_KERNEL(&PdePte,
                                PointerPte,
                                MM_READWRITE,
                                PdeIndex);

    /* Set it dirty and map it */
    PdePte.u.Hard.Dirty = TRUE;
    MI_WRITE_VALID_PTE(PointerPte, PdePte);

    /* Now get the page directory (which we'll double map, so call it a page table) */
    SystemTable = MiPteToAddress(PointerPte);

    /* Copy all the kernel mappings from the current directory */
    PdeOffset = MiGetPdeOffset(MmSystemRangeStart);

    RtlCopyMemory(&SystemTable[PdeOffset],
                  MiAddressToPde(MmSystemRangeStart),
                  PAGE_SIZE - PdeOffset * sizeof(MMPTE));

    /* Now write the PTE/PDE entry for hyperspace itself */
    TempPte = ValidKernelPte;
    TempPte.u.Hard.PageFrameNumber = HyperIndex;
    PdeOffset = MiGetPdeOffset(HYPER_SPACE);
    SystemTable[PdeOffset] = TempPte;

    /* Sanity check: hyperspace fits in a single PDE */
    PdeOffset++;
    ASSERT(MiGetPdeOffset(MmHyperSpaceEnd) >= PdeOffset);

    /* Now do the x86 trick of making the PDE a page table itself
       (the recursive self-map entry at PTE_BASE) */
    PdeOffset = MiGetPdeOffset(PTE_BASE);
    TempPte.u.Hard.PageFrameNumber = PdeIndex;
    SystemTable[PdeOffset] = TempPte;

    /* Let go of the system PTE */
    MiReleaseSystemPtes(PointerPte, 1, SystemPteSpace);
    return TRUE;
}
|
|
|
|
|
2010-07-24 16:12:39 +00:00
|
|
|
/*
 * Tears down the ARM3-owned parts of a dying process' address space:
 * removes every VAD from the tree and frees it. Currently only PEB/TEB
 * private VADs are expected (asserted below).
 */
VOID
NTAPI
MmCleanProcessAddressSpace(IN PEPROCESS Process)
{
    PMMVAD Vad;
    PMM_AVL_TABLE VadTree;
    PETHREAD Thread = PsGetCurrentThread();

    /* Lock the process address space from changes */
    MmLockAddressSpace(&Process->Vm);

    /* Enumerate the VADs */
    VadTree = &Process->VadRoot;
    DPRINT("Cleaning up VADs: %d\n", VadTree->NumberGenericTableElements);
    while (VadTree->NumberGenericTableElements)
    {
        /* Grab the current VAD (the root's right child) */
        Vad = (PMMVAD)VadTree->BalancedRoot.RightChild;

        /* Lock the working set while mutating the tree */
        MiLockProcessWorkingSet(Process, Thread);

        /* Remove this VAD from the tree */
        ASSERT(VadTree->NumberGenericTableElements >= 1);
        DPRINT("Removing node for VAD: %lx %lx\n", Vad->StartingVpn, Vad->EndingVpn);
        MiRemoveNode((PMMADDRESS_NODE)Vad, VadTree);
        DPRINT("Moving on: %d\n", VadTree->NumberGenericTableElements);

        /* Check if this VAD was the hint */
        if (VadTree->NodeHint == Vad)
        {
            /* Get a new hint, unless we're empty now, in which case nothing */
            VadTree->NodeHint = VadTree->BalancedRoot.RightChild;
            if (!VadTree->NumberGenericTableElements) VadTree->NodeHint = NULL;
        }

        /* Only PEB/TEB VADs supported for now */
        ASSERT(Vad->u.VadFlags.PrivateMemory == 1);
        ASSERT(Vad->u.VadFlags.VadType == VadNone);

        /* Release the working set */
        MiUnlockProcessWorkingSet(Process, Thread);

        /* Free the VAD memory */
        ExFreePool(Vad);
    }

    /* Release the address space */
    MmUnlockAddressSpace(&Process->Vm);
}
|
|
|
|
|
2009-10-15 05:56:41 +00:00
|
|
|
/* SYSTEM CALLS ***************************************************************/
|
|
|
|
|
|
|
|
/*
 * AWE (Address Windowing Extensions) system call — not implemented.
 */
NTSTATUS
NTAPI
NtAllocateUserPhysicalPages(IN HANDLE ProcessHandle,
                            IN OUT PULONG_PTR NumberOfPages,
                            IN OUT PULONG_PTR UserPfnArray)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}
|
|
|
|
|
|
|
|
/*
 * AWE (Address Windowing Extensions) system call — not implemented.
 */
NTSTATUS
NTAPI
NtMapUserPhysicalPages(IN PVOID VirtualAddresses,
                       IN ULONG_PTR NumberOfPages,
                       IN OUT PULONG_PTR UserPfnArray)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}
|
|
|
|
|
|
|
|
/*
 * AWE (Address Windowing Extensions) system call — not implemented.
 */
NTSTATUS
NTAPI
NtMapUserPhysicalPagesScatter(IN PVOID *VirtualAddresses,
                              IN ULONG_PTR NumberOfPages,
                              IN OUT PULONG_PTR UserPfnArray)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}
|
|
|
|
|
|
|
|
/*
 * AWE (Address Windowing Extensions) system call — not implemented.
 */
NTSTATUS
NTAPI
NtFreeUserPhysicalPages(IN HANDLE ProcessHandle,
                        IN OUT PULONG_PTR NumberOfPages,
                        IN OUT PULONG_PTR UserPfnArray)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}
|
|
|
|
|
2009-06-27 08:41:45 +00:00
|
|
|
/* EOF */
|