2009-06-22 08:22:41 +00:00
|
|
|
/*
|
|
|
|
* PROJECT: ReactOS Kernel
|
|
|
|
* LICENSE: BSD - See COPYING.ARM in the top level directory
|
|
|
|
* FILE: ntoskrnl/mm/ARM3/pool.c
|
|
|
|
* PURPOSE: ARM Memory Manager Pool Allocator
|
|
|
|
* PROGRAMMERS: ReactOS Portable Systems Group
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* INCLUDES *******************************************************************/
|
|
|
|
|
|
|
|
#include <ntoskrnl.h>
|
|
|
|
#define NDEBUG
|
|
|
|
#include <debug.h>
|
|
|
|
|
|
|
|
#define MODULE_INVOLVED_IN_ARM3
|
2014-11-10 16:26:55 +00:00
|
|
|
#include <mm/ARM3/miarm.h>
|
2009-06-22 08:22:41 +00:00
|
|
|
|
|
|
|
/* GLOBALS ********************************************************************/

/* Free nonpaged pool entries are kept on multiple lists, bucketed by size,
   to reduce contention and speed up best-fit lookups */
LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

/* Count of currently free nonpaged pool pages, and the number of expansion
   pool pages charged at initialization time */
PFN_COUNT MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;

/* End of the initial (non-expansion) nonpaged pool region */
PVOID MmNonPagedPoolEnd0;

/* First and last physical page frames backing the initial nonpaged pool */
PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;

/* Guarded mutex serializing all paged pool allocation/free operations */
KGUARDED_MUTEX MmPagedPoolMutex;

/* Bookkeeping for paged pool: PTE range, allocation bitmap, hint, etc. */
MM_PAGED_POOL_INFO MmPagedPoolInfo;

/* Pages of nonpaged pool currently allocated */
SIZE_T MmAllocatedNonPagedPool;

/* Tag selected for special pool tracking (0 when disabled) */
ULONG MmSpecialPoolTag;

/* Percentage of pool consumed, exposed for diagnostics */
ULONG MmConsumedPoolPercentage;

/* When TRUE, freed nonpaged pool pages are made inaccessible to catch
   use-after-free; this also disables the S-LIST fast paths below */
BOOLEAN MmProtectFreedNonPagedPool;

/* Lookaside S-LISTs caching single-page pool allocations, with their
   maximum depths (scaled up on machines with >= 1GB / >= 2GB of RAM) */
SLIST_HEADER MiNonPagedPoolSListHead;
ULONG MiNonPagedPoolSListMaximum = 4;
SLIST_HEADER MiPagedPoolSListHead;
ULONG MiPagedPoolSListMaximum = 8;
|
- Initialize the value of MmBootImageSize in ARM3 now.
- Also fix its value such that it's PDE aligned -- this makes sure that we don't step on any of the boot loader's PDE mappings and can blow everything away later.
- Initialize the MmSystem/User/Probe Addresses in ARM3 as well (no functional change).
- Print out a lot more of the VA ranges in ARM3's Phase 2 initialization. Most of the VA space is now dumped out.
- Write out the code to initialize session space VA ranges
- Image space, view space, working set space and pool space values are all calculated properly.
- NT default sizes are used, without support for registry overrides (yet).
- Also system view space is initialized and sized.
- Code is heavily commented and explained for inquisitive minds.
- Define the paged pool start address, minimum/default size, and add some extra pool header asserts/definitions.
- Define MmPagedPoolInfo to keep track of all paged pool related information (start/end PTEs, VA ranges, allocation/free bitmaps, etc).
- Fixed a lot of comments and added some new ones to provide extra clarity.
- Implement MiBuildPagedPool. It has two jobs:
- Build and create the shadow system page directory, which double-maps the System process' PDE.
- More explenations are in the comments.
- Define the paged pool region and size, and initialize MmPagedPoolInfo accordingly.
- Create and setup the paged pool allocation and free bitmaps (again explained in the comments).
- There shouldn't be any real functional change yet due to this commit.
- We need to create memory areas for session space and system view space otherwise the VA regions could get used by ReactOS instead.
svn path=/trunk/; revision=42148
2009-07-22 22:46:29 +00:00
|
|
|
|
2009-06-22 08:22:41 +00:00
|
|
|
/* PRIVATE FUNCTIONS **********************************************************/
|
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
VOID
|
|
|
|
NTAPI
|
|
|
|
MiProtectFreeNonPagedPool(IN PVOID VirtualAddress,
|
2011-12-25 18:21:05 +00:00
|
|
|
IN ULONG PageCount)
|
2010-08-29 19:13:08 +00:00
|
|
|
{
|
|
|
|
PMMPTE PointerPte, LastPte;
|
|
|
|
MMPTE TempPte;
|
|
|
|
|
|
|
|
/* If pool is physical, can't protect PTEs */
|
|
|
|
if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return;
|
|
|
|
|
|
|
|
/* Get PTE pointers and loop */
|
|
|
|
PointerPte = MiAddressToPte(VirtualAddress);
|
|
|
|
LastPte = PointerPte + PageCount;
|
|
|
|
do
|
|
|
|
{
|
|
|
|
/* Capture the PTE for safety */
|
|
|
|
TempPte = *PointerPte;
|
|
|
|
|
|
|
|
/* Mark it as an invalid PTE, set proto bit to recognize it as pool */
|
|
|
|
TempPte.u.Hard.Valid = 0;
|
|
|
|
TempPte.u.Soft.Prototype = 1;
|
|
|
|
MI_WRITE_INVALID_PTE(PointerPte, TempPte);
|
|
|
|
} while (++PointerPte < LastPte);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Flush the TLB */
|
|
|
|
KeFlushEntireTb(TRUE, TRUE);
|
|
|
|
}
|
|
|
|
|
|
|
|
BOOLEAN
|
|
|
|
NTAPI
|
|
|
|
MiUnProtectFreeNonPagedPool(IN PVOID VirtualAddress,
|
2011-12-25 18:21:05 +00:00
|
|
|
IN ULONG PageCount)
|
2010-08-29 19:13:08 +00:00
|
|
|
{
|
|
|
|
PMMPTE PointerPte;
|
|
|
|
MMPTE TempPte;
|
|
|
|
PFN_NUMBER UnprotectedPages = 0;
|
|
|
|
|
|
|
|
/* If pool is physical, can't protect PTEs */
|
|
|
|
if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return FALSE;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Get, and capture the PTE */
|
|
|
|
PointerPte = MiAddressToPte(VirtualAddress);
|
|
|
|
TempPte = *PointerPte;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Loop protected PTEs */
|
|
|
|
while ((TempPte.u.Hard.Valid == 0) && (TempPte.u.Soft.Prototype == 1))
|
|
|
|
{
|
|
|
|
/* Unprotect the PTE */
|
|
|
|
TempPte.u.Hard.Valid = 1;
|
|
|
|
TempPte.u.Soft.Prototype = 0;
|
|
|
|
MI_WRITE_VALID_PTE(PointerPte, TempPte);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* One more page */
|
|
|
|
if (++UnprotectedPages == PageCount) break;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Capture next PTE */
|
|
|
|
TempPte = *(++PointerPte);
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Return if any pages were unprotected */
|
|
|
|
return UnprotectedPages ? TRUE : FALSE;
|
|
|
|
}
|
|
|
|
|
|
|
|
FORCEINLINE
|
2013-11-26 13:45:33 +00:00
|
|
|
VOID
|
2010-08-29 19:13:08 +00:00
|
|
|
MiProtectedPoolUnProtectLinks(IN PLIST_ENTRY Links,
|
|
|
|
OUT PVOID* PoolFlink,
|
|
|
|
OUT PVOID* PoolBlink)
|
|
|
|
{
|
|
|
|
BOOLEAN Safe;
|
|
|
|
PVOID PoolVa;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Initialize variables */
|
|
|
|
*PoolFlink = *PoolBlink = NULL;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Check if the list has entries */
|
|
|
|
if (IsListEmpty(Links) == FALSE)
|
|
|
|
{
|
|
|
|
/* We are going to need to forward link to do an insert */
|
|
|
|
PoolVa = Links->Flink;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* So make it safe to access */
|
|
|
|
Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
|
2013-07-14 09:30:26 +00:00
|
|
|
if (Safe) *PoolFlink = PoolVa;
|
2010-08-29 19:13:08 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Are we going to need a backward link too? */
|
|
|
|
if (Links != Links->Blink)
|
|
|
|
{
|
|
|
|
/* Get the head's backward link for the insert */
|
|
|
|
PoolVa = Links->Blink;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Make it safe to access */
|
|
|
|
Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
|
2013-07-14 09:30:26 +00:00
|
|
|
if (Safe) *PoolBlink = PoolVa;
|
2010-08-29 19:13:08 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
FORCEINLINE
|
2013-11-26 13:45:33 +00:00
|
|
|
VOID
|
2010-08-29 19:13:08 +00:00
|
|
|
MiProtectedPoolProtectLinks(IN PVOID PoolFlink,
|
|
|
|
IN PVOID PoolBlink)
|
|
|
|
{
|
|
|
|
/* Reprotect the pages, if they got unprotected earlier */
|
|
|
|
if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
|
|
|
|
if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
VOID
|
|
|
|
NTAPI
|
|
|
|
MiProtectedPoolInsertList(IN PLIST_ENTRY ListHead,
|
|
|
|
IN PLIST_ENTRY Entry,
|
|
|
|
IN BOOLEAN Critical)
|
|
|
|
{
|
|
|
|
PVOID PoolFlink, PoolBlink;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Make the list accessible */
|
|
|
|
MiProtectedPoolUnProtectLinks(ListHead, &PoolFlink, &PoolBlink);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Now insert in the right position */
|
|
|
|
Critical ? InsertHeadList(ListHead, Entry) : InsertTailList(ListHead, Entry);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* And reprotect the pages containing the free links */
|
|
|
|
MiProtectedPoolProtectLinks(PoolFlink, PoolBlink);
|
|
|
|
}
|
|
|
|
|
|
|
|
VOID
|
|
|
|
NTAPI
|
|
|
|
MiProtectedPoolRemoveEntryList(IN PLIST_ENTRY Entry)
|
|
|
|
{
|
|
|
|
PVOID PoolFlink, PoolBlink;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Make the list accessible */
|
|
|
|
MiProtectedPoolUnProtectLinks(Entry, &PoolFlink, &PoolBlink);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Now remove */
|
|
|
|
RemoveEntryList(Entry);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* And reprotect the pages containing the free links */
|
|
|
|
if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
|
|
|
|
if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
|
|
|
|
}
|
|
|
|
|
2020-10-06 19:44:01 +00:00
|
|
|
CODE_SEG("INIT")
/**
 * Computes the low/high free-page thresholds for nonpaged pool, used to
 * signal the pool-condition events.
 *
 * Low threshold: the smaller of 8MB and one third of the pool.
 * High threshold: the smaller of 20MB and one half of the pool.
 */
VOID
NTAPI
MiInitializeNonPagedPoolThresholds(VOID)
{
    PFN_NUMBER PoolSizeInPages = MmMaximumNonPagedPoolInPages;

    /* Low water mark: 8MB, capped at a third of the total pool */
    MiLowNonPagedPoolThreshold = (8 * _1MB) >> PAGE_SHIFT;
    MiLowNonPagedPoolThreshold = min(MiLowNonPagedPoolThreshold,
                                     PoolSizeInPages / 3);

    /* High water mark: 20MB, capped at half of the total pool */
    MiHighNonPagedPoolThreshold = (20 * _1MB) >> PAGE_SHIFT;
    MiHighNonPagedPoolThreshold = min(MiHighNonPagedPoolThreshold,
                                      PoolSizeInPages / 2);

    /* The thresholds must be strictly ordered */
    ASSERT(MiLowNonPagedPoolThreshold < MiHighNonPagedPoolThreshold);
}
|
|
|
|
|
2020-10-06 19:44:01 +00:00
|
|
|
CODE_SEG("INIT")
/**
 * Sets the initial signaled state of the four pool-condition events
 * (low/high, paged/nonpaged) based on how much pool is currently free.
 *
 * Each pool's bookkeeping is sampled under its own lock: the guarded mutex
 * for paged pool, the queued spinlock for nonpaged pool.
 */
VOID
NTAPI
MiInitializePoolEvents(VOID)
{
    KIRQL OldIrql;
    PFN_NUMBER FreePages;

    /* Sample paged pool under its mutex */
    KeAcquireGuardedMutex(&MmPagedPoolMutex);

    /* Free pages = total paged pool minus what has been allocated */
    FreePages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;

    /* High event is signaled only when free pool is plentiful */
    if (FreePages >= MiHighPagedPoolThreshold)
    {
        KeSetEvent(MiHighPagedPoolEvent, 0, FALSE);
    }
    else
    {
        KeClearEvent(MiHighPagedPoolEvent);
    }

    /* Low event is signaled only when free pool is scarce */
    if (FreePages <= MiLowPagedPoolThreshold)
    {
        KeSetEvent(MiLowPagedPoolEvent, 0, FALSE);
    }
    else
    {
        KeClearEvent(MiLowPagedPoolEvent);
    }

    /* Done with paged pool */
    KeReleaseGuardedMutex(&MmPagedPoolMutex);

    /* Sample nonpaged pool under its queued spinlock */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    /* Free pages = maximum nonpaged pool minus what has been allocated */
    FreePages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;

    /* Same high-threshold rule as above */
    if (FreePages >= MiHighNonPagedPoolThreshold)
    {
        KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        KeClearEvent(MiHighNonPagedPoolEvent);
    }

    /* Same low-threshold rule as above */
    if (FreePages <= MiLowNonPagedPoolThreshold)
    {
        KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        KeClearEvent(MiLowNonPagedPoolEvent);
    }

    /* Done with nonpaged pool */
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
}
|
|
|
|
|
2020-10-06 19:44:01 +00:00
|
|
|
CODE_SEG("INIT")
/**
 * One-time initialization of the ARM3 nonpaged pool: sets up the single-page
 * S-LIST caches, the free-page lists, the initial free run covering the
 * whole initial pool, and the PTE space for pool expansion.
 */
VOID
NTAPI
MiInitializeNonPagedPool(VOID)
{
    ULONG ListIndex;
    PFN_COUNT InitialPoolPages;
    PMMFREE_POOL_ENTRY Entry, FirstEntry;
    PMMPTE PointerPte;
    PAGED_CODE();

    /*
     * Set up the pool S-LISTs and their depth limits. The defaults are
     * scaled by 8 on a >= 2GB machine and by 2 on a >= 1GB machine.
     */
    InitializeSListHead(&MiPagedPoolSListHead);
    InitializeSListHead(&MiNonPagedPoolSListHead);
    if (MmNumberOfPhysicalPages >= ((2 * _1GB) / PAGE_SIZE))
    {
        MiNonPagedPoolSListMaximum *= 8;
        MiPagedPoolSListMaximum *= 8;
    }
    else if (MmNumberOfPhysicalPages >= (_1GB / PAGE_SIZE))
    {
        MiNonPagedPoolSListMaximum *= 2;
        MiPagedPoolSListMaximum *= 2;
    }

    /*
     * With pool debugging (freed-pool protection) enabled, disable the
     * S-LIST fast paths entirely so they cannot mask corruption.
     */
    if (MmProtectFreedNonPagedPool)
    {
        MiNonPagedPoolSListMaximum = 0;
        MiPagedPoolSListMaximum = 0;
    }

    /* Initialize every free-page list (multiple lists reduce contention) */
    for (ListIndex = 0; ListIndex < MI_MAX_FREE_PAGE_LISTS; ListIndex++)
    {
        InitializeListHead(&MmNonPagedPoolFreeListHead[ListIndex]);
    }

    /* Size of the initial nonpaged pool, in pages */
    InitialPoolPages = (PFN_COUNT)BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
    MmNumberOfFreeNonPagedPool = InitialPoolPages;

    /* The first free entry describes the entire initial pool as one run */
    Entry = MmNonPagedPoolStart;
    FirstEntry = Entry;
    Entry->Size = InitialPoolPages;
    Entry->Signature = MM_FREE_POOL_SIGNATURE;
    Entry->Owner = FirstEntry;

    /* A run this large belongs on the last (largest-size) list */
    InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
                   &Entry->List);

    /* Stamp every remaining page with a free entry pointing at the owner */
    while (InitialPoolPages-- > 1)
    {
        Entry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)Entry + PAGE_SIZE);
        Entry->Owner = FirstEntry;
        Entry->Signature = MM_FREE_POOL_SIGNATURE;
    }

    /* Record the first physical frame of the initial pool (must be mapped) */
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    /* Remember where the initial nonpaged pool ends */
    MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                 MmSizeOfNonPagedPoolInBytes);

    /* Record the last physical frame of the initial pool (must be mapped) */
    PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    /* The first expansion page is a guard page and must NOT be mapped */
    PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
    ASSERT(PointerPte->u.Hard.Valid == 0);

    /* Size of the expansion region alone, in pages */
    MiExpansionPoolPagesInitialCharge = (PFN_COUNT)
        BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes -
                       MmSizeOfNonPagedPoolInBytes);

    /* Subtract the two guard pages (one at each end of the region) */
    MiExpansionPoolPagesInitialCharge -= 2;

    /*
     * Hand the expansion PTE space to the system PTE allocator. Skip the
     * top guard page explicitly (+1); the bottom guard page is implied by
     * the charge being short by one page on that end.
     */
    MiInitializeSystemPtes(PointerPte + 1,
                           MiExpansionPoolPagesInitialCharge,
                           NonPagedPoolExpansion);
}
|
|
|
|
|
2012-02-26 05:53:53 +00:00
|
|
|
POOL_TYPE
|
|
|
|
NTAPI
|
|
|
|
MmDeterminePoolType(IN PVOID PoolAddress)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Use a simple bounds check
|
|
|
|
//
|
2013-01-28 18:58:55 +00:00
|
|
|
if (PoolAddress >= MmPagedPoolStart && PoolAddress <= MmPagedPoolEnd)
|
|
|
|
return PagedPool;
|
|
|
|
else if (PoolAddress >= MmNonPagedPoolStart && PoolAddress <= MmNonPagedPoolEnd)
|
|
|
|
return NonPagedPool;
|
|
|
|
KeBugCheckEx(BAD_POOL_CALLER, 0x42, (ULONG_PTR)PoolAddress, 0, 0);
|
2012-02-26 05:53:53 +00:00
|
|
|
}
|
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
PVOID
|
|
|
|
NTAPI
|
|
|
|
MiAllocatePoolPages(IN POOL_TYPE PoolType,
|
|
|
|
IN SIZE_T SizeInBytes)
|
|
|
|
{
|
[HAL/NDK]
- Make Vector parameter in HalEnableSystemInterrupt, HalDisableSystemInterrupt and HalBeginSystemInterrupt an ULONG, not an UCHAR
[NDK]
- 64bit fixes for HANDLE_TABLE, KPROCESS, SECTION_IMAGE_INFORMATION, MMADDRESS_LIST, MMVAD_FLAGS, MMVAD, MMVAD_LONG, MMVAD_SHORT, MEMORY_DESCRIPTOR, MEMORY_ALLOCATION_DESCRIPTOR, LdrVerifyMappedImageMatchesChecksum
- KDPC_DATA::DpcQueueDepth is signed on amd64, unsigned on x86
[NTOSKRNL]
- Fix hundreds of MSVC and amd64 warnings
- add a pragma message to FstubFixupEfiPartition, since it looks broken
- Move portable Ke constants from <arch>/cpu.c to krnlinit.c
- Fixed a bug in amd64 KiGeneralProtectionFaultHandler
svn path=/trunk/; revision=53734
2011-09-18 13:11:45 +00:00
|
|
|
PFN_NUMBER PageFrameNumber;
|
|
|
|
PFN_COUNT SizeInPages, PageTableCount;
|
2009-07-11 06:46:39 +00:00
|
|
|
ULONG i;
|
|
|
|
KIRQL OldIrql;
|
|
|
|
PLIST_ENTRY NextEntry, NextHead, LastHead;
|
2009-07-12 13:02:05 +00:00
|
|
|
PMMPTE PointerPte, StartPte;
|
[NTOS]: Use SYSTEM_PD_SIZE instead of assuming that this is PAGE_SIZE, since this is not the case on (future) ARM and (current) AMD64 ports.
[NTOS]: Remove some magic numbers in the pool code, using PTE_COUNT, MiAddressToPde, when needed. Also, the expansion code uses PDEs, not PTEs, so differentiate this, because on some systems (ARM), there are different structures for both.
[NTOS]: Use MI_WRITE_INVALID_PTE.
ARM3 paged pool now works, the expansion bug has been fixed (and the code is more portable). Expect to see it gradually enabled soon.
svn path=/trunk/; revision=48939
2010-09-30 03:26:13 +00:00
|
|
|
PMMPDE PointerPde;
|
|
|
|
ULONG EndAllocation;
|
2009-07-12 13:02:05 +00:00
|
|
|
MMPTE TempPte;
|
[NTOS]: Use SYSTEM_PD_SIZE instead of assuming that this is PAGE_SIZE, since this is not the case on (future) ARM and (current) AMD64 ports.
[NTOS]: Remove some magic numbers in the pool code, using PTE_COUNT, MiAddressToPde, when needed. Also, the expansion code uses PDEs, not PTEs, so differentiate this, because on some systems (ARM), there are different structures for both.
[NTOS]: Use MI_WRITE_INVALID_PTE.
ARM3 paged pool now works, the expansion bug has been fixed (and the code is more portable). Expect to see it gradually enabled soon.
svn path=/trunk/; revision=48939
2010-09-30 03:26:13 +00:00
|
|
|
MMPDE TempPde;
|
2009-07-11 06:46:39 +00:00
|
|
|
PMMPFN Pfn1;
|
2010-01-02 16:10:11 +00:00
|
|
|
PVOID BaseVa, BaseVaStart;
|
2009-07-11 06:46:39 +00:00
|
|
|
PMMFREE_POOL_ENTRY FreeEntry;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Figure out how big the allocation is in pages
|
|
|
|
//
|
[HAL/NDK]
- Make Vector parameter in HalEnableSystemInterrupt, HalDisableSystemInterrupt and HalBeginSystemInterrupt an ULONG, not an UCHAR
[NDK]
- 64bit fixes for HANDLE_TABLE, KPROCESS, SECTION_IMAGE_INFORMATION, MMADDRESS_LIST, MMVAD_FLAGS, MMVAD, MMVAD_LONG, MMVAD_SHORT, MEMORY_DESCRIPTOR, MEMORY_ALLOCATION_DESCRIPTOR, LdrVerifyMappedImageMatchesChecksum
- KDPC_DATA::DpcQueueDepth is signed on amd64, unsigned on x86
[NTOSKRNL]
- Fix hundreds of MSVC and amd64 warnings
- add a pragma message to FstubFixupEfiPartition, since it looks broken
- Move portable Ke constants from <arch>/cpu.c to krnlinit.c
- Fixed a bug in amd64 KiGeneralProtectionFaultHandler
svn path=/trunk/; revision=53734
2011-09-18 13:11:45 +00:00
|
|
|
SizeInPages = (PFN_COUNT)BYTES_TO_PAGES(SizeInBytes);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2012-04-19 14:33:53 +00:00
|
|
|
//
|
|
|
|
// Check for overflow
|
|
|
|
//
|
|
|
|
if (SizeInPages == 0)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Fail
|
|
|
|
//
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2009-10-15 21:23:24 +00:00
|
|
|
//
|
|
|
|
// Handle paged pool
|
|
|
|
//
|
2010-08-29 19:13:08 +00:00
|
|
|
if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool)
|
2009-10-15 21:23:24 +00:00
|
|
|
{
|
2012-03-04 17:56:00 +00:00
|
|
|
//
|
|
|
|
// If only one page is being requested, try to grab it from the S-LIST
|
|
|
|
//
|
|
|
|
if ((SizeInPages == 1) && (ExQueryDepthSList(&MiPagedPoolSListHead)))
|
|
|
|
{
|
|
|
|
BaseVa = InterlockedPopEntrySList(&MiPagedPoolSListHead);
|
|
|
|
if (BaseVa) return BaseVa;
|
|
|
|
}
|
|
|
|
|
2009-10-15 21:23:24 +00:00
|
|
|
//
|
|
|
|
// Lock the paged pool mutex
|
|
|
|
//
|
|
|
|
KeAcquireGuardedMutex(&MmPagedPoolMutex);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-10-15 21:23:24 +00:00
|
|
|
//
|
|
|
|
// Find some empty allocation space
|
|
|
|
//
|
|
|
|
i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
|
|
|
|
SizeInPages,
|
|
|
|
MmPagedPoolInfo.PagedPoolHint);
|
|
|
|
if (i == 0xFFFFFFFF)
|
|
|
|
{
|
|
|
|
//
|
2010-01-02 16:10:11 +00:00
|
|
|
// Get the page bit count
|
2009-10-15 21:23:24 +00:00
|
|
|
//
|
2020-02-05 22:48:26 +00:00
|
|
|
i = ((SizeInPages - 1) / PTE_PER_PAGE) + 1;
|
2013-09-11 19:59:59 +00:00
|
|
|
DPRINT("Paged pool expansion: %lu %x\n", i, SizeInPages);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-01-02 16:10:11 +00:00
|
|
|
//
|
|
|
|
// Check if there is enough paged pool expansion space left
|
|
|
|
//
|
|
|
|
if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
|
2010-11-24 15:21:45 +00:00
|
|
|
(PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
|
2010-01-02 16:10:11 +00:00
|
|
|
{
|
|
|
|
//
|
|
|
|
// Out of memory!
|
|
|
|
//
|
2017-09-02 15:18:02 +00:00
|
|
|
DPRINT1("FAILED to allocate %Iu bytes from paged pool\n", SizeInBytes);
|
2010-01-02 16:10:11 +00:00
|
|
|
KeReleaseGuardedMutex(&MmPagedPoolMutex);
|
|
|
|
return NULL;
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-01-02 16:10:11 +00:00
|
|
|
//
|
|
|
|
// Check if we'll have to expand past the last PTE we have available
|
2010-12-26 15:23:03 +00:00
|
|
|
//
|
2010-01-02 16:10:11 +00:00
|
|
|
if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
|
2010-11-24 15:21:45 +00:00
|
|
|
(PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
|
2010-01-02 16:10:11 +00:00
|
|
|
{
|
|
|
|
//
|
|
|
|
// We can only support this much then
|
|
|
|
//
|
2015-05-10 19:35:00 +00:00
|
|
|
PointerPde = MiPteToPde(MmPagedPoolInfo.LastPteForPagedPool);
|
[HAL/NDK]
- Make Vector parameter in HalEnableSystemInterrupt, HalDisableSystemInterrupt and HalBeginSystemInterrupt an ULONG, not an UCHAR
[NDK]
- 64bit fixes for HANDLE_TABLE, KPROCESS, SECTION_IMAGE_INFORMATION, MMADDRESS_LIST, MMVAD_FLAGS, MMVAD, MMVAD_LONG, MMVAD_SHORT, MEMORY_DESCRIPTOR, MEMORY_ALLOCATION_DESCRIPTOR, LdrVerifyMappedImageMatchesChecksum
- KDPC_DATA::DpcQueueDepth is signed on amd64, unsigned on x86
[NTOSKRNL]
- Fix hundreds of MSVC and amd64 warnings
- add a pragma message to FstubFixupEfiPartition, since it looks broken
- Move portable Ke constants from <arch>/cpu.c to krnlinit.c
- Fixed a bug in amd64 KiGeneralProtectionFaultHandler
svn path=/trunk/; revision=53734
2011-09-18 13:11:45 +00:00
|
|
|
PageTableCount = (PFN_COUNT)(PointerPde + 1 -
|
|
|
|
MmPagedPoolInfo.NextPdeForPagedPoolExpansion);
|
2010-11-24 15:21:45 +00:00
|
|
|
ASSERT(PageTableCount < i);
|
|
|
|
i = PageTableCount;
|
2010-01-02 16:10:11 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Otherwise, there is plenty of space left for this expansion
|
|
|
|
//
|
2010-11-24 15:21:45 +00:00
|
|
|
PageTableCount = i;
|
2010-01-02 16:10:11 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-01-02 16:10:11 +00:00
|
|
|
//
|
[NTOS]: Use SYSTEM_PD_SIZE instead of assuming that this is PAGE_SIZE, since this is not the case on (future) ARM and (current) AMD64 ports.
[NTOS]: Remove some magic numbers in the pool code, using PTE_COUNT, MiAddressToPde, when needed. Also, the expansion code uses PDEs, not PTEs, so differentiate this, because on some systems (ARM), there are different structures for both.
[NTOS]: Use MI_WRITE_INVALID_PTE.
ARM3 paged pool now works, the expansion bug has been fixed (and the code is more portable). Expect to see it gradually enabled soon.
svn path=/trunk/; revision=48939
2010-09-30 03:26:13 +00:00
|
|
|
// Get the template PDE we'll use to expand
|
2010-01-02 16:10:11 +00:00
|
|
|
//
|
[NTOS]: Use SYSTEM_PD_SIZE instead of assuming that this is PAGE_SIZE, since this is not the case on (future) ARM and (current) AMD64 ports.
[NTOS]: Remove some magic numbers in the pool code, using PTE_COUNT, MiAddressToPde, when needed. Also, the expansion code uses PDEs, not PTEs, so differentiate this, because on some systems (ARM), there are different structures for both.
[NTOS]: Use MI_WRITE_INVALID_PTE.
ARM3 paged pool now works, the expansion bug has been fixed (and the code is more portable). Expect to see it gradually enabled soon.
svn path=/trunk/; revision=48939
2010-09-30 03:26:13 +00:00
|
|
|
TempPde = ValidKernelPde;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-01-02 16:10:11 +00:00
|
|
|
//
|
|
|
|
// Get the first PTE in expansion space
|
|
|
|
//
|
[NTOS]: Use SYSTEM_PD_SIZE instead of assuming that this is PAGE_SIZE, since this is not the case on (future) ARM and (current) AMD64 ports.
[NTOS]: Remove some magic numbers in the pool code, using PTE_COUNT, MiAddressToPde, when needed. Also, the expansion code uses PDEs, not PTEs, so differentiate this, because on some systems (ARM), there are different structures for both.
[NTOS]: Use MI_WRITE_INVALID_PTE.
ARM3 paged pool now works, the expansion bug has been fixed (and the code is more portable). Expect to see it gradually enabled soon.
svn path=/trunk/; revision=48939
2010-09-30 03:26:13 +00:00
|
|
|
PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
|
2011-01-22 09:43:52 +00:00
|
|
|
BaseVa = MiPdeToPte(PointerPde);
|
2010-01-02 16:10:11 +00:00
|
|
|
BaseVaStart = BaseVa;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-01-02 16:10:11 +00:00
|
|
|
//
|
|
|
|
// Lock the PFN database and loop pages
|
2010-12-26 15:23:03 +00:00
|
|
|
//
|
2017-11-21 22:33:42 +00:00
|
|
|
OldIrql = MiAcquirePfnLock();
|
2010-01-02 16:10:11 +00:00
|
|
|
do
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// It should not already be valid
|
|
|
|
//
|
[NTOS]: Use SYSTEM_PD_SIZE instead of assuming that this is PAGE_SIZE, since this is not the case on (future) ARM and (current) AMD64 ports.
[NTOS]: Remove some magic numbers in the pool code, using PTE_COUNT, MiAddressToPde, when needed. Also, the expansion code uses PDEs, not PTEs, so differentiate this, because on some systems (ARM), there are different structures for both.
[NTOS]: Use MI_WRITE_INVALID_PTE.
ARM3 paged pool now works, the expansion bug has been fixed (and the code is more portable). Expect to see it gradually enabled soon.
svn path=/trunk/; revision=48939
2010-09-30 03:26:13 +00:00
|
|
|
ASSERT(PointerPde->u.Hard.Valid == 0);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-06-04 22:08:40 +00:00
|
|
|
/* Request a page */
|
2010-11-02 15:16:22 +00:00
|
|
|
MI_SET_USAGE(MI_USAGE_PAGED_POOL);
|
|
|
|
MI_SET_PROCESS2("Kernel");
|
2010-09-29 01:10:28 +00:00
|
|
|
PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
|
[NTOS]: Use SYSTEM_PD_SIZE instead of assuming that this is PAGE_SIZE, since this is not the case on (future) ARM and (current) AMD64 ports.
[NTOS]: Remove some magic numbers in the pool code, using PTE_COUNT, MiAddressToPde, when needed. Also, the expansion code uses PDEs, not PTEs, so differentiate this, because on some systems (ARM), there are different structures for both.
[NTOS]: Use MI_WRITE_INVALID_PTE.
ARM3 paged pool now works, the expansion bug has been fixed (and the code is more portable). Expect to see it gradually enabled soon.
svn path=/trunk/; revision=48939
2010-09-30 03:26:13 +00:00
|
|
|
TempPde.u.Hard.PageFrameNumber = PageFrameNumber;
|
2010-07-26 21:45:42 +00:00
|
|
|
#if (_MI_PAGING_LEVELS >= 3)
|
|
|
|
/* On PAE/x64 systems, there's no double-buffering */
|
2018-02-04 18:20:56 +00:00
|
|
|
/* Initialize the PFN entry for it */
|
|
|
|
MiInitializePfnForOtherProcess(PageFrameNumber,
|
|
|
|
(PMMPTE)PointerPde,
|
|
|
|
PFN_FROM_PTE(MiAddressToPte(PointerPde)));
|
|
|
|
|
2018-08-20 21:52:42 +00:00
|
|
|
/* Write the actual PDE now */
|
|
|
|
MI_WRITE_VALID_PDE(PointerPde, TempPde);
|
2010-07-26 21:45:42 +00:00
|
|
|
#else
|
2010-01-02 16:10:11 +00:00
|
|
|
//
|
|
|
|
// Save it into our double-buffered system page directory
|
|
|
|
//
|
2010-09-30 14:48:03 +00:00
|
|
|
MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-06-04 22:08:40 +00:00
|
|
|
/* Initialize the PFN */
|
|
|
|
MiInitializePfnForOtherProcess(PageFrameNumber,
|
2010-11-24 15:21:45 +00:00
|
|
|
(PMMPTE)PointerPde,
|
2020-02-05 22:48:26 +00:00
|
|
|
MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_PER_PAGE]);
|
2018-02-04 18:20:56 +00:00
|
|
|
#endif
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-01-02 16:10:11 +00:00
|
|
|
//
|
|
|
|
// Move on to the next expansion address
|
|
|
|
//
|
[NTOS]: Use SYSTEM_PD_SIZE instead of assuming that this is PAGE_SIZE, since this is not the case on (future) ARM and (current) AMD64 ports.
[NTOS]: Remove some magic numbers in the pool code, using PTE_COUNT, MiAddressToPde, when needed. Also, the expansion code uses PDEs, not PTEs, so differentiate this, because on some systems (ARM), there are different structures for both.
[NTOS]: Use MI_WRITE_INVALID_PTE.
ARM3 paged pool now works, the expansion bug has been fixed (and the code is more portable). Expect to see it gradually enabled soon.
svn path=/trunk/; revision=48939
2010-09-30 03:26:13 +00:00
|
|
|
PointerPde++;
|
2010-01-02 16:10:11 +00:00
|
|
|
BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
|
[NTOS]: Use SYSTEM_PD_SIZE instead of assuming that this is PAGE_SIZE, since this is not the case on (future) ARM and (current) AMD64 ports.
[NTOS]: Remove some magic numbers in the pool code, using PTE_COUNT, MiAddressToPde, when needed. Also, the expansion code uses PDEs, not PTEs, so differentiate this, because on some systems (ARM), there are different structures for both.
[NTOS]: Use MI_WRITE_INVALID_PTE.
ARM3 paged pool now works, the expansion bug has been fixed (and the code is more portable). Expect to see it gradually enabled soon.
svn path=/trunk/; revision=48939
2010-09-30 03:26:13 +00:00
|
|
|
i--;
|
|
|
|
} while (i > 0);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-01-02 16:10:11 +00:00
|
|
|
//
|
|
|
|
// Release the PFN database lock
|
2010-12-26 15:23:03 +00:00
|
|
|
//
|
2017-11-21 22:33:42 +00:00
|
|
|
MiReleasePfnLock(OldIrql);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-01-02 16:10:11 +00:00
|
|
|
//
|
|
|
|
// These pages are now available, clear their availability bits
|
|
|
|
//
|
[HAL/NDK]
- Make Vector parameter in HalEnableSystemInterrupt, HalDisableSystemInterrupt and HalBeginSystemInterrupt an ULONG, not an UCHAR
[NDK]
- 64bit fixes for HANDLE_TABLE, KPROCESS, SECTION_IMAGE_INFORMATION, MMADDRESS_LIST, MMVAD_FLAGS, MMVAD, MMVAD_LONG, MMVAD_SHORT, MEMORY_DESCRIPTOR, MEMORY_ALLOCATION_DESCRIPTOR, LdrVerifyMappedImageMatchesChecksum
- KDPC_DATA::DpcQueueDepth is signed on amd64, unsigned on x86
[NTOSKRNL]
- Fix hundreds of MSVC and amd64 warnings
- add a pragma message to FstubFixupEfiPartition, since it looks broken
- Move portable Ke constants from <arch>/cpu.c to krnlinit.c
- Fixed a bug in amd64 KiGeneralProtectionFaultHandler
svn path=/trunk/; revision=53734
2011-09-18 13:11:45 +00:00
|
|
|
EndAllocation = (ULONG)(MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
|
2020-02-05 22:48:26 +00:00
|
|
|
(PMMPDE)MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
|
|
|
|
PTE_PER_PAGE;
|
2010-01-02 16:10:11 +00:00
|
|
|
RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
|
[NTOS]: Use SYSTEM_PD_SIZE instead of assuming that this is PAGE_SIZE, since this is not the case on (future) ARM and (current) AMD64 ports.
[NTOS]: Remove some magic numbers in the pool code, using PTE_COUNT, MiAddressToPde, when needed. Also, the expansion code uses PDEs, not PTEs, so differentiate this, because on some systems (ARM), there are different structures for both.
[NTOS]: Use MI_WRITE_INVALID_PTE.
ARM3 paged pool now works, the expansion bug has been fixed (and the code is more portable). Expect to see it gradually enabled soon.
svn path=/trunk/; revision=48939
2010-09-30 03:26:13 +00:00
|
|
|
EndAllocation,
|
2020-02-05 22:48:26 +00:00
|
|
|
PageTableCount * PTE_PER_PAGE);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-01-02 16:10:11 +00:00
|
|
|
//
|
|
|
|
// Update the next expansion location
|
|
|
|
//
|
2010-11-24 15:21:45 +00:00
|
|
|
MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-01-02 16:10:11 +00:00
|
|
|
//
|
|
|
|
// Zero out the newly available memory
|
|
|
|
//
|
2010-11-24 15:21:45 +00:00
|
|
|
RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-01-02 16:10:11 +00:00
|
|
|
//
|
|
|
|
// Now try consuming the pages again
|
|
|
|
//
|
|
|
|
i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
|
|
|
|
SizeInPages,
|
|
|
|
0);
|
2010-12-26 15:23:03 +00:00
|
|
|
if (i == 0xFFFFFFFF)
|
2010-01-02 16:10:11 +00:00
|
|
|
{
|
|
|
|
//
|
|
|
|
// Out of memory!
|
|
|
|
//
|
2017-09-02 15:18:02 +00:00
|
|
|
DPRINT1("FAILED to allocate %Iu bytes from paged pool\n", SizeInBytes);
|
2010-01-02 16:10:11 +00:00
|
|
|
KeReleaseGuardedMutex(&MmPagedPoolMutex);
|
|
|
|
return NULL;
|
|
|
|
}
|
2009-10-15 21:23:24 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-10-15 21:23:24 +00:00
|
|
|
//
|
|
|
|
// Update the pool hint if the request was just one page
|
|
|
|
//
|
|
|
|
if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-10-15 21:23:24 +00:00
|
|
|
//
|
|
|
|
// Update the end bitmap so we know the bounds of this allocation when
|
|
|
|
// the time comes to free it
|
|
|
|
//
|
[NTOS]: Use SYSTEM_PD_SIZE instead of assuming that this is PAGE_SIZE, since this is not the case on (future) ARM and (current) AMD64 ports.
[NTOS]: Remove some magic numbers in the pool code, using PTE_COUNT, MiAddressToPde, when needed. Also, the expansion code uses PDEs, not PTEs, so differentiate this, because on some systems (ARM), there are different structures for both.
[NTOS]: Use MI_WRITE_INVALID_PTE.
ARM3 paged pool now works, the expansion bug has been fixed (and the code is more portable). Expect to see it gradually enabled soon.
svn path=/trunk/; revision=48939
2010-09-30 03:26:13 +00:00
|
|
|
EndAllocation = i + SizeInPages - 1;
|
|
|
|
RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-10-15 21:23:24 +00:00
|
|
|
//
|
|
|
|
// Now we can release the lock (it mainly protects the bitmap)
|
|
|
|
//
|
|
|
|
KeReleaseGuardedMutex(&MmPagedPoolMutex);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-10-15 21:23:24 +00:00
|
|
|
//
|
|
|
|
// Now figure out where this allocation starts
|
|
|
|
//
|
|
|
|
BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-10-15 21:23:24 +00:00
|
|
|
//
|
|
|
|
// Flush the TLB
|
|
|
|
//
|
|
|
|
KeFlushEntireTb(TRUE, TRUE);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-06-04 20:18:27 +00:00
|
|
|
/* Setup a demand-zero writable PTE */
|
|
|
|
MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-10-15 21:23:24 +00:00
|
|
|
//
|
|
|
|
// Find the first and last PTE, then loop them all
|
|
|
|
//
|
|
|
|
PointerPte = MiAddressToPte(BaseVa);
|
|
|
|
StartPte = PointerPte + SizeInPages;
|
|
|
|
do
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Write the demand zero PTE and keep going
|
|
|
|
//
|
[NTOS]: Use SYSTEM_PD_SIZE instead of assuming that this is PAGE_SIZE, since this is not the case on (future) ARM and (current) AMD64 ports.
[NTOS]: Remove some magic numbers in the pool code, using PTE_COUNT, MiAddressToPde, when needed. Also, the expansion code uses PDEs, not PTEs, so differentiate this, because on some systems (ARM), there are different structures for both.
[NTOS]: Use MI_WRITE_INVALID_PTE.
ARM3 paged pool now works, the expansion bug has been fixed (and the code is more portable). Expect to see it gradually enabled soon.
svn path=/trunk/; revision=48939
2010-09-30 03:26:13 +00:00
|
|
|
MI_WRITE_INVALID_PTE(PointerPte, TempPte);
|
|
|
|
} while (++PointerPte < StartPte);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-10-15 21:23:24 +00:00
|
|
|
//
|
|
|
|
// Return the allocation address to the caller
|
|
|
|
//
|
|
|
|
return BaseVa;
|
2010-12-26 15:23:03 +00:00
|
|
|
}
|
|
|
|
|
2012-03-04 17:56:00 +00:00
|
|
|
//
|
|
|
|
// If only one page is being requested, try to grab it from the S-LIST
|
|
|
|
//
|
|
|
|
if ((SizeInPages == 1) && (ExQueryDepthSList(&MiNonPagedPoolSListHead)))
|
|
|
|
{
|
|
|
|
BaseVa = InterlockedPopEntrySList(&MiNonPagedPoolSListHead);
|
2019-11-25 18:32:06 +00:00
|
|
|
if (BaseVa) return BaseVa;
|
2012-03-04 17:56:00 +00:00
|
|
|
}
|
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Allocations of less than 4 pages go into their individual buckets
|
|
|
|
//
|
2021-06-14 01:50:01 +00:00
|
|
|
i = min(SizeInPages, MI_MAX_FREE_PAGE_LISTS) - 1;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Loop through all the free page lists based on the page index
|
|
|
|
//
|
|
|
|
NextHead = &MmNonPagedPoolFreeListHead[i];
|
|
|
|
LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Acquire the nonpaged pool lock
|
|
|
|
//
|
|
|
|
OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
|
|
|
|
do
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Now loop through all the free page entries in this given list
|
|
|
|
//
|
|
|
|
NextEntry = NextHead->Flink;
|
|
|
|
while (NextEntry != NextHead)
|
|
|
|
{
|
2010-08-29 19:27:58 +00:00
|
|
|
/* Is freed non paged pool enabled */
|
|
|
|
if (MmProtectFreedNonPagedPool)
|
|
|
|
{
|
|
|
|
/* We need to be able to touch this page, unprotect it */
|
2011-08-18 19:50:19 +00:00
|
|
|
MiUnProtectFreeNonPagedPool(NextEntry, 0);
|
2010-08-29 19:27:58 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Grab the entry and see if it can handle our allocation
|
|
|
|
//
|
|
|
|
FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
|
2010-08-29 19:32:25 +00:00
|
|
|
ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
|
2009-07-11 06:46:39 +00:00
|
|
|
if (FreeEntry->Size >= SizeInPages)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// It does, so consume the pages from here
|
|
|
|
//
|
|
|
|
FreeEntry->Size -= SizeInPages;
|
|
|
|
|
|
|
|
//
|
|
|
|
// The allocation will begin in this free page area
|
|
|
|
//
|
|
|
|
BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
|
|
|
|
(FreeEntry->Size << PAGE_SHIFT));
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:27:58 +00:00
|
|
|
/* Remove the item from the list, depending if pool is protected */
|
2013-11-17 21:58:48 +00:00
|
|
|
if (MmProtectFreedNonPagedPool)
|
|
|
|
MiProtectedPoolRemoveEntryList(&FreeEntry->List);
|
|
|
|
else
|
2010-08-29 19:27:58 +00:00
|
|
|
RemoveEntryList(&FreeEntry->List);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// However, check if it's still got space left
|
|
|
|
//
|
|
|
|
if (FreeEntry->Size != 0)
|
|
|
|
{
|
2010-08-29 19:27:58 +00:00
|
|
|
/* Check which list to insert this entry into */
|
2021-06-14 01:50:01 +00:00
|
|
|
i = min(FreeEntry->Size, MI_MAX_FREE_PAGE_LISTS) - 1;
|
2010-08-29 19:27:58 +00:00
|
|
|
|
|
|
|
/* Insert the entry into the free list head, check for prot. pool */
|
2013-11-17 21:58:48 +00:00
|
|
|
if (MmProtectFreedNonPagedPool)
|
|
|
|
MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
|
|
|
|
else
|
2010-08-29 19:27:58 +00:00
|
|
|
InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:27:58 +00:00
|
|
|
/* Is freed non paged pool protected? */
|
|
|
|
if (MmProtectFreedNonPagedPool)
|
|
|
|
{
|
|
|
|
/* Protect the freed pool! */
|
|
|
|
MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
|
|
|
|
}
|
2009-07-11 06:46:39 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Grab the PTE for this allocation
|
|
|
|
//
|
|
|
|
PointerPte = MiAddressToPte(BaseVa);
|
|
|
|
ASSERT(PointerPte->u.Hard.Valid == 1);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Grab the PFN NextEntry and index
|
|
|
|
//
|
|
|
|
Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Now mark it as the beginning of an allocation
|
|
|
|
//
|
|
|
|
ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
|
|
|
|
Pfn1->u3.e1.StartOfAllocation = 1;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
[NTOS]: Remove useless variables in kernel code that were set, but never actually used (dead code, tests, copy/pasters). If a variable was set but not used because of missing/#if'ed out code, a note was added instead.
[NTOS]: In the process, fix bugs in the Event dispatcher code that used Win32 EVENT_TYPE instead of NT KOBJECTS enumeration.
[NTOS]: Fix a bug in ObpInsertHandleCount, where the object access check was being done with the previous mode, instead of honoring the probe mode, which is defined by OBJ_FORCE_ACCESS_CHECK.
[NTOS]: Fix a bug in a section function which was always returning STATUS_SUCCESS, now it returns the result of the previous Status = function assignment. If this isn't desired, then don't check for the Status anymore.
[NTOS]: Note that MDL code does not support SkipBytes argument. If it is used, MDL could be invalid.
[NTOS]: Add checks for VerifierAllocation and set it when needed (WIP).
[NTOS]: Clarify what _WORKING_LINKER_ is, and the legal risks in continuing to use a linker that builds non-Microsoft drivers when used with headers whose EULA specify that they can only be used for Microsoft drivers.
svn path=/trunk/; revision=48692
2010-09-04 08:17:17 +00:00
|
|
|
/* Mark it as special pool if needed */
|
|
|
|
ASSERT(Pfn1->u4.VerifierAllocation == 0);
|
2013-09-20 05:46:29 +00:00
|
|
|
if (PoolType & VERIFIER_POOL_MASK)
|
|
|
|
{
|
|
|
|
Pfn1->u4.VerifierAllocation = 1;
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Check if the allocation is larger than one page
|
|
|
|
//
|
|
|
|
if (SizeInPages != 1)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Navigate to the last PFN entry and PTE
|
|
|
|
//
|
|
|
|
PointerPte += SizeInPages - 1;
|
|
|
|
ASSERT(PointerPte->u.Hard.Valid == 1);
|
|
|
|
Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Mark this PFN as the last (might be the same as the first)
|
|
|
|
//
|
|
|
|
ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
|
|
|
|
Pfn1->u3.e1.EndOfAllocation = 1;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Release the nonpaged pool lock, and return the allocation
|
|
|
|
//
|
|
|
|
KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
|
|
|
|
return BaseVa;
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Try the next free page entry
|
|
|
|
//
|
|
|
|
NextEntry = FreeEntry->List.Flink;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:27:58 +00:00
|
|
|
/* Is freed non paged pool protected? */
|
|
|
|
if (MmProtectFreedNonPagedPool)
|
|
|
|
{
|
|
|
|
/* Protect the freed pool! */
|
|
|
|
MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
|
|
|
|
}
|
2009-07-11 06:46:39 +00:00
|
|
|
}
|
|
|
|
} while (++NextHead < LastHead);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// If we got here, we're out of space.
|
|
|
|
// Start by releasing the lock
|
|
|
|
//
|
2009-07-12 13:02:05 +00:00
|
|
|
KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
|
|
|
|
|
|
|
|
//
|
|
|
|
// Allocate some system PTEs
|
|
|
|
//
|
|
|
|
StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
|
|
|
|
PointerPte = StartPte;
|
|
|
|
if (StartPte == NULL)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Ran out of memory
|
|
|
|
//
|
2021-03-18 10:48:09 +00:00
|
|
|
DPRINT("Out of NP Expansion Pool\n");
|
2009-07-12 13:02:05 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-12 13:02:05 +00:00
|
|
|
//
|
|
|
|
// Acquire the pool lock now
|
|
|
|
//
|
|
|
|
OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-12 13:02:05 +00:00
|
|
|
//
|
|
|
|
// Lock the PFN database too
|
|
|
|
//
|
2017-11-21 22:36:34 +00:00
|
|
|
MiAcquirePfnLockAtDpcLevel();
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2021-02-22 10:43:04 +00:00
|
|
|
/* Check that we have enough available pages for this request */
|
|
|
|
if (MmAvailablePages < SizeInPages)
|
|
|
|
{
|
|
|
|
MiReleasePfnLockFromDpcLevel();
|
|
|
|
KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
|
|
|
|
|
|
|
|
MiReleaseSystemPtes(StartPte, SizeInPages, NonPagedPoolExpansion);
|
|
|
|
|
|
|
|
DPRINT1("OUT OF AVAILABLE PAGES! Required %lu, Available %lu\n", SizeInPages, MmAvailablePages);
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2009-07-12 13:02:05 +00:00
|
|
|
//
|
|
|
|
// Loop the pages
|
|
|
|
//
|
2010-02-10 13:56:54 +00:00
|
|
|
TempPte = ValidKernelPte;
|
2009-07-12 13:02:05 +00:00
|
|
|
do
|
|
|
|
{
|
2010-06-04 17:40:11 +00:00
|
|
|
/* Allocate a page */
|
2010-11-02 15:16:22 +00:00
|
|
|
MI_SET_USAGE(MI_USAGE_PAGED_POOL);
|
|
|
|
MI_SET_PROCESS2("Kernel");
|
2010-09-29 01:10:28 +00:00
|
|
|
PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-05-12 22:47:46 +00:00
|
|
|
/* Get the PFN entry for it and fill it out */
|
2009-07-12 13:02:05 +00:00
|
|
|
Pfn1 = MiGetPfnEntry(PageFrameNumber);
|
2010-05-12 22:47:46 +00:00
|
|
|
Pfn1->u3.e2.ReferenceCount = 1;
|
|
|
|
Pfn1->u2.ShareCount = 1;
|
|
|
|
Pfn1->PteAddress = PointerPte;
|
|
|
|
Pfn1->u3.e1.PageLocation = ActiveAndValid;
|
|
|
|
Pfn1->u4.VerifierAllocation = 0;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-05-12 22:47:46 +00:00
|
|
|
/* Write the PTE for it */
|
2009-07-12 13:02:05 +00:00
|
|
|
TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
|
2010-06-06 18:45:46 +00:00
|
|
|
MI_WRITE_VALID_PTE(PointerPte++, TempPte);
|
2009-07-12 13:02:05 +00:00
|
|
|
} while (--SizeInPages > 0);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-12 13:02:05 +00:00
|
|
|
//
|
|
|
|
// This is the last page
|
|
|
|
//
|
|
|
|
Pfn1->u3.e1.EndOfAllocation = 1;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-12 13:02:05 +00:00
|
|
|
//
|
|
|
|
// Get the first page and mark it as such
|
|
|
|
//
|
|
|
|
Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
|
|
|
|
Pfn1->u3.e1.StartOfAllocation = 1;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
[NTOS]: Remove useless variables in kernel code that were set, but never actually used (dead code, tests, copy/pasters). If a variable was set but not used because of missing/#if'ed out code, a note was added instead.
[NTOS]: In the process, fix bugs in the Event dispatcher code that used Win32 EVENT_TYPE instead of NT KOBJECTS enumeration.
[NTOS]: Fix a bug in ObpInsertHandleCount, where the object access check was being done with the previous mode, instead of honoring the probe mode, which is defined by OBJ_FORCE_ACCESS_CHECK.
[NTOS]: Fix a bug in a section function which was always returning STATUS_SUCCESS, now it returns the result of the previous Status = function assignment. If this isn't desired, then don't check for the Status anymore.
[NTOS]: Note that MDL code does not support SkipBytes argument. If it is used, MDL could be invalid.
[NTOS]: Add checks for VerifierAllocation and set it when needed (WIP).
[NTOS]: Clarify what _WORKING_LINKER_ is, and the legal risks in continuing to use a linker that builds non-Microsoft drivers when used with headers whose EULA specify that they can only be used for Microsoft drivers.
svn path=/trunk/; revision=48692
2010-09-04 08:17:17 +00:00
|
|
|
/* Mark it as a verifier allocation if needed */
|
|
|
|
ASSERT(Pfn1->u4.VerifierAllocation == 0);
|
2013-09-20 05:46:29 +00:00
|
|
|
if (PoolType & VERIFIER_POOL_MASK) Pfn1->u4.VerifierAllocation = 1;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-12 13:02:05 +00:00
|
|
|
//
|
|
|
|
// Release the PFN and nonpaged pool lock
|
|
|
|
//
|
2017-11-21 22:36:34 +00:00
|
|
|
MiReleasePfnLockFromDpcLevel();
|
2009-07-12 13:02:05 +00:00
|
|
|
KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
2009-07-12 13:02:05 +00:00
|
|
|
// Return the address
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
2019-11-25 18:32:06 +00:00
|
|
|
return MiPteToAddress(StartPte);
|
2009-07-11 06:46:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ULONG
|
|
|
|
NTAPI
|
|
|
|
MiFreePoolPages(IN PVOID StartingVa)
|
|
|
|
{
|
|
|
|
PMMPTE PointerPte, StartPte;
|
|
|
|
PMMPFN Pfn1, StartPfn;
|
[HAL/NDK]
- Make Vector parameter in HalEnableSystemInterrupt, HalDisableSystemInterrupt and HalBeginSystemInterrupt an ULONG, not an UCHAR
[NDK]
- 64bit fixes for HANDLE_TABLE, KPROCESS, SECTION_IMAGE_INFORMATION, MMADDRESS_LIST, MMVAD_FLAGS, MMVAD, MMVAD_LONG, MMVAD_SHORT, MEMORY_DESCRIPTOR, MEMORY_ALLOCATION_DESCRIPTOR, LdrVerifyMappedImageMatchesChecksum
- KDPC_DATA::DpcQueueDepth is signed on amd64, unsigned on x86
[NTOSKRNL]
- Fix hundreds of MSVC and amd64 warnings
- add a pragma message to FstubFixupEfiPartition, since it looks broken
- Move portable Ke constants from <arch>/cpu.c to krnlinit.c
- Fixed a bug in amd64 KiGeneralProtectionFaultHandler
svn path=/trunk/; revision=53734
2011-09-18 13:11:45 +00:00
|
|
|
PFN_COUNT FreePages, NumberOfPages;
|
2009-07-11 06:46:39 +00:00
|
|
|
KIRQL OldIrql;
|
|
|
|
PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
|
2009-10-15 21:23:24 +00:00
|
|
|
ULONG i, End;
|
[HAL/NDK]
- Make Vector parameter in HalEnableSystemInterrupt, HalDisableSystemInterrupt and HalBeginSystemInterrupt an ULONG, not an UCHAR
[NDK]
- 64bit fixes for HANDLE_TABLE, KPROCESS, SECTION_IMAGE_INFORMATION, MMADDRESS_LIST, MMVAD_FLAGS, MMVAD, MMVAD_LONG, MMVAD_SHORT, MEMORY_DESCRIPTOR, MEMORY_ALLOCATION_DESCRIPTOR, LdrVerifyMappedImageMatchesChecksum
- KDPC_DATA::DpcQueueDepth is signed on amd64, unsigned on x86
[NTOSKRNL]
- Fix hundreds of MSVC and amd64 warnings
- add a pragma message to FstubFixupEfiPartition, since it looks broken
- Move portable Ke constants from <arch>/cpu.c to krnlinit.c
- Fixed a bug in amd64 KiGeneralProtectionFaultHandler
svn path=/trunk/; revision=53734
2011-09-18 13:11:45 +00:00
|
|
|
ULONG_PTR Offset;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-10-15 21:23:24 +00:00
|
|
|
//
|
|
|
|
// Handle paged pool
|
|
|
|
//
|
|
|
|
if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Calculate the offset from the beginning of paged pool, and convert it
|
|
|
|
// into pages
|
|
|
|
//
|
[HAL/NDK]
- Make Vector parameter in HalEnableSystemInterrupt, HalDisableSystemInterrupt and HalBeginSystemInterrupt an ULONG, not an UCHAR
[NDK]
- 64bit fixes for HANDLE_TABLE, KPROCESS, SECTION_IMAGE_INFORMATION, MMADDRESS_LIST, MMVAD_FLAGS, MMVAD, MMVAD_LONG, MMVAD_SHORT, MEMORY_DESCRIPTOR, MEMORY_ALLOCATION_DESCRIPTOR, LdrVerifyMappedImageMatchesChecksum
- KDPC_DATA::DpcQueueDepth is signed on amd64, unsigned on x86
[NTOSKRNL]
- Fix hundreds of MSVC and amd64 warnings
- add a pragma message to FstubFixupEfiPartition, since it looks broken
- Move portable Ke constants from <arch>/cpu.c to krnlinit.c
- Fixed a bug in amd64 KiGeneralProtectionFaultHandler
svn path=/trunk/; revision=53734
2011-09-18 13:11:45 +00:00
|
|
|
Offset = (ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart;
|
|
|
|
i = (ULONG)(Offset >> PAGE_SHIFT);
|
2009-10-15 21:23:24 +00:00
|
|
|
End = i;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-10-15 21:23:24 +00:00
|
|
|
//
|
|
|
|
// Now use the end bitmap to scan until we find a set bit, meaning that
|
|
|
|
// this allocation finishes here
|
|
|
|
//
|
|
|
|
while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-10-15 21:23:24 +00:00
|
|
|
//
|
2012-03-04 17:56:00 +00:00
|
|
|
// Now calculate the total number of pages this allocation spans. If it's
|
|
|
|
// only one page, add it to the S-LIST instead of freeing it
|
2009-10-15 21:23:24 +00:00
|
|
|
//
|
|
|
|
NumberOfPages = End - i + 1;
|
2012-03-04 17:56:00 +00:00
|
|
|
if ((NumberOfPages == 1) &&
|
|
|
|
(ExQueryDepthSList(&MiPagedPoolSListHead) < MiPagedPoolSListMaximum))
|
|
|
|
{
|
|
|
|
InterlockedPushEntrySList(&MiPagedPoolSListHead, StartingVa);
|
|
|
|
return 1;
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-06-05 04:16:46 +00:00
|
|
|
/* Delete the actual pages */
|
|
|
|
PointerPte = MmPagedPoolInfo.FirstPteForPagedPool + i;
|
|
|
|
FreePages = MiDeleteSystemPageableVm(PointerPte, NumberOfPages, 0, NULL);
|
|
|
|
ASSERT(FreePages == NumberOfPages);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-10-15 21:23:24 +00:00
|
|
|
//
|
|
|
|
// Acquire the paged pool lock
|
|
|
|
//
|
|
|
|
KeAcquireGuardedMutex(&MmPagedPoolMutex);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-10-15 21:23:24 +00:00
|
|
|
//
|
|
|
|
// Clear the allocation and free bits
|
|
|
|
//
|
2011-05-24 17:55:27 +00:00
|
|
|
RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End);
|
2009-10-15 21:23:24 +00:00
|
|
|
RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-10-15 21:23:24 +00:00
|
|
|
//
|
|
|
|
// Update the hint if we need to
|
|
|
|
//
|
|
|
|
if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-10-15 21:23:24 +00:00
|
|
|
//
|
|
|
|
// Release the lock protecting the bitmaps
|
|
|
|
//
|
|
|
|
KeReleaseGuardedMutex(&MmPagedPoolMutex);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-10-15 21:23:24 +00:00
|
|
|
//
|
|
|
|
// And finally return the number of pages freed
|
|
|
|
//
|
|
|
|
return NumberOfPages;
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
2012-03-04 17:56:00 +00:00
|
|
|
// Get the first PTE and its corresponding PFN entry. If this is also the
|
|
|
|
// last PTE, meaning that this allocation was only for one page, push it into
|
|
|
|
// the S-LIST instead of freeing it
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
StartPte = PointerPte = MiAddressToPte(StartingVa);
|
|
|
|
StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
|
2012-03-04 17:56:00 +00:00
|
|
|
if ((Pfn1->u3.e1.EndOfAllocation == 1) &&
|
|
|
|
(ExQueryDepthSList(&MiNonPagedPoolSListHead) < MiNonPagedPoolSListMaximum))
|
|
|
|
{
|
|
|
|
InterlockedPushEntrySList(&MiNonPagedPoolSListHead, StartingVa);
|
|
|
|
return 1;
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Loop until we find the last PTE
|
|
|
|
//
|
|
|
|
while (Pfn1->u3.e1.EndOfAllocation == 0)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Keep going
|
|
|
|
//
|
|
|
|
PointerPte++;
|
|
|
|
Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Now we know how many pages we have
|
|
|
|
//
|
[HAL/NDK]
- Make Vector parameter in HalEnableSystemInterrupt, HalDisableSystemInterrupt and HalBeginSystemInterrupt an ULONG, not an UCHAR
[NDK]
- 64bit fixes for HANDLE_TABLE, KPROCESS, SECTION_IMAGE_INFORMATION, MMADDRESS_LIST, MMVAD_FLAGS, MMVAD, MMVAD_LONG, MMVAD_SHORT, MEMORY_DESCRIPTOR, MEMORY_ALLOCATION_DESCRIPTOR, LdrVerifyMappedImageMatchesChecksum
- KDPC_DATA::DpcQueueDepth is signed on amd64, unsigned on x86
[NTOSKRNL]
- Fix hundreds of MSVC and amd64 warnings
- add a pragma message to FstubFixupEfiPartition, since it looks broken
- Move portable Ke constants from <arch>/cpu.c to krnlinit.c
- Fixed a bug in amd64 KiGeneralProtectionFaultHandler
svn path=/trunk/; revision=53734
2011-09-18 13:11:45 +00:00
|
|
|
NumberOfPages = (PFN_COUNT)(PointerPte - StartPte + 1);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Acquire the nonpaged pool lock
|
|
|
|
//
|
|
|
|
OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Mark the first and last PTEs as not part of an allocation anymore
|
|
|
|
//
|
2010-12-26 15:23:03 +00:00
|
|
|
StartPfn->u3.e1.StartOfAllocation = 0;
|
2009-07-11 06:46:39 +00:00
|
|
|
Pfn1->u3.e1.EndOfAllocation = 0;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Assume we will free as many pages as the allocation was
|
|
|
|
//
|
|
|
|
FreePages = NumberOfPages;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Peek one page past the end of the allocation
|
|
|
|
//
|
|
|
|
PointerPte++;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Guard against going past initial nonpaged pool
|
|
|
|
//
|
|
|
|
if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// This page is on the outskirts of initial nonpaged pool, so ignore it
|
|
|
|
//
|
|
|
|
Pfn1 = NULL;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Sanity check */
|
|
|
|
ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Check if protected pool is enabled */
|
|
|
|
if (MmProtectFreedNonPagedPool)
|
|
|
|
{
|
|
|
|
/* The freed block will be merged, it must be made accessible */
|
2011-08-18 19:50:19 +00:00
|
|
|
MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0);
|
2010-08-29 19:13:08 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
2010-12-26 15:23:03 +00:00
|
|
|
// Otherwise, our entire allocation must've fit within the initial non
|
2009-07-11 06:46:39 +00:00
|
|
|
// paged pool, or the expansion nonpaged pool, so get the PFN entry of
|
|
|
|
// the next allocation
|
|
|
|
//
|
|
|
|
if (PointerPte->u.Hard.Valid == 1)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// It's either expansion or initial: get the PFN entry
|
|
|
|
//
|
2010-12-26 15:23:03 +00:00
|
|
|
Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
|
2009-07-11 06:46:39 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// This means we've reached the guard page that protects the end of
|
|
|
|
// the expansion nonpaged pool
|
|
|
|
//
|
|
|
|
Pfn1 = NULL;
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Check if this allocation actually exists
|
|
|
|
//
|
|
|
|
if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// It doesn't, so we should actually locate a free entry descriptor
|
|
|
|
//
|
|
|
|
FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
|
|
|
|
(NumberOfPages << PAGE_SHIFT));
|
2010-08-29 19:32:25 +00:00
|
|
|
ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
|
2009-07-11 06:46:39 +00:00
|
|
|
ASSERT(FreeEntry->Owner == FreeEntry);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Consume this entry's pages */
|
2009-07-11 06:46:39 +00:00
|
|
|
FreePages += FreeEntry->Size;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Remove the item from the list, depending if pool is protected */
|
2013-11-17 21:58:48 +00:00
|
|
|
if (MmProtectFreedNonPagedPool)
|
|
|
|
MiProtectedPoolRemoveEntryList(&FreeEntry->List);
|
|
|
|
else
|
2010-08-29 19:13:08 +00:00
|
|
|
RemoveEntryList(&FreeEntry->List);
|
2009-07-11 06:46:39 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Now get the official free entry we'll create for the caller's allocation
|
|
|
|
//
|
|
|
|
FreeEntry = StartingVa;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Check if the our allocation is the very first page
|
|
|
|
//
|
|
|
|
if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Then we can't do anything or we'll risk underflowing
|
|
|
|
//
|
|
|
|
Pfn1 = NULL;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Otherwise, get the PTE for the page right before our allocation
|
|
|
|
//
|
|
|
|
PointerPte -= NumberOfPages + 1;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Check if protected pool is enabled */
|
|
|
|
if (MmProtectFreedNonPagedPool)
|
|
|
|
{
|
|
|
|
/* The freed block will be merged, it must be made accessible */
|
2011-08-18 19:50:19 +00:00
|
|
|
MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0);
|
2010-08-29 19:13:08 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Check if this is valid pool, or a guard page */
|
2009-07-11 06:46:39 +00:00
|
|
|
if (PointerPte->u.Hard.Valid == 1)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// It's either expansion or initial nonpaged pool, get the PFN entry
|
|
|
|
//
|
|
|
|
Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// We must've reached the guard page, so don't risk touching it
|
|
|
|
//
|
|
|
|
Pfn1 = NULL;
|
|
|
|
}
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Check if there is a valid PFN entry for the page before the allocation
|
|
|
|
// and then check if this page was actually the end of an allocation.
|
|
|
|
// If it wasn't, then we know for sure it's a free page
|
|
|
|
//
|
|
|
|
if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Get the free entry descriptor for that given page range
|
|
|
|
//
|
|
|
|
FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
|
2010-08-29 19:32:25 +00:00
|
|
|
ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
|
2009-07-11 06:46:39 +00:00
|
|
|
FreeEntry = FreeEntry->Owner;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Check if protected pool is enabled */
|
|
|
|
if (MmProtectFreedNonPagedPool)
|
|
|
|
{
|
|
|
|
/* The freed block will be merged, it must be made accessible */
|
2011-08-18 19:50:19 +00:00
|
|
|
MiUnProtectFreeNonPagedPool(FreeEntry, 0);
|
2010-08-29 19:13:08 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
2021-06-14 01:50:01 +00:00
|
|
|
// Check if the entry is small enough (1-3 pages) to be indexed on a free list
|
2009-07-11 06:46:39 +00:00
|
|
|
// If it is, we'll want to re-insert it, since we're about to
|
|
|
|
// collapse our pages on top of it, which will change its count
|
|
|
|
//
|
2021-06-14 01:50:01 +00:00
|
|
|
if (FreeEntry->Size < MI_MAX_FREE_PAGE_LISTS)
|
2009-07-11 06:46:39 +00:00
|
|
|
{
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Remove the item from the list, depending if pool is protected */
|
2013-11-17 21:58:48 +00:00
|
|
|
if (MmProtectFreedNonPagedPool)
|
|
|
|
MiProtectedPoolRemoveEntryList(&FreeEntry->List);
|
|
|
|
else
|
2010-08-29 19:13:08 +00:00
|
|
|
RemoveEntryList(&FreeEntry->List);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Update its size
|
|
|
|
//
|
|
|
|
FreeEntry->Size += FreePages;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// And now find the new appropriate list to place it in
|
|
|
|
//
|
2021-06-14 01:50:01 +00:00
|
|
|
i = min(FreeEntry->Size, MI_MAX_FREE_PAGE_LISTS) - 1;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Insert the entry into the free list head, check for prot. pool */
|
2013-11-17 21:58:48 +00:00
|
|
|
if (MmProtectFreedNonPagedPool)
|
|
|
|
MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
|
|
|
|
else
|
2010-08-29 19:13:08 +00:00
|
|
|
InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
|
2009-07-11 06:46:39 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Otherwise, just combine our free pages into this entry
|
|
|
|
//
|
|
|
|
FreeEntry->Size += FreePages;
|
|
|
|
}
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Check if we were unable to do any compaction, and we'll stick with this
|
|
|
|
//
|
|
|
|
if (FreeEntry == StartingVa)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Well, now we are a free entry. At worse we just have our newly freed
|
|
|
|
// pages, at best we have our pages plus whatever entry came after us
|
|
|
|
//
|
|
|
|
FreeEntry->Size = FreePages;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Find the appropriate list we should be on
|
|
|
|
//
|
2021-06-14 01:50:01 +00:00
|
|
|
i = min(FreeEntry->Size, MI_MAX_FREE_PAGE_LISTS) - 1;
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Insert the entry into the free list head, check for prot. pool */
|
2013-11-17 21:58:48 +00:00
|
|
|
if (MmProtectFreedNonPagedPool)
|
|
|
|
MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
|
|
|
|
else
|
2010-08-29 19:13:08 +00:00
|
|
|
InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
|
2009-07-11 06:46:39 +00:00
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Just a sanity check
|
|
|
|
//
|
|
|
|
ASSERT(FreePages != 0);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// Get all the pages between our allocation and its end. These will all now
|
|
|
|
// become free page chunks.
|
|
|
|
//
|
|
|
|
NextEntry = StartingVa;
|
2010-12-26 15:23:03 +00:00
|
|
|
LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
|
2009-07-11 06:46:39 +00:00
|
|
|
do
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Link back to the parent free entry, and keep going
|
|
|
|
//
|
2010-08-29 19:27:58 +00:00
|
|
|
NextEntry->Owner = FreeEntry;
|
2010-08-29 19:32:25 +00:00
|
|
|
NextEntry->Signature = MM_FREE_POOL_SIGNATURE;
|
2009-07-11 06:46:39 +00:00
|
|
|
NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
|
|
|
|
} while (NextEntry != LastEntry);
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2010-08-29 19:13:08 +00:00
|
|
|
/* Is freed non paged pool protected? */
|
|
|
|
if (MmProtectFreedNonPagedPool)
|
|
|
|
{
|
|
|
|
/* Protect the freed pool! */
|
|
|
|
MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
|
|
|
|
}
|
2010-12-26 15:23:03 +00:00
|
|
|
|
2009-07-11 06:46:39 +00:00
|
|
|
//
|
|
|
|
// We're done, release the lock and let the caller know how much we freed
|
|
|
|
//
|
|
|
|
KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
|
|
|
|
return NumberOfPages;
|
|
|
|
}
|
|
|
|
|
2009-10-16 00:28:33 +00:00
|
|
|
|
|
|
|
//
// MiRaisePoolQuota: asked by the quota machinery whether the maximum pool
// quota for PoolType may be raised above CurrentMaxQuota, returning the new
// limit through NewMaxQuota.
//
BOOLEAN
NTAPI
MiRaisePoolQuota(IN POOL_TYPE PoolType,
                 IN ULONG CurrentMaxQuota,
                 OUT PULONG NewMaxQuota)
{
    //
    // Not implemented: PoolType is ignored and no pool-usage check is made.
    // The stub unconditionally grants an extra 64KB (65536 bytes) on top of
    // the current maximum and reports success.
    //
    UNIMPLEMENTED;
    *NewMaxQuota = CurrentMaxQuota + 65536;
    return TRUE;
}
|
|
|
|
|
2012-07-15 23:42:27 +00:00
|
|
|
NTSTATUS
|
|
|
|
NTAPI
|
|
|
|
MiInitializeSessionPool(VOID)
|
|
|
|
{
|
2015-05-10 19:35:00 +00:00
|
|
|
PMMPTE PointerPte, LastPte;
|
|
|
|
PMMPDE PointerPde, LastPde;
|
2012-07-15 23:42:27 +00:00
|
|
|
PFN_NUMBER PageFrameIndex, PdeCount;
|
|
|
|
PPOOL_DESCRIPTOR PoolDescriptor;
|
|
|
|
PMM_SESSION_SPACE SessionGlobal;
|
|
|
|
PMM_PAGED_POOL_INFO PagedPoolInfo;
|
|
|
|
NTSTATUS Status;
|
|
|
|
ULONG Index, PoolSize, BitmapSize;
|
|
|
|
PAGED_CODE();
|
|
|
|
|
|
|
|
/* Lock session pool */
|
|
|
|
SessionGlobal = MmSessionSpace->GlobalVirtualAddress;
|
|
|
|
KeInitializeGuardedMutex(&SessionGlobal->PagedPoolMutex);
|
|
|
|
|
|
|
|
/* Setup a valid pool descriptor */
|
|
|
|
PoolDescriptor = &MmSessionSpace->PagedPool;
|
|
|
|
ExInitializePoolDescriptor(PoolDescriptor,
|
|
|
|
PagedPoolSession,
|
|
|
|
0,
|
|
|
|
0,
|
|
|
|
&SessionGlobal->PagedPoolMutex);
|
|
|
|
|
|
|
|
/* Setup the pool addresses */
|
|
|
|
MmSessionSpace->PagedPoolStart = (PVOID)MiSessionPoolStart;
|
|
|
|
MmSessionSpace->PagedPoolEnd = (PVOID)((ULONG_PTR)MiSessionPoolEnd - 1);
|
|
|
|
DPRINT1("Session Pool Start: 0x%p End: 0x%p\n",
|
|
|
|
MmSessionSpace->PagedPoolStart, MmSessionSpace->PagedPoolEnd);
|
|
|
|
|
|
|
|
/* Reset all the counters */
|
|
|
|
PagedPoolInfo = &MmSessionSpace->PagedPoolInfo;
|
|
|
|
PagedPoolInfo->PagedPoolCommit = 0;
|
|
|
|
PagedPoolInfo->PagedPoolHint = 0;
|
|
|
|
PagedPoolInfo->AllocatedPagedPool = 0;
|
|
|
|
|
|
|
|
/* Compute PDE and PTE addresses */
|
|
|
|
PointerPde = MiAddressToPde(MmSessionSpace->PagedPoolStart);
|
|
|
|
PointerPte = MiAddressToPte(MmSessionSpace->PagedPoolStart);
|
|
|
|
LastPde = MiAddressToPde(MmSessionSpace->PagedPoolEnd);
|
|
|
|
LastPte = MiAddressToPte(MmSessionSpace->PagedPoolEnd);
|
|
|
|
|
|
|
|
/* Write them down */
|
|
|
|
MmSessionSpace->PagedPoolBasePde = PointerPde;
|
|
|
|
PagedPoolInfo->FirstPteForPagedPool = PointerPte;
|
|
|
|
PagedPoolInfo->LastPteForPagedPool = LastPte;
|
|
|
|
PagedPoolInfo->NextPdeForPagedPoolExpansion = PointerPde + 1;
|
|
|
|
|
|
|
|
/* Zero the PDEs */
|
|
|
|
PdeCount = LastPde - PointerPde;
|
|
|
|
RtlZeroMemory(PointerPde, (PdeCount + 1) * sizeof(MMPTE));
|
|
|
|
|
|
|
|
/* Initialize the PFN for the PDE */
|
|
|
|
Status = MiInitializeAndChargePfn(&PageFrameIndex,
|
|
|
|
PointerPde,
|
|
|
|
MmSessionSpace->SessionPageDirectoryIndex,
|
|
|
|
TRUE);
|
|
|
|
ASSERT(NT_SUCCESS(Status) == TRUE);
|
|
|
|
|
|
|
|
/* Initialize the first page table */
|
|
|
|
Index = (ULONG_PTR)MmSessionSpace->PagedPoolStart - (ULONG_PTR)MmSessionBase;
|
|
|
|
Index >>= 22;
|
2012-12-30 11:54:40 +00:00
|
|
|
#ifndef _M_AMD64 // FIXME
|
2012-07-15 23:42:27 +00:00
|
|
|
ASSERT(MmSessionSpace->PageTables[Index].u.Long == 0);
|
|
|
|
MmSessionSpace->PageTables[Index] = *PointerPde;
|
2012-12-30 11:54:40 +00:00
|
|
|
#endif
|
2012-07-15 23:42:27 +00:00
|
|
|
|
|
|
|
/* Bump up counters */
|
|
|
|
InterlockedIncrementSizeT(&MmSessionSpace->NonPageablePages);
|
|
|
|
InterlockedIncrementSizeT(&MmSessionSpace->CommittedPages);
|
|
|
|
|
|
|
|
/* Compute the size of the pool in pages, and of the bitmap for it */
|
|
|
|
PoolSize = MmSessionPoolSize >> PAGE_SHIFT;
|
|
|
|
BitmapSize = sizeof(RTL_BITMAP) + ((PoolSize + 31) / 32) * sizeof(ULONG);
|
|
|
|
|
|
|
|
/* Allocate and initialize the bitmap to track allocations */
|
|
|
|
PagedPoolInfo->PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
|
|
|
|
BitmapSize,
|
2016-01-05 19:53:07 +00:00
|
|
|
TAG_MM);
|
2012-07-15 23:42:27 +00:00
|
|
|
ASSERT(PagedPoolInfo->PagedPoolAllocationMap != NULL);
|
|
|
|
RtlInitializeBitMap(PagedPoolInfo->PagedPoolAllocationMap,
|
|
|
|
(PULONG)(PagedPoolInfo->PagedPoolAllocationMap + 1),
|
|
|
|
PoolSize);
|
|
|
|
|
|
|
|
/* Set all bits, but clear the first page table's worth */
|
|
|
|
RtlSetAllBits(PagedPoolInfo->PagedPoolAllocationMap);
|
|
|
|
RtlClearBits(PagedPoolInfo->PagedPoolAllocationMap, 0, PTE_PER_PAGE);
|
|
|
|
|
|
|
|
/* Allocate and initialize the bitmap to track free space */
|
|
|
|
PagedPoolInfo->EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
|
|
|
|
BitmapSize,
|
2016-01-05 19:53:07 +00:00
|
|
|
TAG_MM);
|
2012-07-15 23:42:27 +00:00
|
|
|
ASSERT(PagedPoolInfo->EndOfPagedPoolBitmap != NULL);
|
|
|
|
RtlInitializeBitMap(PagedPoolInfo->EndOfPagedPoolBitmap,
|
|
|
|
(PULONG)(PagedPoolInfo->EndOfPagedPoolBitmap + 1),
|
|
|
|
PoolSize);
|
|
|
|
|
|
|
|
/* Clear all the bits and return success */
|
|
|
|
RtlClearAllBits(PagedPoolInfo->EndOfPagedPoolBitmap);
|
|
|
|
return STATUS_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2009-10-16 00:28:33 +00:00
|
|
|
/* PUBLIC FUNCTIONS ***********************************************************/
|
|
|
|
|
|
|
|
/*
 * @unimplemented
 */
PVOID
NTAPI
MmAllocateMappingAddress(IN SIZE_T NumberOfBytes,
                         IN ULONG PoolTag)
{
    //
    // Not implemented. NumberOfBytes and PoolTag are ignored and the stub
    // always returns NULL, which callers must treat as allocation failure.
    // NOTE(review): a real implementation presumably reserves a system PTE
    // range for later MmMapLockedPagesWithReservedMapping use -- confirm
    // against the documented NT contract before implementing.
    //
    UNIMPLEMENTED;
    return NULL;
}
|
|
|
|
|
|
|
|
/*
 * @unimplemented
 */
VOID
NTAPI
MmFreeMappingAddress(IN PVOID BaseAddress,
                     IN ULONG PoolTag)
{
    //
    // Not implemented: BaseAddress and PoolTag are ignored and nothing is
    // released. Harmless today only because the paired reservation routine
    // (MmAllocateMappingAddress above) never hands out an address.
    //
    UNIMPLEMENTED;
}
|
|
|
|
|
2009-06-22 08:22:41 +00:00
|
|
|
/* EOF */
|