/*
 * PROJECT:     ReactOS Kernel
 * LICENSE:     BSD - See COPYING.ARM in the top level directory
 * FILE:        ntoskrnl/mm/ARM3/miarm.h
 * PURPOSE:     ARM Memory Manager Header
 * PROGRAMMERS: ReactOS Portable Systems Group
 */

#pragma once

#ifdef __cplusplus
extern "C" {
#endif

#define MI_LOWEST_VAD_ADDRESS (PVOID)MM_LOWEST_USER_ADDRESS

/* Make the code cleaner with some definitions for size multiples */
#define _1KB (1024u)
#define _1MB (1024 * _1KB)
#define _1GB (1024 * _1MB)

/* Everyone loves 64K */
#define _64K (64 * _1KB)

/* Size of a page table */
#define PT_SIZE (PTE_PER_PAGE * sizeof(MMPTE))

/* Size of a page directory */
#define PD_SIZE (PDE_PER_PAGE * sizeof(MMPDE))

/* Size of all page directories for a process */
#define SYSTEM_PD_SIZE (PPE_PER_PAGE * PD_SIZE)

#ifdef _M_IX86
C_ASSERT(SYSTEM_PD_SIZE == PAGE_SIZE);
#endif
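
//
// Worked example (illustrative, not part of the build): on non-PAE x86,
// PDE_PER_PAGE is 1024 and sizeof(MMPDE) is 4, so PD_SIZE is 1024 * 4 = 4096
// bytes; with PPE_PER_PAGE being 1, SYSTEM_PD_SIZE is also 4096, which is
// exactly what the C_ASSERT above checks against PAGE_SIZE.
//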

//
// Protection Bits part of the internal memory manager Protection Mask, from:
// http://reactos.org/wiki/Techwiki:Memory_management_in_the_Windows_XP_kernel
// https://www.reactos.org/wiki/Techwiki:Memory_Protection_constants
// and public assertions.
//
#define MM_ZERO_ACCESS         0
#define MM_READONLY            1
#define MM_EXECUTE             2
#define MM_EXECUTE_READ        3
#define MM_READWRITE           4
#define MM_WRITECOPY           5
#define MM_EXECUTE_READWRITE   6
#define MM_EXECUTE_WRITECOPY   7
#define MM_PROTECT_ACCESS      7

//
// These are flags on top of the actual protection mask
//
#define MM_NOCACHE            0x08
#define MM_GUARDPAGE          0x10
#define MM_WRITECOMBINE       0x18
#define MM_PROTECT_SPECIAL    0x18

//
// These are special cases
//
#define MM_DECOMMIT           (MM_ZERO_ACCESS | MM_GUARDPAGE)
#define MM_NOACCESS           (MM_ZERO_ACCESS | MM_WRITECOMBINE)
#define MM_OUTSWAPPED_KSTACK  (MM_EXECUTE_WRITECOPY | MM_WRITECOMBINE)
#define MM_INVALID_PROTECTION 0xFFFFFFFF
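
//
// Interpretation sketch (illustrative only, with hypothetical locals): the low
// three bits of a protection mask select the access level and the bits above
// them qualify it. Note that MM_DECOMMIT also has the MM_GUARDPAGE bit set, so
// mask consumers must split the value before treating it as a guard page:
//
//   ULONG Access = ProtectionMask & MM_PROTECT_ACCESS;
//   ULONG Flags  = ProtectionMask & MM_PROTECT_SPECIAL;
//   if ((Access == MM_ZERO_ACCESS) && (Flags == MM_GUARDPAGE))
//   {
//       /* This is MM_DECOMMIT (a decommitted page), not a guard page */
//   }
//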

//
// Specific PTE Definitions that map to the Memory Manager's Protection Mask Bits
// The Memory Manager's definitions describe the attributes that must be
// preserved, while these PTE definitions describe the same attributes in the
// hardware sense. This helps deal with differences in how each architecture
// expresses a given attribute in its PTE bits.
//
// For example, in the logical attributes we want to express read-only as a
// flag, but on x86 it is writability that must be set. Similarly, on x86 it is
// disabling the caches that requires a special flag, while on certain
// architectures such as ARM it is enabling the cache that requires a flag.
//
#if defined(_M_IX86)
//
// Access Flags
//
#define PTE_READONLY            0 // Doesn't exist on x86
#define PTE_EXECUTE             0 // Not worrying about NX yet
#define PTE_EXECUTE_READ        0 // Not worrying about NX yet
#define PTE_READWRITE           0x2
#define PTE_WRITECOPY           0x200
#define PTE_EXECUTE_READWRITE   0x2 // Not worrying about NX yet
#define PTE_EXECUTE_WRITECOPY   0x200
#define PTE_PROTOTYPE           0x400

//
// State Flags
//
#define PTE_VALID               0x1
#define PTE_ACCESSED            0x20
#define PTE_DIRTY               0x40

//
// Cache flags
//
#define PTE_ENABLE_CACHE        0
#define PTE_DISABLE_CACHE       0x10
#define PTE_WRITECOMBINED_CACHE 0x10
#define PTE_PROTECT_MASK        0x612
#elif defined(_M_AMD64)
//
// Access Flags
//
#define PTE_READONLY            0x8000000000000000ULL
#define PTE_EXECUTE             0x0000000000000000ULL
#define PTE_EXECUTE_READ        PTE_EXECUTE /* EXECUTE implies READ on x64 */
#define PTE_READWRITE           0x8000000000000002ULL
#define PTE_WRITECOPY           0x8000000000000200ULL
#define PTE_EXECUTE_READWRITE   0x0000000000000002ULL
#define PTE_EXECUTE_WRITECOPY   0x0000000000000200ULL
#define PTE_PROTOTYPE           0x0000000000000400ULL

//
// State Flags
//
#define PTE_VALID               0x0000000000000001ULL
#define PTE_ACCESSED            0x0000000000000020ULL
#define PTE_DIRTY               0x0000000000000040ULL

//
// Cache flags
//
#define PTE_ENABLE_CACHE        0x0000000000000000ULL
#define PTE_DISABLE_CACHE       0x0000000000000010ULL
#define PTE_WRITECOMBINED_CACHE 0x0000000000000010ULL
#define PTE_PROTECT_MASK        0x8000000000000612ULL
#elif defined(_M_ARM)
#define PTE_READONLY            0x200
#define PTE_EXECUTE             0 // Not worrying about NX yet
#define PTE_EXECUTE_READ        0 // Not worrying about NX yet
#define PTE_READWRITE           0 // Doesn't exist on ARM
#define PTE_WRITECOPY           0 // Doesn't exist on ARM
#define PTE_EXECUTE_READWRITE   0 // Not worrying about NX yet
#define PTE_EXECUTE_WRITECOPY   0 // Not worrying about NX yet
#define PTE_PROTOTYPE           0x400 // Using the Shared bit

//
// Cache flags
//
#define PTE_ENABLE_CACHE        0
#define PTE_DISABLE_CACHE       0x10
#define PTE_WRITECOMBINED_CACHE 0x10
#define PTE_PROTECT_MASK        0x610
#else
#error Define these please!
#endif

//
// Mask for image section page protection
//
#define IMAGE_SCN_PROTECTION_MASK (IMAGE_SCN_MEM_WRITE | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_EXECUTE)

extern const ULONG_PTR MmProtectToPteMask[32];
extern const ULONG MmProtectToValue[32];
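
//
// Lookup sketch (illustrative, with a hypothetical ProtectionMask variable):
// the five mask bits defined above form an index into these 32-entry tables,
// translating a protection mask into hardware PTE bits or a PAGE_* constant:
//
//   ULONG_PTR HardwarePteBits = MmProtectToPteMask[ProtectionMask & 0x1F];
//   ULONG PageProtection = MmProtectToValue[ProtectionMask & 0x1F];
//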

//
// Assertions for session images, addresses, and PTEs
//
#define MI_IS_SESSION_IMAGE_ADDRESS(Address) \
    (((Address) >= MiSessionImageStart) && ((Address) < MiSessionImageEnd))

#define MI_IS_SESSION_ADDRESS(Address) \
    (((Address) >= MmSessionBase) && ((Address) < MiSessionSpaceEnd))

#define MI_IS_SESSION_PTE(Pte) \
    ((((PMMPTE)Pte) >= MiSessionBasePte) && (((PMMPTE)Pte) < MiSessionLastPte))

#define MI_IS_PAGE_TABLE_ADDRESS(Address) \
    (((PVOID)(Address) >= (PVOID)PTE_BASE) && ((PVOID)(Address) <= (PVOID)PTE_TOP))

#define MI_IS_SYSTEM_PAGE_TABLE_ADDRESS(Address) \
    (((Address) >= (PVOID)MiAddressToPte(MmSystemRangeStart)) && ((Address) <= (PVOID)PTE_TOP))

#define MI_IS_PAGE_TABLE_OR_HYPER_ADDRESS(Address) \
    (((PVOID)(Address) >= (PVOID)PTE_BASE) && ((PVOID)(Address) <= (PVOID)MmHyperSpaceEnd))
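
//
// Usage sketch (illustrative, not taken from a specific caller): fault paths
// typically classify an address first, then pick the appropriate working set
// and locking rules:
//
//   if (MI_IS_SESSION_ADDRESS(VirtualAddress))
//   {
//       /* Session space: charge the current session's working set */
//   }
//   else if (MI_IS_PAGE_TABLE_ADDRESS(VirtualAddress))
//   {
//       /* This is itself a PTE address within the self-map */
//   }
//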

//
// Creates a software PTE with the given protection
//
#define MI_MAKE_SOFTWARE_PTE(p, x) ((p)->u.Long = (x << MM_PTE_SOFTWARE_PROTECTION_BITS))
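
//
// Usage sketch (illustrative; MI_WRITE_INVALID_PTE is assumed to be the usual
// helper for storing an invalid PTE): record only the desired protection in a
// not-yet-valid PTE, to be materialized later by the fault handler:
//
//   MMPTE TempPte;
//   MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);
//   MI_WRITE_INVALID_PTE(PointerPte, TempPte);
//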

//
// Marks a PTE as deleted
//
#define MI_SET_PFN_DELETED(x) ((x)->PteAddress = (PMMPTE)((ULONG_PTR)(x)->PteAddress | 1))
#define MI_IS_PFN_DELETED(x)  ((ULONG_PTR)((x)->PteAddress) & 1)
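
//
// Note (illustrative): the deleted state is encoded in the low bit of the
// PFN entry's PteAddress, which is otherwise always pointer-aligned, so no
// extra field is needed:
//
//   MI_SET_PFN_DELETED(Pfn1);
//   ASSERT(MI_IS_PFN_DELETED(Pfn1));
//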

//
// Special values for LoadedImports
//
#define MM_SYSLDR_NO_IMPORTS   ((PVOID)(ULONG_PTR)-2)
#define MM_SYSLDR_BOOT_LOADED  ((PVOID)(ULONG_PTR)-1)
#define MM_SYSLDR_SINGLE_ENTRY 0x1

//
// Number of initial session IDs
//
#define MI_INITIAL_SESSION_IDS 64

#if defined(_M_IX86) || defined(_M_ARM)
//
// PFN List Sentinel
//
#define LIST_HEAD 0xFFFFFFFF

//
// Because GCC cannot automatically downcast 0xFFFFFFFF to lesser-width bits,
// we need a manual definition suited to the number of bits in the PteFrame.
// This is used as a LIST_HEAD for the colored list
//
#define COLORED_LIST_HEAD ((1 << 25) - 1) // 0x1FFFFFF
#elif defined(_M_AMD64)
#define LIST_HEAD 0xFFFFFFFFFFFFFFFFLL
#define COLORED_LIST_HEAD ((1ULL << 57) - 1) // 0x1FFFFFFFFFFFFFFLL
#else
#error Define these please!
#endif

//
// Returns the color of a page
//
#define MI_GET_PAGE_COLOR(x)         ((x) & MmSecondaryColorMask)
#define MI_GET_NEXT_COLOR()          (MI_GET_PAGE_COLOR(++MmSystemPageColor))
#define MI_GET_NEXT_PROCESS_COLOR(x) (MI_GET_PAGE_COLOR(++(x)->NextPageColor))
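
//
// Usage sketch (illustrative): allocation paths advance the color so that
// successive pages are spread across the secondary colors, either globally
// or per process:
//
//   Color = MI_GET_NEXT_COLOR();
//   Color = MI_GET_NEXT_PROCESS_COLOR(Process);
//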

//
// Prototype PTEs that don't yet have a pagefile association
//
#ifdef _WIN64
#define MI_PTE_LOOKUP_NEEDED 0xffffffffULL
#else
#define MI_PTE_LOOKUP_NEEDED 0xFFFFF
#endif

//
// Number of session data and tag pages
//
#define MI_SESSION_DATA_PAGES_MAXIMUM (MM_ALLOCATION_GRANULARITY / PAGE_SIZE)
#define MI_SESSION_TAG_PAGES_MAXIMUM  (MM_ALLOCATION_GRANULARITY / PAGE_SIZE)

//
// Used by MiCheckSecuredVad
//
#define MM_READ_WRITE_ALLOWED 11
#define MM_READ_ONLY_ALLOWED  10
#define MM_NO_ACCESS_ALLOWED  01
#define MM_DELETE_CHECK       85

//
// System views are binned into 64K chunks
//
#define MI_SYSTEM_VIEW_BUCKET_SIZE _64K

//
// FIXFIX: These should go in ex.h after the pool merge
//
#ifdef _WIN64
#define POOL_BLOCK_SIZE 16
#else
#define POOL_BLOCK_SIZE 8
#endif
#define POOL_LISTS_PER_PAGE (PAGE_SIZE / POOL_BLOCK_SIZE)
#define BASE_POOL_TYPE_MASK 1
#define POOL_MAX_ALLOC (PAGE_SIZE - (sizeof(POOL_HEADER) + POOL_BLOCK_SIZE))
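
//
// Worked example (illustrative, assuming a 4096-byte page on 32-bit builds):
// POOL_BLOCK_SIZE is 8, so POOL_LISTS_PER_PAGE is 4096 / 8 = 512, and the
// largest allocation that still fits in one page next to its POOL_HEADER is
// POOL_MAX_ALLOC = 4096 - (8 + 8) = 4080 bytes.
//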

//
// Pool debugging/analysis/tracing flags
//
#define POOL_FLAG_CHECK_TIMERS        0x1
#define POOL_FLAG_CHECK_WORKERS       0x2
#define POOL_FLAG_CHECK_RESOURCES     0x4
#define POOL_FLAG_VERIFIER            0x8
#define POOL_FLAG_CHECK_DEADLOCK      0x10
#define POOL_FLAG_SPECIAL_POOL        0x20
#define POOL_FLAG_DBGPRINT_ON_FAILURE 0x40
#define POOL_FLAG_CRASH_ON_FAILURE    0x80

//
// BAD_POOL_HEADER codes during pool bugcheck
//
#define POOL_CORRUPTED_LIST               3
#define POOL_SIZE_OR_INDEX_MISMATCH       5
#define POOL_ENTRIES_NOT_ALIGNED_PREVIOUS 6
#define POOL_HEADER_NOT_ALIGNED           7
#define POOL_HEADER_IS_ZERO               8
#define POOL_ENTRIES_NOT_ALIGNED_NEXT     9
#define POOL_ENTRY_NOT_FOUND              10

//
// BAD_POOL_CALLER codes during pool bugcheck
//
#define POOL_ENTRY_CORRUPTED        1
#define POOL_ENTRY_ALREADY_FREE     6
#define POOL_ENTRY_NOT_ALLOCATED    7
#define POOL_ALLOC_IRQL_INVALID     8
#define POOL_FREE_IRQL_INVALID      9
#define POOL_BILLED_PROCESS_INVALID 13
#define POOL_HEADER_SIZE_INVALID    32

typedef struct _POOL_DESCRIPTOR
{
    POOL_TYPE PoolType;
    ULONG PoolIndex;
    ULONG RunningAllocs;
    ULONG RunningDeAllocs;
    ULONG TotalPages;
    ULONG TotalBigPages;
    ULONG Threshold;
    PVOID LockAddress;
    PVOID PendingFrees;
    LONG PendingFreeDepth;
    SIZE_T TotalBytes;
    SIZE_T Spare0;
    LIST_ENTRY ListHeads[POOL_LISTS_PER_PAGE];
} POOL_DESCRIPTOR, *PPOOL_DESCRIPTOR;

typedef struct _POOL_HEADER
{
    union
    {
        struct
        {
#ifdef _WIN64
            USHORT PreviousSize:8;
            USHORT PoolIndex:8;
            USHORT BlockSize:8;
            USHORT PoolType:8;
#else
            USHORT PreviousSize:9;
            USHORT PoolIndex:7;
            USHORT BlockSize:9;
            USHORT PoolType:7;
#endif
        };
        ULONG Ulong1;
    };
#ifdef _WIN64
    ULONG PoolTag;
#endif
    union
    {
#ifdef _WIN64
        PEPROCESS ProcessBilled;
#else
        ULONG PoolTag;
#endif
        struct
        {
            USHORT AllocatorBackTraceIndex;
            USHORT PoolTagHash;
        };
    };
} POOL_HEADER, *PPOOL_HEADER;

C_ASSERT(sizeof(POOL_HEADER) == POOL_BLOCK_SIZE);
C_ASSERT(POOL_BLOCK_SIZE == sizeof(LIST_ENTRY));
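
//
// Layout sketch (illustrative only): each small-pool allocation is immediately
// preceded by its POOL_HEADER, and BlockSize/PreviousSize are counted in units
// of POOL_BLOCK_SIZE, so the header and size of an allocation are reached as:
//
//   PPOOL_HEADER Entry = (PPOOL_HEADER)Allocation - 1;
//   SIZE_T SizeInBytes = Entry->BlockSize * POOL_BLOCK_SIZE;
//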

typedef struct _POOL_TRACKER_TABLE
{
    ULONG Key;
    LONG NonPagedAllocs;
    LONG NonPagedFrees;
    SIZE_T NonPagedBytes;
    LONG PagedAllocs;
    LONG PagedFrees;
    SIZE_T PagedBytes;
} POOL_TRACKER_TABLE, *PPOOL_TRACKER_TABLE;

typedef struct _POOL_TRACKER_BIG_PAGES
{
    PVOID Va;
    ULONG Key;
    ULONG NumberOfPages;
    PVOID QuotaObject;
} POOL_TRACKER_BIG_PAGES, *PPOOL_TRACKER_BIG_PAGES;

extern ULONG ExpNumberOfPagedPools;
extern POOL_DESCRIPTOR NonPagedPoolDescriptor;
extern PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
extern PPOOL_TRACKER_TABLE PoolTrackTable;

//
// END FIXFIX
//

typedef struct _MI_LARGE_PAGE_DRIVER_ENTRY
{
    LIST_ENTRY Links;
    UNICODE_STRING BaseName;
} MI_LARGE_PAGE_DRIVER_ENTRY, *PMI_LARGE_PAGE_DRIVER_ENTRY;

typedef enum _MMSYSTEM_PTE_POOL_TYPE
{
    SystemPteSpace,
    NonPagedPoolExpansion,
    MaximumPtePoolTypes
} MMSYSTEM_PTE_POOL_TYPE;

typedef enum _MI_PFN_CACHE_ATTRIBUTE
{
    MiNonCached,
    MiCached,
    MiWriteCombined,
    MiNotMapped
} MI_PFN_CACHE_ATTRIBUTE, *PMI_PFN_CACHE_ATTRIBUTE;

typedef struct _PHYSICAL_MEMORY_RUN
{
    PFN_NUMBER BasePage;
    PFN_NUMBER PageCount;
} PHYSICAL_MEMORY_RUN, *PPHYSICAL_MEMORY_RUN;

typedef struct _PHYSICAL_MEMORY_DESCRIPTOR
{
    ULONG NumberOfRuns;
    PFN_NUMBER NumberOfPages;
    PHYSICAL_MEMORY_RUN Run[1];
} PHYSICAL_MEMORY_DESCRIPTOR, *PPHYSICAL_MEMORY_DESCRIPTOR;
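
//
// Traversal sketch (illustrative): the descriptor is a counted array of
// physically contiguous runs, so walking every described page frame looks
// roughly like this (using the MmPhysicalMemoryBlock pointer declared below):
//
//   for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i++)
//   {
//       PPHYSICAL_MEMORY_RUN Run = &MmPhysicalMemoryBlock->Run[i];
//       for (Page = Run->BasePage; Page < Run->BasePage + Run->PageCount; Page++)
//       {
//           /* Page is a PFN belonging to this run */
//       }
//   }
//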

typedef struct _MMCOLOR_TABLES
{
    PFN_NUMBER Flink;
    PVOID Blink;
    PFN_NUMBER Count;
} MMCOLOR_TABLES, *PMMCOLOR_TABLES;

typedef struct _MI_LARGE_PAGE_RANGES
{
    PFN_NUMBER StartFrame;
    PFN_NUMBER LastFrame;
} MI_LARGE_PAGE_RANGES, *PMI_LARGE_PAGE_RANGES;

typedef struct _MMVIEW
{
    ULONG_PTR Entry;
    PCONTROL_AREA ControlArea;
} MMVIEW, *PMMVIEW;

typedef struct _MMSESSION
{
    KGUARDED_MUTEX SystemSpaceViewLock;
    PKGUARDED_MUTEX SystemSpaceViewLockPointer;
    PCHAR SystemSpaceViewStart;
    PMMVIEW SystemSpaceViewTable;
    ULONG SystemSpaceHashSize;
    ULONG SystemSpaceHashEntries;
    ULONG SystemSpaceHashKey;
    ULONG BitmapFailures;
    PRTL_BITMAP SystemSpaceBitMap;
} MMSESSION, *PMMSESSION;

typedef struct _MM_SESSION_SPACE_FLAGS
{
    ULONG Initialized:1;
    ULONG DeletePending:1;
    ULONG Filler:30;
} MM_SESSION_SPACE_FLAGS;

typedef struct _MM_SESSION_SPACE
{
    struct _MM_SESSION_SPACE *GlobalVirtualAddress;
    LONG ReferenceCount;
    union
    {
        ULONG LongFlags;
        MM_SESSION_SPACE_FLAGS Flags;
    } u;
    ULONG SessionId;
    LIST_ENTRY ProcessList;
    LARGE_INTEGER LastProcessSwappedOutTime;
    PFN_NUMBER SessionPageDirectoryIndex;
    SIZE_T NonPageablePages;
    SIZE_T CommittedPages;
    PVOID PagedPoolStart;
    PVOID PagedPoolEnd;
    PMMPDE PagedPoolBasePde;
    ULONG Color;
    LONG ResidentProcessCount;
    ULONG SessionPoolAllocationFailures[4];
    LIST_ENTRY ImageList;
    LCID LocaleId;
    ULONG AttachCount;
    KEVENT AttachEvent;
    PEPROCESS LastProcess;
    LONG ProcessReferenceToSession;
    LIST_ENTRY WsListEntry;
    GENERAL_LOOKASIDE Lookaside[SESSION_POOL_LOOKASIDES];
    MMSESSION Session;
    KGUARDED_MUTEX PagedPoolMutex;
    MM_PAGED_POOL_INFO PagedPoolInfo;
    MMSUPPORT Vm;
    PMMWSLE Wsle;
    PDRIVER_UNLOAD Win32KDriverUnload;
    POOL_DESCRIPTOR PagedPool;
#if defined (_M_AMD64)
    MMPDE PageDirectory;
#else
    PMMPDE PageTables;
#endif
#if defined (_M_AMD64)
    PMMPTE SpecialPoolFirstPte;
    PMMPTE SpecialPoolLastPte;
    PMMPTE NextPdeForSpecialPoolExpansion;
    PMMPTE LastPdeForSpecialPoolExpansion;
    PFN_NUMBER SpecialPagesInUse;
#endif
    LONG ImageLoadingCount;
} MM_SESSION_SPACE, *PMM_SESSION_SPACE;

extern PMM_SESSION_SPACE MmSessionSpace;
extern MMPTE HyperTemplatePte;
extern MMPDE ValidKernelPde;
extern MMPTE ValidKernelPte;
extern MMPDE ValidKernelPdeLocal;
extern MMPTE ValidKernelPteLocal;
extern MMPDE DemandZeroPde;
extern MMPTE DemandZeroPte;
extern MMPTE PrototypePte;
extern MMPTE MmDecommittedPte;
extern BOOLEAN MmLargeSystemCache;
extern BOOLEAN MmZeroPageFile;
extern BOOLEAN MmProtectFreedNonPagedPool;
extern BOOLEAN MmTrackLockedPages;
extern BOOLEAN MmTrackPtes;
extern BOOLEAN MmDynamicPfn;
extern BOOLEAN MmMirroring;
extern BOOLEAN MmMakeLowMemory;
extern BOOLEAN MmEnforceWriteProtection;
extern SIZE_T MmAllocationFragment;
extern ULONG MmConsumedPoolPercentage;
extern ULONG MmVerifyDriverBufferType;
extern ULONG MmVerifyDriverLevel;
extern WCHAR MmVerifyDriverBuffer[512];
extern WCHAR MmLargePageDriverBuffer[512];
extern LIST_ENTRY MiLargePageDriverList;
extern BOOLEAN MiLargePageAllDrivers;
extern ULONG MmVerifyDriverBufferLength;
extern ULONG MmLargePageDriverBufferLength;
extern SIZE_T MmSizeOfNonPagedPoolInBytes;
extern SIZE_T MmMaximumNonPagedPoolInBytes;
extern PFN_NUMBER MmMaximumNonPagedPoolInPages;
extern PFN_NUMBER MmSizeOfPagedPoolInPages;
extern PVOID MmNonPagedSystemStart;
extern PVOID MmNonPagedPoolStart;
extern PVOID MmNonPagedPoolExpansionStart;
extern PVOID MmNonPagedPoolEnd;
extern SIZE_T MmSizeOfPagedPoolInBytes;
extern PVOID MmPagedPoolStart;
extern PVOID MmPagedPoolEnd;
extern PVOID MmSessionBase;
extern SIZE_T MmSessionSize;
extern PMMPTE MmFirstReservedMappingPte, MmLastReservedMappingPte;
extern PMMPTE MiFirstReservedZeroingPte;
extern MI_PFN_CACHE_ATTRIBUTE MiPlatformCacheAttributes[2][MmMaximumCacheType];
extern PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;
extern SIZE_T MmBootImageSize;
extern PMMPTE MmSystemPtesStart[MaximumPtePoolTypes];
extern PMMPTE MmSystemPtesEnd[MaximumPtePoolTypes];
extern PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
extern MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;
extern ULONG_PTR MxPfnAllocation;
extern MM_PAGED_POOL_INFO MmPagedPoolInfo;
|
2009-10-15 17:23:21 +00:00
|
|
|
extern KGUARDED_MUTEX MmPagedPoolMutex;
|
2012-03-26 07:26:36 +00:00
|
|
|
extern KGUARDED_MUTEX MmSectionCommitMutex;
|
- Fix a bug in memory area creation: Static memory areas had the static flag embedded in their type, so code that was switch()ing on the type would fail to recognize the actual type, because MEMORY_AREA_STATIC was ORed in.
- Add a new memory area type: MEMORY_AREA_OWNED_BY_ARM3. This will allow us to instruct the ReactOS Memory MAnager to "Back. The Fuck. Off." during page faults and such, so we can handle page faults inside ARM3-owned PTEs ourselves.
- Right now, all ARM3 PTEs and data is nonpaged, so no page faults should happen, but this may change in the future.
- Also will allow us to manage our own PDEs so we can do on-demand inpage instead of syncing with the ReactOS Mm hack cache.
- Create all memory areas in one shot in MmCreateSystemMemoryAreas (get rid of MiInitPageDirectoryMap and MiInitPagedPool memory area creation).
- Mark all of ours as owned by ARM3.
- Make them all static.
- The only non-ARM3 one right now is paged pool, we own all the other static areas.
- Move this code into mm, instead of mm/ARM3, since memory areas are not an ARM3 concept.
- Also create memory areas for session space, session view, and other ARM3 memory ranges, so nobody touches those ranges.
- Dump the kernel address space after all this is done, in a MmDbg function in mm.
- This cleans up ARM3 of some ROS-specific code, and also collapses Phase 1 and 2 into a single phase.
svn path=/trunk/; revision=43486
2009-10-15 18:54:35 +00:00
|
|
|
extern PVOID MmPagedPoolStart;
|
|
|
|
extern PVOID MmPagedPoolEnd;
|
|
|
|
extern PVOID MmNonPagedSystemStart;
|
|
|
|
extern PVOID MiSystemViewStart;
|
2010-07-16 00:34:26 +00:00
|
|
|
extern SIZE_T MmSystemViewSize;
|
- Fix a bug in memory area creation: Static memory areas had the static flag embedded in their type, so code that was switch()ing on the type would fail to recognize the actual type, because MEMORY_AREA_STATIC was ORed in.
- Add a new memory area type: MEMORY_AREA_OWNED_BY_ARM3. This will allow us to instruct the ReactOS Memory MAnager to "Back. The Fuck. Off." during page faults and such, so we can handle page faults inside ARM3-owned PTEs ourselves.
- Right now, all ARM3 PTEs and data is nonpaged, so no page faults should happen, but this may change in the future.
- Also will allow us to manage our own PDEs so we can do on-demand inpage instead of syncing with the ReactOS Mm hack cache.
- Create all memory areas in one shot in MmCreateSystemMemoryAreas (get rid of MiInitPageDirectoryMap and MiInitPagedPool memory area creation).
- Mark all of ours as owned by ARM3.
- Make them all static.
- The only non-ARM3 one right now is paged pool, we own all the other static areas.
- Move this code into mm, instead of mm/ARM3, since memory areas are not an ARM3 concept.
- Also create memory areas for session space, session view, and other ARM3 memory ranges, so nobody touches those ranges.
- Dump the kernel address space after all this is done, in a MmDbg function in mm.
- This cleans up ARM3 of some ROS-specific code, and also collapses Phase 1 and 2 into a single phase.
svn path=/trunk/; revision=43486
2009-10-15 18:54:35 +00:00
|
|
|
extern PVOID MmSessionBase;
|
|
|
|
extern PVOID MiSessionSpaceEnd;
|
2010-05-12 20:48:15 +00:00
|
|
|
extern PMMPTE MiSessionImagePteStart;
|
|
|
|
extern PMMPTE MiSessionImagePteEnd;
|
|
|
|
extern PMMPTE MiSessionBasePte;
|
|
|
|
extern PMMPTE MiSessionLastPte;
|
2010-07-16 00:34:26 +00:00
|
|
|
extern SIZE_T MmSizeOfPagedPoolInBytes;
|
2010-11-24 15:21:45 +00:00
|
|
|
extern PMMPDE MmSystemPagePtes;
|
2009-10-31 01:02:35 +00:00
|
|
|
extern PVOID MmSystemCacheStart;
|
|
|
|
extern PVOID MmSystemCacheEnd;
|
|
|
|
extern MMSUPPORT MmSystemCacheWs;
|
|
|
|
extern SIZE_T MmAllocatedNonPagedPool;
|
|
|
|
extern ULONG MmSpecialPoolTag;
|
2010-01-03 05:10:09 +00:00
|
|
|
extern PVOID MmHyperSpaceEnd;
|
2010-02-10 02:00:56 +00:00
|
|
|
extern PMMWSL MmSystemCacheWorkingSetList;
|
2010-07-16 00:34:26 +00:00
|
|
|
extern SIZE_T MmMinimumNonPagedPoolSize;
|
2010-02-10 02:00:56 +00:00
|
|
|
extern ULONG MmMinAdditionNonPagedPoolPerMb;
|
2010-07-16 00:34:26 +00:00
|
|
|
extern SIZE_T MmDefaultMaximumNonPagedPool;
|
2010-02-10 02:00:56 +00:00
|
|
|
extern ULONG MmMaxAdditionNonPagedPoolPerMb;
|
|
|
|
extern ULONG MmSecondaryColors;
|
|
|
|
extern ULONG MmSecondaryColorMask;
|
[HAL/NDK]
- Make Vector parameter in HalEnableSystemInterrupt, HalDisableSystemInterrupt and HalBeginSystemInterrupt an ULONG, not an UCHAR
[NDK]
- 64bit fixes for HANDLE_TABLE, KPROCESS, SECTION_IMAGE_INFORMATION, MMADDRESS_LIST, MMVAD_FLAGS, MMVAD, MMVAD_LONG, MMVAD_SHORT, MEMORY_DESCRIPTOR, MEMORY_ALLOCATION_DESCRIPTOR, LdrVerifyMappedImageMatchesChecksum
- KDPC_DATA::DpcQueueDepth is signed on amd64, unsigned on x86
[NTOSKRNL]
- Fix hundreds of MSVC and amd64 warnings
- add a pragma message to FstubFixupEfiPartition, since it looks broken
- Move portable Ke constants from <arch>/cpu.c to krnlinit.c
- Fixed a bug in amd64 KiGeneralProtectionFaultHandler
svn path=/trunk/; revision=53734
2011-09-18 13:11:45 +00:00
|
|
|
extern ULONG MmNumberOfSystemPtes;
|
2010-02-10 02:00:56 +00:00
|
|
|
extern ULONG MmMaximumNonPagedPoolPercent;
|
2010-02-11 00:01:32 +00:00
|
|
|
extern ULONG MmLargeStackSize;
|
2010-02-20 14:40:21 +00:00
|
|
|
extern PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];
|
2012-03-26 07:26:36 +00:00
|
|
|
extern MMPFNLIST MmStandbyPageListByPriority[8];
|
[NTOS]: Make MM init read MmProductType to determine what SKU of ReactOS this is, instead of assuming Server. If you want to go back to the old behavior, you need to change "WinNT" to "ServerNT" in the hivesys under Product Type.
[NTOS]: Configure the MmSystemSize variable properly based on SKU and RAM. Previously, ReactOS told all drivers and applications you were running on a system with < 13MB RAM.
[NTOS]: Initialize thresholds for low and high memory (in pages), low and high paged pool memory, and low and high nonpaged pool memory. These are described in the source.
[NTOS]: Initialize events for each of those thresholds, and populate the \KernelObject\xxxCondition events that are documented in MSDN for driver and app developers.
[NTOS]: Define some internal thresholds to use later, representing the minimum number of free pages under we go berserk, and the minimum number of free pages that we consider "plenty".
[NTOS]: Rename MiRemoveFromList to MiUnlinkFreeOrZeroedPage (Windows name). Make the function handle MmAvailablePages decrement, instead of having the caller do it.
[NTOS]: Remove run-time initialization of the PFN lists, just initialize them statically (also fixes the fact we forgot to initialize their names).
[NTOS]: Move some more initialization code to ARM3 instead of mm.
[NTOS]: Read ProductType from registry into MmProductType instead of dummy value. Remove duplicate "Mirroring" variable read.
svn path=/trunk/; revision=45638
2010-02-20 21:48:36 +00:00
|
|
|
extern ULONG MmProductType;
|
|
|
|
extern MM_SYSTEMSIZE MmSystemSize;
|
|
|
|
extern PKEVENT MiLowMemoryEvent;
|
|
|
|
extern PKEVENT MiHighMemoryEvent;
|
|
|
|
extern PKEVENT MiLowPagedPoolEvent;
|
|
|
|
extern PKEVENT MiHighPagedPoolEvent;
|
|
|
|
extern PKEVENT MiLowNonPagedPoolEvent;
|
|
|
|
extern PKEVENT MiHighNonPagedPoolEvent;
|
|
|
|
extern PFN_NUMBER MmLowMemoryThreshold;
|
|
|
|
extern PFN_NUMBER MmHighMemoryThreshold;
|
|
|
|
extern PFN_NUMBER MiLowPagedPoolThreshold;
|
|
|
|
extern PFN_NUMBER MiHighPagedPoolThreshold;
|
|
|
|
extern PFN_NUMBER MiLowNonPagedPoolThreshold;
|
|
|
|
extern PFN_NUMBER MiHighNonPagedPoolThreshold;
|
|
|
|
extern PFN_NUMBER MmMinimumFreePages;
|
|
|
|
extern PFN_NUMBER MmPlentyFreePages;
|
2013-08-28 18:57:29 +00:00
|
|
|
extern SIZE_T MmMinimumStackCommitInBytes;
|
2011-09-18 13:11:45 +00:00
|
|
|
extern PFN_COUNT MiExpansionPoolPagesInitialCharge;
|
2010-04-20 22:47:51 +00:00
|
|
|
extern PFN_NUMBER MmResidentAvailableAtInit;
|
|
|
|
extern ULONG MmTotalFreeSystemPtes[MaximumPtePoolTypes];
|
|
|
|
extern PFN_NUMBER MmTotalSystemDriverPages;
|
2013-08-28 18:57:29 +00:00
|
|
|
extern ULONG MmCritsectTimeoutSeconds;
|
2010-04-20 22:47:51 +00:00
|
|
|
extern PVOID MiSessionImageStart;
|
|
|
|
extern PVOID MiSessionImageEnd;
|
2010-05-12 20:48:15 +00:00
|
|
|
extern PMMPTE MiHighestUserPte;
|
|
|
|
extern PMMPDE MiHighestUserPde;
|
2020-02-05 22:48:26 +00:00
|
|
|
extern PFN_NUMBER MmSystemPageDirectory[PPE_PER_PAGE];
|
2010-07-22 20:47:28 +00:00
|
|
|
extern PMMPTE MmSharedUserDataPte;
|
2010-07-24 15:30:24 +00:00
|
|
|
extern LIST_ENTRY MmProcessList;
|
2010-09-28 16:47:25 +00:00
|
|
|
extern KEVENT MmZeroingPageEvent;
|
2010-09-29 01:10:28 +00:00
|
|
|
extern ULONG MmSystemPageColor;
|
|
|
|
extern ULONG MmProcessColorSeed;
|
2010-11-08 12:35:50 +00:00
|
|
|
extern PMMWSL MmWorkingSetList;
|
2011-09-24 08:52:26 +00:00
|
|
|
extern PFN_NUMBER MiNumberOfFreePages;
|
|
|
|
extern SIZE_T MmSessionViewSize;
|
|
|
|
extern SIZE_T MmSessionPoolSize;
|
|
|
|
extern SIZE_T MmSessionImageSize;
|
|
|
|
extern PVOID MiSystemViewStart;
|
|
|
|
extern PVOID MiSessionPoolEnd; // 0xBE000000
|
|
|
|
extern PVOID MiSessionPoolStart; // 0xBD000000
|
|
|
|
extern PVOID MiSessionViewStart; // 0xBE000000
|
2012-08-01 07:54:37 +00:00
|
|
|
extern PVOID MiSessionSpaceWs;
|
2012-03-04 17:56:00 +00:00
|
|
|
extern ULONG MmMaximumDeadKernelStacks;
|
|
|
|
extern SLIST_HEADER MmDeadStackSListHead;
|
2012-03-26 07:26:36 +00:00
|
|
|
extern MM_AVL_TABLE MmSectionBasedRoot;
|
|
|
|
extern KGUARDED_MUTEX MmSectionBasedMutex;
|
|
|
|
extern PVOID MmHighSectionBase;
|
2012-08-03 11:34:35 +00:00
|
|
|
extern SIZE_T MmSystemLockPagesCount;
|
2012-08-24 06:08:20 +00:00
|
|
|
extern ULONG_PTR MmSubsectionBase;
|
2013-01-14 13:55:12 +00:00
|
|
|
extern LARGE_INTEGER MmCriticalSectionTimeout;
|
2013-11-20 23:50:42 +00:00
|
|
|
extern LIST_ENTRY MmWorkingSetExpansionHead;
|
2015-09-05 14:39:40 +00:00
|
|
|
extern KSPIN_LOCK MmExpansionLock;
|
|
|
|
extern PETHREAD MiExpansionLockOwner;
|
2009-06-22 08:22:41 +00:00
|
|
|
|
2021-03-30 16:13:59 +00:00
|
|
|
FORCEINLINE
|
|
|
|
BOOLEAN
|
|
|
|
MI_IS_PROCESS_WORKING_SET(PMMSUPPORT WorkingSet)
|
|
|
|
{
|
|
|
|
return (WorkingSet != &MmSystemCacheWs) && !WorkingSet->Flags.SessionSpace;
|
|
|
|
}
|
|
|
|
|
2011-09-30 09:30:52 +00:00
|
|
|
FORCEINLINE
|
2013-11-26 13:45:33 +00:00
|
|
|
BOOLEAN
|
2011-09-30 09:30:52 +00:00
|
|
|
MiIsMemoryTypeFree(TYPE_OF_MEMORY MemoryType)
|
|
|
|
{
|
|
|
|
return ((MemoryType == LoaderFree) ||
|
|
|
|
(MemoryType == LoaderLoadedProgram) ||
|
|
|
|
(MemoryType == LoaderFirmwareTemporary) ||
|
|
|
|
(MemoryType == LoaderOsloaderStack));
|
|
|
|
}
|
|
|
|
|
|
|
|
FORCEINLINE
|
2013-11-26 13:45:33 +00:00
|
|
|
BOOLEAN
|
2011-09-30 09:30:52 +00:00
|
|
|
MiIsMemoryTypeInvisible(TYPE_OF_MEMORY MemoryType)
|
|
|
|
{
|
|
|
|
return ((MemoryType == LoaderFirmwarePermanent) ||
|
|
|
|
(MemoryType == LoaderSpecialMemory) ||
|
|
|
|
(MemoryType == LoaderHALCachedMemory) ||
|
|
|
|
(MemoryType == LoaderBBTMemory));
|
|
|
|
}
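//
// Note: the two helpers above classify the loader's TYPE_OF_MEMORY values.
// "Free" types describe ranges the loader no longer needs and that Mm can
// reclaim, while "invisible" types (firmware-permanent, special, HAL-cached,
// BBT) are presumably ranges Mm must leave untouched. This is an explanatory
// note, not a statement of the exact Windows semantics.
//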
|
|
|
|
|
2012-02-06 22:01:09 +00:00
|
|
|
#ifdef _M_AMD64
|
|
|
|
FORCEINLINE
|
2013-11-26 13:45:33 +00:00
|
|
|
BOOLEAN
|
2012-02-06 22:01:09 +00:00
|
|
|
MiIsUserPxe(PVOID Address)
|
|
|
|
{
|
|
|
|
return ((ULONG_PTR)Address >> 7) == 0x1FFFFEDF6FB7DA0ULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
FORCEINLINE
|
2013-11-26 13:45:33 +00:00
|
|
|
BOOLEAN
|
2012-02-06 22:01:09 +00:00
|
|
|
MiIsUserPpe(PVOID Address)
|
|
|
|
{
|
|
|
|
return ((ULONG_PTR)Address >> 16) == 0xFFFFF6FB7DA0ULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
FORCEINLINE
|
2013-11-26 13:45:33 +00:00
|
|
|
BOOLEAN
|
2012-02-06 22:01:09 +00:00
|
|
|
MiIsUserPde(PVOID Address)
|
|
|
|
{
|
|
|
|
return ((ULONG_PTR)Address >> 25) == 0x7FFFFB7DA0ULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
FORCEINLINE
|
2013-11-26 13:45:33 +00:00
|
|
|
BOOLEAN
|
2012-02-06 22:01:09 +00:00
|
|
|
MiIsUserPte(PVOID Address)
|
|
|
|
{
|
|
|
|
return ((ULONG_PTR)Address >> 34) == 0x3FFFFDA0ULL;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
FORCEINLINE
|
2013-11-26 13:45:33 +00:00
|
|
|
BOOLEAN
|
2012-02-06 22:01:09 +00:00
|
|
|
MiIsUserPde(PVOID Address)
|
|
|
|
{
|
|
|
|
return ((Address >= (PVOID)MiAddressToPde(NULL)) &&
|
|
|
|
(Address <= (PVOID)MiHighestUserPde));
|
|
|
|
}
|
|
|
|
|
|
|
|
FORCEINLINE
|
2013-11-26 13:45:33 +00:00
|
|
|
BOOLEAN
|
2012-02-06 22:01:09 +00:00
|
|
|
MiIsUserPte(PVOID Address)
|
|
|
|
{
|
2021-07-30 16:54:30 +00:00
|
|
|
return (Address >= (PVOID)PTE_BASE) && (Address <= (PVOID)MiHighestUserPte);
|
2012-02-06 22:01:09 +00:00
|
|
|
}
|
|
|
|
#endif
|
2011-09-30 09:30:52 +00:00
|
|
|
|
2010-07-22 02:20:27 +00:00
|
|
|
//
|
|
|
|
// Figures out the hardware bits for a PTE
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
2013-11-26 13:45:33 +00:00
|
|
|
ULONG_PTR
|
2010-11-23 16:34:30 +00:00
|
|
|
MiDetermineUserGlobalPteMask(IN PVOID PointerPte)
|
2010-07-22 02:20:27 +00:00
|
|
|
{
|
|
|
|
MMPTE TempPte;
|
2010-12-22 16:14:58 +00:00
|
|
|
|
2010-07-22 02:20:27 +00:00
|
|
|
/* Start fresh */
|
|
|
|
TempPte.u.Long = 0;
|
2010-12-22 16:14:58 +00:00
|
|
|
|
2010-07-22 02:20:27 +00:00
|
|
|
/* Make it valid and accessed */
|
|
|
|
TempPte.u.Hard.Valid = TRUE;
|
2010-11-23 16:34:30 +00:00
|
|
|
MI_MAKE_ACCESSED_PAGE(&TempPte);
|
2010-12-22 16:14:58 +00:00
|
|
|
|
2010-07-22 02:20:27 +00:00
|
|
|
/* Is this for user-mode? */
|
2012-02-06 22:01:09 +00:00
|
|
|
if (
|
|
|
|
#if (_MI_PAGING_LEVELS == 4)
|
|
|
|
MiIsUserPxe(PointerPte) ||
|
|
|
|
#endif
|
|
|
|
#if (_MI_PAGING_LEVELS >= 3)
|
|
|
|
MiIsUserPpe(PointerPte) ||
|
|
|
|
#endif
|
|
|
|
MiIsUserPde(PointerPte) ||
|
|
|
|
MiIsUserPte(PointerPte))
|
2010-07-22 02:20:27 +00:00
|
|
|
{
|
|
|
|
/* Set the owner bit */
|
2010-11-23 16:34:30 +00:00
|
|
|
MI_MAKE_OWNER_PAGE(&TempPte);
|
2010-07-22 02:20:27 +00:00
|
|
|
}
|
2010-12-22 16:14:58 +00:00
|
|
|
|
2010-07-22 02:20:27 +00:00
|
|
|
/* FIXME: We should also set the global bit */
|
2010-12-22 16:14:58 +00:00
|
|
|
|
2010-07-22 02:20:27 +00:00
|
|
|
/* Return the protection */
|
|
|
|
return TempPte.u.Long;
|
|
|
|
}
|
|
|
|
|
Testers: Please test this build.
[NTOS]: Implement a MI_MAKE_HARDWARE_PTE macro for the generation of valid kernel PTEs instead of always taking the ValidKernelPte and changing its flags. This macro will take into account the protection mask (up until now ignored) and use the array previously implemented to determine the correct hardware PTE settings. Assertions are also added to validate correct usage of the macro, and later revisions will fill out NT-specific fields to help deal with transition PTEs, page faults, etc.
[NTOS]: Make the stack code the first user of this macro, for the stack PTEs. Good testing base as we create kernel stacks very often.
[NTOS]: The NT MM ABI specifies that in between the allocation of a new PTE and its initialization as a valid PFN, the PTE entry should be an invalid PTE, and should only be marked valid after the PFN has been initialized. For stack PTEs, do this -- first allocating the page, making it invalid, then initializing the PFN, and then writing the valid page.
svn path=/trunk/; revision=47571
2010-06-04 17:49:36 +00:00
|
|
|
//
|
|
|
|
// Creates a valid kernel PTE with the given protection
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
2010-07-22 02:20:27 +00:00
|
|
|
MI_MAKE_HARDWARE_PTE_KERNEL(IN PMMPTE NewPte,
|
|
|
|
IN PMMPTE MappingPte,
|
2011-09-18 13:11:45 +00:00
|
|
|
IN ULONG_PTR ProtectionMask,
|
2010-07-22 02:20:27 +00:00
|
|
|
IN PFN_NUMBER PageFrameNumber)
|
|
|
|
{
|
|
|
|
/* Only valid for kernel, non-session PTEs */
|
|
|
|
ASSERT(MappingPte > MiHighestUserPte);
|
|
|
|
ASSERT(!MI_IS_SESSION_PTE(MappingPte));
|
|
|
|
ASSERT((MappingPte < (PMMPTE)PDE_BASE) || (MappingPte > (PMMPTE)PDE_TOP));
|
2010-12-22 16:14:58 +00:00
|
|
|
|
2021-05-27 17:17:13 +00:00
|
|
|
/* Check that we are not marking valid a page that should not be valid */
|
|
|
|
ASSERT(ProtectionMask & MM_PROTECT_ACCESS);
|
|
|
|
ASSERT((ProtectionMask & MM_GUARDPAGE) == 0);
|
|
|
|
|
2010-07-22 02:20:27 +00:00
|
|
|
/* Start fresh */
|
2021-05-27 17:17:13 +00:00
|
|
|
NewPte->u.Long = 0;
|
2010-12-22 16:14:58 +00:00
|
|
|
|
2010-07-22 02:20:27 +00:00
|
|
|
/* Set the protection and page */
|
|
|
|
NewPte->u.Hard.PageFrameNumber = PageFrameNumber;
|
|
|
|
NewPte->u.Long |= MmProtectToPteMask[ProtectionMask];
|
2021-05-27 17:17:13 +00:00
|
|
|
|
|
|
|
/* Make this valid & global */
|
|
|
|
#ifdef _GLOBAL_PAGES_ARE_AWESOME_
|
|
|
|
if (KeFeatureBits & KF_GLOBAL_PAGE)
|
|
|
|
NewPte->u.Hard.Global = 1;
|
|
|
|
#endif
|
|
|
|
NewPte->u.Hard.Valid = 1;
|
2010-07-22 02:20:27 +00:00
|
|
|
}
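//
// Illustrative usage (a sketch, not taken from any specific caller): code
// that has allocated a physical page would typically build the PTE contents
// in a local MMPTE and only then publish it with MI_WRITE_VALID_PTE, defined
// later in this header. PointerPte and PageFrameIndex are hypothetical
// caller locals; MM_READWRITE is assumed to be one of the protection-mask
// constants used to index MmProtectToPteMask.
//
//     MMPTE TempPte;
//     MI_MAKE_HARDWARE_PTE_KERNEL(&TempPte,
//                                 PointerPte,
//                                 MM_READWRITE,
//                                 PageFrameIndex);
//     MI_WRITE_VALID_PTE(PointerPte, TempPte);
//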
|
|
|
|
|
|
|
|
//
|
|
|
|
// Creates a valid PTE with the given protection
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
2010-06-04 17:49:36 +00:00
|
|
|
MI_MAKE_HARDWARE_PTE(IN PMMPTE NewPte,
|
|
|
|
IN PMMPTE MappingPte,
|
2011-09-18 13:11:45 +00:00
|
|
|
IN ULONG_PTR ProtectionMask,
|
2010-06-04 17:49:36 +00:00
|
|
|
IN PFN_NUMBER PageFrameNumber)
|
2010-07-22 02:20:27 +00:00
|
|
|
{
|
2021-05-27 17:17:13 +00:00
|
|
|
/* Check that we are not marking valid a page that should not be valid */
|
|
|
|
ASSERT(ProtectionMask & MM_PROTECT_ACCESS);
|
|
|
|
ASSERT((ProtectionMask & MM_GUARDPAGE) == 0);
|
|
|
|
|
2010-07-22 02:20:27 +00:00
|
|
|
/* Set the protection and page */
|
|
|
|
NewPte->u.Long = MiDetermineUserGlobalPteMask(MappingPte);
|
|
|
|
NewPte->u.Long |= MmProtectToPteMask[ProtectionMask];
|
|
|
|
NewPte->u.Hard.PageFrameNumber = PageFrameNumber;
|
|
|
|
}
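//
// Unlike the kernel-only variant above, this version derives the valid,
// accessed and owner bits from MiDetermineUserGlobalPteMask, so the same
// helper works for both user and kernel mapping PTEs.
//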
|
|
|
|
|
|
|
|
//
|
|
|
|
// Creates a valid user PTE with the given protection
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MI_MAKE_HARDWARE_PTE_USER(IN PMMPTE NewPte,
|
|
|
|
IN PMMPTE MappingPte,
|
2011-09-18 13:11:45 +00:00
|
|
|
IN ULONG_PTR ProtectionMask,
|
2010-07-22 02:20:27 +00:00
|
|
|
IN PFN_NUMBER PageFrameNumber)
|
2010-06-04 17:49:36 +00:00
|
|
|
{
|
|
|
|
/* Only valid for user PTEs */
|
2010-07-22 02:20:27 +00:00
|
|
|
ASSERT(MappingPte <= MiHighestUserPte);
|
2010-12-22 16:14:58 +00:00
|
|
|
|
2010-06-04 17:49:36 +00:00
|
|
|
/* Start fresh */
|
2014-05-18 14:59:31 +00:00
|
|
|
NewPte->u.Long = 0;
|
2010-12-22 16:14:58 +00:00
|
|
|
|
2021-05-27 17:17:13 +00:00
|
|
|
/* Check that we are not marking valid a page that should not be valid */
|
|
|
|
ASSERT(ProtectionMask & MM_PROTECT_ACCESS);
|
|
|
|
ASSERT((ProtectionMask & MM_GUARDPAGE) == 0);
|
|
|
|
|
2014-05-18 14:59:31 +00:00
|
|
|
NewPte->u.Hard.Valid = TRUE;
|
2010-07-22 02:20:27 +00:00
|
|
|
NewPte->u.Hard.Owner = TRUE;
|
2010-06-04 17:49:36 +00:00
|
|
|
NewPte->u.Hard.PageFrameNumber = PageFrameNumber;
|
|
|
|
NewPte->u.Long |= MmProtectToPteMask[ProtectionMask];
|
|
|
|
}
|
|
|
|
|
2010-11-24 15:21:45 +00:00
|
|
|
#ifndef _M_AMD64
|
2010-10-04 18:34:41 +00:00
|
|
|
//
|
|
|
|
// Builds a Prototype PTE for the address of the PTE
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MI_MAKE_PROTOTYPE_PTE(IN PMMPTE NewPte,
|
|
|
|
IN PMMPTE PointerPte)
|
|
|
|
{
|
|
|
|
ULONG_PTR Offset;
|
|
|
|
|
|
|
|
/* Mark this as a prototype */
|
|
|
|
NewPte->u.Long = 0;
|
|
|
|
NewPte->u.Proto.Prototype = 1;
|
2010-12-22 16:14:58 +00:00
|
|
|
|
2010-10-04 18:34:41 +00:00
|
|
|
/*
|
|
|
|
* Prototype PTEs are only valid in paged pool by design, this little trick
|
2012-08-24 06:08:20 +00:00
|
|
|
* lets us only use 30 bits for the address of the PTE, as long as the area
|
|
|
|
* stays 1024MB at most.
|
2010-10-04 18:34:41 +00:00
|
|
|
*/
|
|
|
|
Offset = (ULONG_PTR)PointerPte - (ULONG_PTR)MmPagedPoolStart;
|
2010-10-19 18:57:30 +00:00
|
|
|
|
2012-08-24 06:08:20 +00:00
|
|
|
/*
|
|
|
|
* 7 bits go in the "low" (but we assume the bottom 2 are zero)
|
|
|
|
* and the other 21 bits go in the "high"
|
|
|
|
*/
|
|
|
|
NewPte->u.Proto.ProtoAddressLow = (Offset & 0x1FC) >> 2;
|
|
|
|
NewPte->u.Proto.ProtoAddressHigh = (Offset & 0x3FFFFE00) >> 9;
|
|
|
|
}
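//
// Worked example of the encoding above (illustrative numbers only): for a
// prototype PTE located at MmPagedPoolStart + 0x1234, the 4-byte-aligned
// offset 0x1234 is split as
//     ProtoAddressLow  = (0x1234 & 0x1FC) >> 2       = 0x0D
//     ProtoAddressHigh = (0x1234 & 0x3FFFFE00) >> 9  = 0x09
// and the original offset can be rebuilt as
//     (0x09 << 9) | (0x0D << 2) = 0x1234
//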
|
|
|
|
|
|
|
|
//
|
|
|
|
// Builds a Subsection PTE for the address of the Segment
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MI_MAKE_SUBSECTION_PTE(IN PMMPTE NewPte,
|
|
|
|
IN PVOID Segment)
|
|
|
|
{
|
|
|
|
ULONG_PTR Offset;
|
|
|
|
|
|
|
|
/* Mark this as a prototype */
|
|
|
|
NewPte->u.Long = 0;
|
|
|
|
NewPte->u.Subsect.Prototype = 1;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Segments are only valid in nonpaged pool. We store the 20-bit
|
|
|
|
* difference either from the top or bottom of nonpaged pool, giving a
|
|
|
|
* maximum of 128MB to each delta, meaning nonpaged pool cannot exceed
|
|
|
|
* 256MB.
|
|
|
|
*/
|
|
|
|
if ((ULONG_PTR)Segment < ((ULONG_PTR)MmSubsectionBase + (128 * _1MB)))
|
|
|
|
{
|
|
|
|
Offset = (ULONG_PTR)Segment - (ULONG_PTR)MmSubsectionBase;
|
|
|
|
NewPte->u.Subsect.WhichPool = PagedPool;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
Offset = (ULONG_PTR)MmNonPagedPoolEnd - (ULONG_PTR)Segment;
|
|
|
|
NewPte->u.Subsect.WhichPool = NonPagedPool;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* 4 bits go in the "low" (but we assume the bottom 3 are zero)
|
|
|
|
* and the other 20 bits go in the "high"
|
|
|
|
*/
|
|
|
|
NewPte->u.Subsect.SubsectionAddressLow = (Offset & 0x78) >> 3;
|
|
|
|
NewPte->u.Subsect.SubsectionAddressHigh = (Offset & 0xFFFFF80) >> 7;
|
2010-10-04 18:34:41 +00:00
|
|
|
}
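//
// Worked example of the encoding above (illustrative numbers only): for a
// segment whose 8-byte-aligned delta from the chosen pool boundary is
// 0x1238, the split is
//     SubsectionAddressLow  = (0x1238 & 0x78) >> 3       = 0x07
//     SubsectionAddressHigh = (0x1238 & 0xFFFFF80) >> 7  = 0x24
// and the delta can be rebuilt as (0x24 << 7) | (0x07 << 3) = 0x1238.
//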
|
2012-08-24 06:08:20 +00:00
|
|
|
|
2013-11-23 22:34:20 +00:00
|
|
|
FORCEINLINE
|
|
|
|
BOOLEAN
|
|
|
|
MI_IS_MAPPED_PTE(PMMPTE PointerPte)
|
|
|
|
{
|
|
|
|
/// \todo Make this reasonable code, this is UGLY!
|
|
|
|
return ((PointerPte->u.Long & 0xFFFFFC01) != 0);
|
|
|
|
}
|
|
|
|
|
2010-10-06 12:16:51 +00:00
|
|
|
#endif
|
2010-10-04 18:34:41 +00:00
|
|
|
|
2016-08-19 17:24:53 +00:00
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MI_MAKE_TRANSITION_PTE(_Out_ PMMPTE NewPte,
|
|
|
|
_In_ PFN_NUMBER Page,
|
|
|
|
_In_ ULONG Protection)
|
|
|
|
{
|
|
|
|
NewPte->u.Long = 0;
|
|
|
|
NewPte->u.Trans.Transition = 1;
|
|
|
|
NewPte->u.Trans.Protection = Protection;
|
|
|
|
NewPte->u.Trans.PageFrameNumber = Page;
|
|
|
|
}
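//
// Note: the PTE produced above is deliberately not valid (the hardware Valid
// bit stays clear); only the Transition flag, the protection and the page
// frame are recorded. It therefore presumably has to be stored with
// MI_WRITE_INVALID_PTE rather than MI_WRITE_VALID_PTE.
//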
|
|
|
|
|
2010-05-12 20:48:15 +00:00
|
|
|
//
|
|
|
|
// Returns whether the page is physically resident (i.e. a large page)
|
|
|
|
// FIXFIX: CISC/x86 only?
|
|
|
|
//
|
2010-04-20 22:47:51 +00:00
|
|
|
FORCEINLINE
|
|
|
|
BOOLEAN
|
|
|
|
MI_IS_PHYSICAL_ADDRESS(IN PVOID Address)
|
|
|
|
{
|
|
|
|
PMMPDE PointerPde;
|
2010-12-22 16:14:58 +00:00
|
|
|
|
2010-04-20 22:47:51 +00:00
|
|
|
/* Large pages are never paged out, always physically resident */
|
|
|
|
PointerPde = MiAddressToPde(Address);
|
|
|
|
return ((PointerPde->u.Hard.LargePage) && (PointerPde->u.Hard.Valid));
|
|
|
|
}
|
|
|
|
|
2010-06-06 18:45:46 +00:00
|
|
|
//
|
|
|
|
// Writes a valid PTE
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
2013-11-26 13:45:33 +00:00
|
|
|
VOID
|
2010-06-06 18:45:46 +00:00
|
|
|
MI_WRITE_VALID_PTE(IN PMMPTE PointerPte,
|
|
|
|
IN MMPTE TempPte)
|
|
|
|
{
|
|
|
|
/* Write the valid PTE */
|
|
|
|
ASSERT(PointerPte->u.Hard.Valid == 0);
|
|
|
|
ASSERT(TempPte.u.Hard.Valid == 1);
|
2018-03-21 20:22:03 +00:00
|
|
|
#if _M_AMD64
|
|
|
|
ASSERT(!MI_IS_PAGE_TABLE_ADDRESS(MiPteToAddress(PointerPte)) ||
|
|
|
|
(TempPte.u.Hard.NoExecute == 0));
|
|
|
|
#endif
|
2010-06-06 18:45:46 +00:00
|
|
|
*PointerPte = TempPte;
|
|
|
|
}
|
|
|
|
|
2013-11-27 00:04:26 +00:00
|
|
|
//
|
|
|
|
// Updates a valid PTE
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
2014-01-16 16:56:30 +00:00
|
|
|
VOID
|
2013-11-27 00:04:26 +00:00
|
|
|
MI_UPDATE_VALID_PTE(IN PMMPTE PointerPte,
|
|
|
|
IN MMPTE TempPte)
|
|
|
|
{
|
|
|
|
/* Write the valid PTE */
|
|
|
|
ASSERT(PointerPte->u.Hard.Valid == 1);
|
|
|
|
ASSERT(TempPte.u.Hard.Valid == 1);
|
|
|
|
ASSERT(PointerPte->u.Hard.PageFrameNumber == TempPte.u.Hard.PageFrameNumber);
|
|
|
|
*PointerPte = TempPte;
|
|
|
|
}
|
|
|
|
|
2010-06-06 18:45:46 +00:00
|
|
|
//
|
|
|
|
// Writes an invalid PTE
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
2013-11-26 13:45:33 +00:00
|
|
|
VOID
|
2010-06-06 18:45:46 +00:00
|
|
|
MI_WRITE_INVALID_PTE(IN PMMPTE PointerPte,
|
|
|
|
IN MMPTE InvalidPte)
|
|
|
|
{
|
|
|
|
/* Write the invalid PTE */
|
|
|
|
ASSERT(InvalidPte.u.Hard.Valid == 0);
|
2013-11-27 00:04:26 +00:00
|
|
|
ASSERT(InvalidPte.u.Long != 0);
|
2010-06-06 18:45:46 +00:00
|
|
|
*PointerPte = InvalidPte;
|
|
|
|
}
|
|
|
|
|
2013-11-27 00:04:26 +00:00
|
|
|
//
|
|
|
|
// Erase the PTE completely
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
2014-01-16 16:56:30 +00:00
|
|
|
VOID
|
2013-11-27 00:04:26 +00:00
|
|
|
MI_ERASE_PTE(IN PMMPTE PointerPte)
|
|
|
|
{
|
|
|
|
/* Zero out the PTE */
|
|
|
|
ASSERT(PointerPte->u.Long != 0);
|
|
|
|
PointerPte->u.Long = 0;
|
|
|
|
}
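//
// Summary of the PTE writers above, as enforced by their assertions:
// MI_WRITE_VALID_PTE publishes a valid PTE over a currently invalid one,
// MI_UPDATE_VALID_PTE rewrites a valid PTE without changing its page frame,
// MI_WRITE_INVALID_PTE stores a non-zero software (invalid) PTE, and
// MI_ERASE_PTE zeroes a PTE that is currently non-zero.
//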
|
|
|
|
|
2010-11-24 15:21:45 +00:00
|
|
|
//
|
|
|
|
// Writes a valid PDE
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
2013-11-26 13:45:33 +00:00
|
|
|
VOID
|
2010-11-24 15:21:45 +00:00
|
|
|
MI_WRITE_VALID_PDE(IN PMMPDE PointerPde,
|
|
|
|
IN MMPDE TempPde)
|
|
|
|
{
|
|
|
|
/* Write the valid PDE */
|
|
|
|
ASSERT(PointerPde->u.Hard.Valid == 0);
|
2018-03-21 20:22:03 +00:00
|
|
|
#ifdef _M_AMD64
|
|
|
|
ASSERT(PointerPde->u.Hard.NoExecute == 0);
|
|
|
|
#endif
|
2010-11-24 15:21:45 +00:00
|
|
|
ASSERT(TempPde.u.Hard.Valid == 1);
|
|
|
|
*PointerPde = TempPde;
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Writes an invalid PDE
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
2013-11-26 13:45:33 +00:00
|
|
|
VOID
|
2010-11-24 15:21:45 +00:00
|
|
|
MI_WRITE_INVALID_PDE(IN PMMPDE PointerPde,
|
|
|
|
IN MMPDE InvalidPde)
|
|
|
|
{
|
|
|
|
/* Write the invalid PDE */
|
|
|
|
ASSERT(InvalidPde.u.Hard.Valid == 0);
|
2013-11-27 00:04:26 +00:00
|
|
|
ASSERT(InvalidPde.u.Long != 0);
|
2018-03-21 20:22:03 +00:00
|
|
|
#ifdef _M_AMD64
|
|
|
|
ASSERT(InvalidPde.u.Soft.Protection == MM_EXECUTE_READWRITE);
|
|
|
|
#endif
|
2010-11-24 15:21:45 +00:00
|
|
|
*PointerPde = InvalidPde;
|
|
|
|
}
|
|
|
|
|
2010-07-22 18:26:04 +00:00
|
|
|
//
|
|
|
|
// Checks if the thread already owns a working set
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
|
|
|
BOOLEAN
|
|
|
|
MM_ANY_WS_LOCK_HELD(IN PETHREAD Thread)
|
|
|
|
{
|
|
|
|
/* If any of these are held, return TRUE */
|
|
|
|
return ((Thread->OwnsProcessWorkingSetExclusive) ||
|
|
|
|
(Thread->OwnsProcessWorkingSetShared) ||
|
|
|
|
(Thread->OwnsSystemWorkingSetExclusive) ||
|
|
|
|
(Thread->OwnsSystemWorkingSetShared) ||
|
|
|
|
(Thread->OwnsSessionWorkingSetExclusive) ||
|
|
|
|
(Thread->OwnsSessionWorkingSetShared));
|
|
|
|
}
|
|
|
|
|
2021-05-11 10:03:52 +00:00
|
|
|
FORCEINLINE
|
|
|
|
BOOLEAN
|
|
|
|
MM_ANY_WS_LOCK_HELD_EXCLUSIVE(_In_ PETHREAD Thread)
|
|
|
|
{
|
|
|
|
return ((Thread->OwnsProcessWorkingSetExclusive) ||
|
|
|
|
(Thread->OwnsSystemWorkingSetExclusive) ||
|
|
|
|
(Thread->OwnsSessionWorkingSetExclusive));
|
|
|
|
}
|
|
|
|
|
2010-07-22 18:26:04 +00:00
|
|
|
//
|
|
|
|
// Checks if the process owns the working set lock
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
|
|
|
BOOLEAN
|
|
|
|
MI_WS_OWNER(IN PEPROCESS Process)
|
|
|
|
{
|
|
|
|
/* Check if this process is the owner, and that the thread owns the WS */
|
2012-09-03 06:23:31 +00:00
|
|
|
if (PsGetCurrentThread()->OwnsProcessWorkingSetExclusive == 0)
|
|
|
|
{
|
2014-05-22 10:08:44 +00:00
|
|
|
DPRINT("Thread: %p is not an owner\n", PsGetCurrentThread());
|
2012-09-03 06:23:31 +00:00
|
|
|
}
|
|
|
|
if (KeGetCurrentThread()->ApcState.Process != &Process->Pcb)
|
|
|
|
{
|
2014-05-22 10:08:44 +00:00
|
|
|
DPRINT("Current thread %p is attached to another process %p\n", PsGetCurrentThread(), Process);
|
2012-09-03 06:23:31 +00:00
|
|
|
}
|
2010-07-22 18:26:04 +00:00
|
|
|
return ((KeGetCurrentThread()->ApcState.Process == &Process->Pcb) &&
|
|
|
|
((PsGetCurrentThread()->OwnsProcessWorkingSetExclusive) ||
|
|
|
|
(PsGetCurrentThread()->OwnsProcessWorkingSetShared)));
|
|
|
|
}
|
|
|
|
|
2012-08-03 11:34:35 +00:00
|
|
|
//
|
|
|
|
// New ARM3<->RosMM PAGE Architecture
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
2013-11-26 13:45:33 +00:00
|
|
|
BOOLEAN
|
2021-03-26 08:32:34 +00:00
|
|
|
MiIsRosSectionObject(IN PSECTION Section)
|
2012-08-03 11:34:35 +00:00
|
|
|
{
|
2021-03-26 08:32:34 +00:00
|
|
|
return Section->u.Flags.filler;
|
2012-08-03 11:34:35 +00:00
|
|
|
}
|
|
|
|
|
2014-02-08 15:54:38 +00:00
|
|
|
#define MI_IS_ROS_PFN(x) ((x)->u4.AweAllocation == TRUE)
|
2012-08-03 11:34:35 +00:00
|
|
|
|
|
|
|
VOID
|
|
|
|
NTAPI
|
|
|
|
MiDecrementReferenceCount(
|
|
|
|
IN PMMPFN Pfn1,
|
|
|
|
IN PFN_NUMBER PageFrameIndex
|
|
|
|
);
|
|
|
|
|
2012-09-03 06:23:31 +00:00
|
|
|
FORCEINLINE
|
|
|
|
BOOLEAN
|
|
|
|
MI_IS_WS_UNSAFE(IN PEPROCESS Process)
|
|
|
|
{
|
|
|
|
return (Process->Vm.Flags.AcquiredUnsafe == TRUE);
|
|
|
|
}
|
|
|
|
|
2010-07-22 18:26:04 +00:00
|
|
|
//
|
|
|
|
// Locks the working set for the given process
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MiLockProcessWorkingSet(IN PEPROCESS Process,
|
|
|
|
IN PETHREAD Thread)
|
|
|
|
{
|
|
|
|
/* Shouldn't already be owning the process working set */
|
|
|
|
ASSERT(Thread->OwnsProcessWorkingSetShared == FALSE);
|
|
|
|
ASSERT(Thread->OwnsProcessWorkingSetExclusive == FALSE);
|
|
|
|
|
|
|
|
/* Block APCs, make sure that still nothing is already held */
|
|
|
|
KeEnterGuardedRegion();
|
|
|
|
ASSERT(!MM_ANY_WS_LOCK_HELD(Thread));
|
|
|
|
|
2012-09-03 06:23:31 +00:00
|
|
|
/* Lock the working set */
|
|
|
|
ExAcquirePushLockExclusive(&Process->Vm.WorkingSetMutex);
|
|
|
|
|
|
|
|
/* Now claim that we own the lock */
|
|
|
|
ASSERT(!MI_IS_WS_UNSAFE(Process));
|
|
|
|
ASSERT(Thread->OwnsProcessWorkingSetExclusive == FALSE);
|
|
|
|
Thread->OwnsProcessWorkingSetExclusive = TRUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MiLockProcessWorkingSetShared(IN PEPROCESS Process,
|
|
|
|
IN PETHREAD Thread)
|
|
|
|
{
|
|
|
|
/* Shouldn't already be owning the process working set */
|
|
|
|
ASSERT(Thread->OwnsProcessWorkingSetShared == FALSE);
|
|
|
|
ASSERT(Thread->OwnsProcessWorkingSetExclusive == FALSE);
|
|
|
|
|
|
|
|
/* Block APCs, make sure that still nothing is already held */
|
|
|
|
KeEnterGuardedRegion();
|
|
|
|
ASSERT(!MM_ANY_WS_LOCK_HELD(Thread));
|
|
|
|
|
|
|
|
/* Lock the working set */
|
|
|
|
ExAcquirePushLockShared(&Process->Vm.WorkingSetMutex);
|
|
|
|
|
|
|
|
/* Now claim that we own the lock */
|
|
|
|
ASSERT(!MI_IS_WS_UNSAFE(Process));
|
|
|
|
ASSERT(Thread->OwnsProcessWorkingSetShared == FALSE);
|
|
|
|
ASSERT(Thread->OwnsProcessWorkingSetExclusive == FALSE);
|
|
|
|
Thread->OwnsProcessWorkingSetShared = TRUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MiLockProcessWorkingSetUnsafe(IN PEPROCESS Process,
|
|
|
|
IN PETHREAD Thread)
|
|
|
|
{
|
|
|
|
/* Shouldn't already be owning the process working set */
|
|
|
|
ASSERT(Thread->OwnsProcessWorkingSetExclusive == FALSE);
|
2010-07-22 18:26:04 +00:00
|
|
|
|
2012-09-03 06:23:31 +00:00
|
|
|
/* APCs must be blocked, make sure that still nothing is already held */
|
|
|
|
ASSERT(KeAreAllApcsDisabled() == TRUE);
|
|
|
|
ASSERT(!MM_ANY_WS_LOCK_HELD(Thread));
|
|
|
|
|
|
|
|
/* Lock the working set */
|
|
|
|
ExAcquirePushLockExclusive(&Process->Vm.WorkingSetMutex);
|
2010-07-22 18:26:04 +00:00
|
|
|
|
2012-09-03 06:23:31 +00:00
|
|
|
/* Now claim that we own the lock */
|
|
|
|
ASSERT(!MI_IS_WS_UNSAFE(Process));
|
|
|
|
Process->Vm.Flags.AcquiredUnsafe = 1;
|
|
|
|
ASSERT(Thread->OwnsProcessWorkingSetExclusive == FALSE);
|
2010-07-22 18:26:04 +00:00
|
|
|
Thread->OwnsProcessWorkingSetExclusive = TRUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Unlocks the working set for the given process
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MiUnlockProcessWorkingSet(IN PEPROCESS Process,
|
|
|
|
IN PETHREAD Thread)
|
|
|
|
{
|
2012-09-03 06:23:31 +00:00
|
|
|
/* Make sure we are the owner of a safe acquisition */
|
2010-07-22 18:26:04 +00:00
|
|
|
ASSERT(MI_WS_OWNER(Process));
|
2012-09-03 06:23:31 +00:00
|
|
|
ASSERT(!MI_IS_WS_UNSAFE(Process));
|
2010-12-22 16:14:58 +00:00
|
|
|
|
2010-07-22 18:26:04 +00:00
|
|
|
/* The thread doesn't own it anymore */
|
|
|
|
ASSERT(Thread->OwnsProcessWorkingSetExclusive == TRUE);
|
|
|
|
Thread->OwnsProcessWorkingSetExclusive = FALSE;
|
2010-12-22 16:14:58 +00:00
|
|
|
|
2012-09-03 06:23:31 +00:00
|
|
|
/* Release the lock and re-enable APCs */
|
|
|
|
ExReleasePushLockExclusive(&Process->Vm.WorkingSetMutex);
|
2010-07-22 18:26:04 +00:00
|
|
|
KeLeaveGuardedRegion();
|
|
|
|
}
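//
// Illustrative pairing (a sketch, not lifted from a specific caller): the
// exclusive process working-set lock is taken around working-set updates and
// released with the matching unlock, which also leaves the guarded region
// entered by the lock routine.
//
//     PETHREAD Thread = PsGetCurrentThread();
//     MiLockProcessWorkingSet(Process, Thread);
//     /* ... manipulate the process working set ... */
//     MiUnlockProcessWorkingSet(Process, Thread);
//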
|
|
|
|
|
2013-11-05 22:03:42 +00:00
|
|
|
//
|
|
|
|
// Unlocks the working set for the given process
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MiUnlockProcessWorkingSetShared(IN PEPROCESS Process,
|
|
|
|
IN PETHREAD Thread)
|
|
|
|
{
|
|
|
|
/* Make sure we are the owner of a safe acquisition (because shared) */
|
|
|
|
ASSERT(MI_WS_OWNER(Process));
|
|
|
|
ASSERT(!MI_IS_WS_UNSAFE(Process));
|
|
|
|
|
|
|
|
/* Ensure we are in a shared acquisition */
|
|
|
|
ASSERT(Thread->OwnsProcessWorkingSetShared == TRUE);
|
|
|
|
ASSERT(Thread->OwnsProcessWorkingSetExclusive == FALSE);
|
|
|
|
|
|
|
|
/* Don't claim the lock any longer */
|
|
|
|
Thread->OwnsProcessWorkingSetShared = FALSE;
|
|
|
|
|
|
|
|
/* Release the lock and re-enable APCs */
|
|
|
|
ExReleasePushLockShared(&Process->Vm.WorkingSetMutex);
|
|
|
|
KeLeaveGuardedRegion();
|
|
|
|
}
|
|
|
|
|
2012-09-03 06:23:31 +00:00
|
|
|
//
|
|
|
|
// Unlocks the working set for the given process
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MiUnlockProcessWorkingSetUnsafe(IN PEPROCESS Process,
|
|
|
|
IN PETHREAD Thread)
|
|
|
|
{
|
|
|
|
/* Make sure we are the owner of an unsafe acquisition */
|
|
|
|
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
|
|
|
|
ASSERT(KeAreAllApcsDisabled() == TRUE);
|
|
|
|
ASSERT(MI_WS_OWNER(Process));
|
|
|
|
ASSERT(MI_IS_WS_UNSAFE(Process));
|
|
|
|
|
|
|
|
/* No longer unsafe */
|
|
|
|
Process->Vm.Flags.AcquiredUnsafe = 0;
|
|
|
|
|
|
|
|
/* The thread doesn't own it anymore */
|
|
|
|
ASSERT(Thread->OwnsProcessWorkingSetExclusive == TRUE);
|
|
|
|
Thread->OwnsProcessWorkingSetExclusive = FALSE;
|
|
|
|
|
|
|
|
/* Release the lock but don't touch APC state */
|
|
|
|
ExReleasePushLockExclusive(&Process->Vm.WorkingSetMutex);
|
|
|
|
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
|
|
|
|
}
|
|
|
|
|
2010-07-22 18:26:04 +00:00
|
|
|
//
|
|
|
|
// Locks the working set
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MiLockWorkingSet(IN PETHREAD Thread,
|
|
|
|
IN PMMSUPPORT WorkingSet)
|
|
|
|
{
|
|
|
|
/* Block APCs */
|
|
|
|
KeEnterGuardedRegion();
|
2010-12-22 16:14:58 +00:00
|
|
|
|
2010-07-22 18:26:04 +00:00
|
|
|
/* Working set should be in global memory */
|
|
|
|
ASSERT(MI_IS_SESSION_ADDRESS((PVOID)WorkingSet) == FALSE);
|
2010-12-22 16:14:58 +00:00
|
|
|
|
2010-07-22 18:26:04 +00:00
|
|
|
/* Thread shouldn't already be owning something */
|
|
|
|
ASSERT(!MM_ANY_WS_LOCK_HELD(Thread));
|
2010-12-22 16:14:58 +00:00
|
|
|
|
2012-09-03 06:23:31 +00:00
|
|
|
/* Lock this working set */
|
|
|
|
ExAcquirePushLockExclusive(&WorkingSet->WorkingSetMutex);
|
2010-12-22 16:14:58 +00:00
|
|
|
|
2010-07-22 18:26:04 +00:00
|
|
|
/* Which working set is this? */
|
|
|
|
if (WorkingSet == &MmSystemCacheWs)
|
|
|
|
{
|
|
|
|
/* Own the system working set */
|
|
|
|
ASSERT((Thread->OwnsSystemWorkingSetExclusive == FALSE) &&
|
|
|
|
(Thread->OwnsSystemWorkingSetShared == FALSE));
|
|
|
|
Thread->OwnsSystemWorkingSetExclusive = TRUE;
|
|
|
|
}
|
|
|
|
else if (WorkingSet->Flags.SessionSpace)
|
|
|
|
{
|
2012-08-01 07:54:37 +00:00
|
|
|
/* Own the session working set */
|
|
|
|
ASSERT((Thread->OwnsSessionWorkingSetExclusive == FALSE) &&
|
|
|
|
(Thread->OwnsSessionWorkingSetShared == FALSE));
|
2012-08-03 11:34:35 +00:00
|
|
|
Thread->OwnsSessionWorkingSetExclusive = TRUE;
|
2010-07-22 18:26:04 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Own the process working set */
|
|
|
|
ASSERT((Thread->OwnsProcessWorkingSetExclusive == FALSE) &&
|
|
|
|
(Thread->OwnsProcessWorkingSetShared == FALSE));
|
|
|
|
Thread->OwnsProcessWorkingSetExclusive = TRUE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-22 13:24:33 +00:00
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MiLockWorkingSetShared(
|
|
|
|
_In_ PETHREAD Thread,
|
|
|
|
_In_ PMMSUPPORT WorkingSet)
|
|
|
|
{
|
|
|
|
/* Block APCs */
|
|
|
|
KeEnterGuardedRegion();
|
|
|
|
|
|
|
|
/* Working set should be in global memory */
|
|
|
|
ASSERT(MI_IS_SESSION_ADDRESS((PVOID)WorkingSet) == FALSE);
|
|
|
|
|
|
|
|
/* Thread shouldn't already be owning something */
|
|
|
|
ASSERT(!MM_ANY_WS_LOCK_HELD(Thread));
|
|
|
|
|
|
|
|
/* Lock this working set */
|
|
|
|
ExAcquirePushLockShared(&WorkingSet->WorkingSetMutex);
|
|
|
|
|
|
|
|
/* Which working set is this? */
|
|
|
|
if (WorkingSet == &MmSystemCacheWs)
|
|
|
|
{
|
|
|
|
/* Own the system working set */
|
|
|
|
ASSERT((Thread->OwnsSystemWorkingSetExclusive == FALSE) &&
|
|
|
|
(Thread->OwnsSystemWorkingSetShared == FALSE));
|
|
|
|
Thread->OwnsSystemWorkingSetShared = TRUE;
|
|
|
|
}
|
|
|
|
else if (WorkingSet->Flags.SessionSpace)
|
|
|
|
{
|
|
|
|
/* Own the session working set */
|
|
|
|
ASSERT((Thread->OwnsSessionWorkingSetExclusive == FALSE) &&
|
|
|
|
(Thread->OwnsSessionWorkingSetShared == FALSE));
|
|
|
|
Thread->OwnsSessionWorkingSetShared = TRUE;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Own the process working set */
|
|
|
|
ASSERT((Thread->OwnsProcessWorkingSetExclusive == FALSE) &&
|
|
|
|
(Thread->OwnsProcessWorkingSetShared == FALSE));
|
|
|
|
Thread->OwnsProcessWorkingSetShared = TRUE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-07-22 18:26:04 +00:00
|
|
|
//
|
|
|
|
// Unlocks the working set
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MiUnlockWorkingSet(IN PETHREAD Thread,
|
|
|
|
IN PMMSUPPORT WorkingSet)
|
|
|
|
{
|
|
|
|
/* Working set should be in global memory */
|
|
|
|
ASSERT(MI_IS_SESSION_ADDRESS((PVOID)WorkingSet) == FALSE);
|
2010-12-22 16:14:58 +00:00
|
|
|
|
2010-07-22 18:26:04 +00:00
|
|
|
/* Which working set is this? */
|
|
|
|
if (WorkingSet == &MmSystemCacheWs)
|
|
|
|
{
|
|
|
|
/* Release the system working set */
|
2020-10-22 13:24:33 +00:00
|
|
|
ASSERT((Thread->OwnsSystemWorkingSetExclusive == TRUE) &&
|
|
|
|
(Thread->OwnsSystemWorkingSetShared == FALSE));
|
2010-07-22 18:26:04 +00:00
|
|
|
Thread->OwnsSystemWorkingSetExclusive = FALSE;
|
|
|
|
}
|
|
|
|
else if (WorkingSet->Flags.SessionSpace)
|
|
|
|
{
|
2012-08-01 07:54:37 +00:00
|
|
|
/* Release the session working set */
|
2020-10-22 13:24:33 +00:00
|
|
|
ASSERT((Thread->OwnsSessionWorkingSetExclusive == TRUE) &&
|
|
|
|
(Thread->OwnsSessionWorkingSetShared == FALSE));
|
|
|
|
Thread->OwnsSessionWorkingSetExclusive = FALSE;
|
2010-07-22 18:26:04 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Release the process working set */
|
2020-10-22 13:24:33 +00:00
|
|
|
ASSERT((Thread->OwnsProcessWorkingSetExclusive == TRUE) &&
|
|
|
|
(Thread->OwnsProcessWorkingSetShared == FALSE));
|
2010-07-22 18:26:04 +00:00
|
|
|
Thread->OwnsProcessWorkingSetExclusive = FALSE;
|
|
|
|
}
|
2010-12-22 16:14:58 +00:00
|
|
|
|
2012-09-03 06:23:31 +00:00
|
|
|
/* Release the working set lock */
|
|
|
|
ExReleasePushLockExclusive(&WorkingSet->WorkingSetMutex);
|
2010-07-22 18:26:04 +00:00
|
|
|
|
|
|
|
/* Unblock APCs */
|
|
|
|
KeLeaveGuardedRegion();
|
|
|
|
}
|
|
|
|
|
2020-10-22 13:24:33 +00:00
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MiUnlockWorkingSetShared(
|
|
|
|
_In_ PETHREAD Thread,
|
|
|
|
_In_ PMMSUPPORT WorkingSet)
|
|
|
|
{
|
|
|
|
/* Working set should be in global memory */
|
|
|
|
ASSERT(MI_IS_SESSION_ADDRESS((PVOID)WorkingSet) == FALSE);
|
|
|
|
|
|
|
|
/* Which working set is this? */
|
|
|
|
if (WorkingSet == &MmSystemCacheWs)
|
|
|
|
{
|
|
|
|
/* Release the system working set */
|
|
|
|
ASSERT((Thread->OwnsSystemWorkingSetExclusive == FALSE) &&
|
|
|
|
(Thread->OwnsSystemWorkingSetShared == TRUE));
|
|
|
|
Thread->OwnsSystemWorkingSetShared = FALSE;
|
|
|
|
}
|
|
|
|
else if (WorkingSet->Flags.SessionSpace)
|
|
|
|
{
|
|
|
|
/* Release the session working set */
|
|
|
|
ASSERT((Thread->OwnsSessionWorkingSetExclusive == FALSE) &&
|
|
|
|
(Thread->OwnsSessionWorkingSetShared == TRUE));
|
|
|
|
Thread->OwnsSessionWorkingSetShared = FALSE;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Release the process working set */
|
|
|
|
ASSERT((Thread->OwnsProcessWorkingSetExclusive == FALSE) &&
|
|
|
|
(Thread->OwnsProcessWorkingSetShared == TRUE));
|
|
|
|
Thread->OwnsProcessWorkingSetShared = FALSE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Release the working set lock */
|
|
|
|
ExReleasePushLockShared(&WorkingSet->WorkingSetMutex);
|
|
|
|
|
|
|
|
/* Unblock APCs */
|
|
|
|
KeLeaveGuardedRegion();
|
|
|
|
}
|
|
|
|
|
2021-02-23 16:21:26 +00:00
|
|
|
FORCEINLINE
|
|
|
|
BOOLEAN
|
|
|
|
MiConvertSharedWorkingSetLockToExclusive(
|
|
|
|
_In_ PETHREAD Thread,
|
|
|
|
_In_ PMMSUPPORT Vm)
|
|
|
|
{
|
|
|
|
/* Sanity check: No exclusive lock. */
|
|
|
|
ASSERT(!Thread->OwnsProcessWorkingSetExclusive);
|
|
|
|
ASSERT(!Thread->OwnsSessionWorkingSetExclusive);
|
|
|
|
ASSERT(!Thread->OwnsSystemWorkingSetExclusive);
|
|
|
|
|
|
|
|
/* And it should have one and only one shared lock */
|
|
|
|
ASSERT((Thread->OwnsProcessWorkingSetShared + Thread->OwnsSessionWorkingSetShared + Thread->OwnsSystemWorkingSetShared) == 1);
|
|
|
|
|
|
|
|
/* Try the in-place conversion; bail out if it fails */
|
|
|
|
if (!ExConvertPushLockSharedToExclusive(&Vm->WorkingSetMutex))
|
|
|
|
return FALSE;
|
|
|
|
|
|
|
|
if (Vm == &MmSystemCacheWs)
|
|
|
|
{
|
|
|
|
ASSERT(Thread->OwnsSystemWorkingSetShared);
|
|
|
|
Thread->OwnsSystemWorkingSetShared = FALSE;
|
|
|
|
Thread->OwnsSystemWorkingSetExclusive = TRUE;
|
|
|
|
}
|
|
|
|
else if (Vm->Flags.SessionSpace)
|
|
|
|
{
|
|
|
|
ASSERT(Thread->OwnsSessionWorkingSetShared);
|
|
|
|
Thread->OwnsSessionWorkingSetShared = FALSE;
|
|
|
|
Thread->OwnsSessionWorkingSetExclusive = TRUE;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
ASSERT(Thread->OwnsProcessWorkingSetShared);
|
|
|
|
Thread->OwnsProcessWorkingSetShared = FALSE;
|
|
|
|
Thread->OwnsProcessWorkingSetExclusive = TRUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
return TRUE;
|
|
|
|
}
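//
// Illustrative pattern (a sketch under the assumption that the caller holds
// the lock shared): if the in-place conversion fails due to contention, a
// caller would typically drop the shared lock and reacquire it exclusively.
//
//     if (!MiConvertSharedWorkingSetLockToExclusive(Thread, Vm))
//     {
//         MiUnlockWorkingSetShared(Thread, Vm);
//         MiLockWorkingSet(Thread, Vm);
//     }
//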
|
|
|
|
|
2012-09-03 06:23:31 +00:00
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MiUnlockProcessWorkingSetForFault(IN PEPROCESS Process,
|
|
|
|
IN PETHREAD Thread,
|
2013-10-11 18:12:16 +00:00
|
|
|
OUT PBOOLEAN Safe,
|
|
|
|
OUT PBOOLEAN Shared)
|
2012-09-03 06:23:31 +00:00
|
|
|
{
|
|
|
|
ASSERT(MI_WS_OWNER(Process));
|
|
|
|
|
|
|
|
/* Check if the current owner is unsafe */
|
|
|
|
if (MI_IS_WS_UNSAFE(Process))
|
|
|
|
{
|
|
|
|
/* Release unsafely */
|
|
|
|
MiUnlockProcessWorkingSetUnsafe(Process, Thread);
|
2013-10-11 18:12:16 +00:00
|
|
|
*Safe = FALSE;
|
|
|
|
*Shared = FALSE;
|
2012-09-03 06:23:31 +00:00
|
|
|
}
|
|
|
|
else if (Thread->OwnsProcessWorkingSetExclusive == 1)
|
|
|
|
{
|
|
|
|
/* Owner is safe and exclusive, release normally */
|
|
|
|
MiUnlockProcessWorkingSet(Process, Thread);
|
2013-10-11 18:12:16 +00:00
|
|
|
*Safe = TRUE;
|
|
|
|
*Shared = FALSE;
|
2012-09-03 06:23:31 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Owner is shared (implies safe), release normally */
|
2013-11-05 22:03:42 +00:00
|
|
|
MiUnlockProcessWorkingSetShared(Process, Thread);
|
2013-10-11 18:12:16 +00:00
|
|
|
*Safe = TRUE;
|
|
|
|
*Shared = TRUE;
|
2012-09-03 06:23:31 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MiLockProcessWorkingSetForFault(IN PEPROCESS Process,
|
|
|
|
IN PETHREAD Thread,
|
|
|
|
IN BOOLEAN Safe,
|
|
|
|
IN BOOLEAN Shared)
|
|
|
|
{
|
|
|
|
/* Check if this was a safe lock or not */
|
|
|
|
if (Safe)
|
|
|
|
{
|
2013-11-05 22:03:42 +00:00
|
|
|
if (Shared)
|
|
|
|
{
|
|
|
|
/* Reacquire safely & shared */
|
|
|
|
MiLockProcessWorkingSetShared(Process, Thread);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Reacquire safely */
|
|
|
|
MiLockProcessWorkingSet(Process, Thread);
|
|
|
|
}
|
2012-09-03 06:23:31 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2013-11-05 22:03:42 +00:00
|
|
|
/* Unsafe lock cannot be shared */
|
|
|
|
ASSERT(Shared == FALSE);
|
2012-09-03 06:23:31 +00:00
|
|
|
/* Reacquire unsafely */
|
|
|
|
MiLockProcessWorkingSetUnsafe(Process, Thread);
|
|
|
|
}
|
|
|
|
}
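//
// Note: the two fault-path helpers above let the page-fault code drop
// whatever flavor of the process working-set lock it currently holds and
// later restore exactly that flavor: Safe records whether the release went
// through the safe or the unsafe path, and Shared records whether the safe
// acquisition was shared or exclusive.
//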
|
|
|
|
|
2015-09-05 14:39:40 +00:00
|
|
|
FORCEINLINE
|
|
|
|
KIRQL
|
|
|
|
MiAcquireExpansionLock(VOID)
|
|
|
|
{
|
|
|
|
KIRQL OldIrql;
|
|
|
|
|
|
|
|
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
|
|
|
|
KeAcquireSpinLock(&MmExpansionLock, &OldIrql);
|
|
|
|
ASSERT(MiExpansionLockOwner == NULL);
|
|
|
|
MiExpansionLockOwner = PsGetCurrentThread();
|
|
|
|
return OldIrql;
|
|
|
|
}
|
|
|
|
|
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MiReleaseExpansionLock(KIRQL OldIrql)
|
|
|
|
{
|
|
|
|
ASSERT(MiExpansionLockOwner == PsGetCurrentThread());
|
|
|
|
MiExpansionLockOwner = NULL;
|
|
|
|
KeReleaseSpinLock(&MmExpansionLock, OldIrql);
|
|
|
|
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
|
|
|
|
}
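//
// Illustrative usage (a sketch with a hypothetical caller): the expansion
// lock is a plain spinlock with debug ownership tracking, so acquisition
// returns the previous IRQL, which must be passed back on release.
//
//     KIRQL OldIrql = MiAcquireExpansionLock();
//     /* ... touch the expansion-related state ... */
//     MiReleaseExpansionLock(OldIrql);
//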
|
|
|
|
|
2010-10-19 17:07:11 +00:00
|
|
|
//
|
|
|
|
// Returns the ProtoPTE inside a VAD for the given VPN
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
|
|
|
PMMPTE
|
|
|
|
MI_GET_PROTOTYPE_PTE_FOR_VPN(IN PMMVAD Vad,
|
|
|
|
IN ULONG_PTR Vpn)
|
|
|
|
{
|
|
|
|
PMMPTE ProtoPte;
|
|
|
|
|
|
|
|
/* Find the offset within the VAD's prototype PTEs */
|
2010-10-19 17:39:22 +00:00
|
|
|
ProtoPte = Vad->FirstPrototypePte + (Vpn - Vad->StartingVpn);
|
2010-10-19 17:07:11 +00:00
|
|
|
ASSERT(ProtoPte <= Vad->LastContiguousPte);
|
|
|
|
return ProtoPte;
|
|
|
|
}
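//
// Example (illustrative numbers only): for a VAD with StartingVpn 0x100
// whose FirstPrototypePte points at the start of its prototype PTE array,
// the prototype PTE for VPN 0x105 is simply FirstPrototypePte + 5.
//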
|
|
|
|
|
2010-11-09 13:26:26 +00:00
|
|
|
//
|
|
|
|
// Returns the PFN Database entry for the given page number
|
|
|
|
// Warning: This is not necessarily a valid PFN database entry!
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
|
|
|
PMMPFN
|
|
|
|
MI_PFN_ELEMENT(IN PFN_NUMBER Pfn)
|
|
|
|
{
|
|
|
|
/* Get the entry */
|
|
|
|
return &MmPfnDatabase[Pfn];
|
|
|
|
}
|
|
|
|
|
2012-08-03 11:34:35 +00:00
|
|
|
//
|
|
|
|
// Drops a locked page without dereferencing it
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MiDropLockCount(IN PMMPFN Pfn1)
|
|
|
|
{
|
|
|
|
/* This page shouldn't be locked, but it should be valid */
|
|
|
|
ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
|
|
|
|
ASSERT(Pfn1->u2.ShareCount == 0);
|
|
|
|
|
|
|
|
/* Is this the last reference to the page */
|
|
|
|
if (Pfn1->u3.e2.ReferenceCount == 1)
|
|
|
|
{
|
|
|
|
/* It better not be valid */
|
|
|
|
ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
|
|
|
|
|
|
|
|
/* Is it a prototype PTE? */
|
|
|
|
if ((Pfn1->u3.e1.PrototypePte == 1) &&
|
|
|
|
(Pfn1->OriginalPte.u.Soft.Prototype == 1))
|
|
|
|
{
|
2012-09-02 20:56:55 +00:00
|
|
|
/* FIXME: We should return commit */
|
|
|
|
DPRINT1("Not returning commit for prototype PTE\n");
|
2012-08-03 11:34:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Update the counter */
|
|
|
|
InterlockedDecrementSizeT(&MmSystemLockPagesCount);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Drops a locked page and dereferences it
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MiDereferencePfnAndDropLockCount(IN PMMPFN Pfn1)
|
|
|
|
{
|
|
|
|
USHORT RefCount, OldRefCount;
|
|
|
|
PFN_NUMBER PageFrameIndex;
|
|
|
|
|
|
|
|
/* Loop while we decrement the page successfully */
|
|
|
|
do
|
|
|
|
{
|
|
|
|
/* There should be at least one reference */
|
|
|
|
OldRefCount = Pfn1->u3.e2.ReferenceCount;
|
|
|
|
ASSERT(OldRefCount != 0);
|
|
|
|
|
|
|
|
/* Are we the last one */
|
|
|
|
if (OldRefCount == 1)
|
|
|
|
{
|
|
|
|
/* The page shouldn't be shared nor active at this point */
|
|
|
|
ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
|
|
|
|
ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
|
|
|
|
ASSERT(Pfn1->u2.ShareCount == 0);
|
|
|
|
|
|
|
|
/* Is it a prototype PTE? */
|
|
|
|
if ((Pfn1->u3.e1.PrototypePte == 1) &&
|
|
|
|
(Pfn1->OriginalPte.u.Soft.Prototype == 1))
|
|
|
|
{
|
2012-09-02 20:56:55 +00:00
|
|
|
/* FIXME: We should return commit */
|
|
|
|
DPRINT1("Not returning commit for prototype PTE\n");
|
2012-08-03 11:34:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Update the counter, and drop a reference the long way */
|
|
|
|
InterlockedDecrementSizeT(&MmSystemLockPagesCount);
|
|
|
|
PageFrameIndex = MiGetPfnEntryIndex(Pfn1);
|
|
|
|
MiDecrementReferenceCount(Pfn1, PageFrameIndex);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Drop a reference the short way, and that's it */
|
|
|
|
RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
|
|
|
|
OldRefCount - 1,
|
|
|
|
OldRefCount);
|
|
|
|
ASSERT(RefCount != 0);
|
|
|
|
} while (OldRefCount != RefCount);
|
|
|
|
|
|
|
|
/* If we got here, there should be more than one reference */
|
|
|
|
ASSERT(RefCount > 1);
|
|
|
|
if (RefCount == 2)
|
|
|
|
{
|
|
|
|
/* Is it still being shared? */
|
|
|
|
if (Pfn1->u2.ShareCount >= 1)
|
|
|
|
{
|
|
|
|
/* Then it should be valid */
|
|
|
|
ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
|
|
|
|
|
|
|
|
/* Is it a prototype PTE? */
|
|
|
|
if ((Pfn1->u3.e1.PrototypePte == 1) &&
|
|
|
|
(Pfn1->OriginalPte.u.Soft.Prototype == 1))
|
|
|
|
{
|
|
|
|
/* We don't handle this */
|
|
|
|
ASSERT(FALSE);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Update the counter */
|
|
|
|
InterlockedDecrementSizeT(&MmSystemLockPagesCount);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
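//
// Note: the loop above, like the reference-count bumps further down, uses a
// standard lock-free retry pattern: read u3.e2.ReferenceCount, compute the
// new value, and attempt InterlockedCompareExchange16; if the value returned
// (the count seen at exchange time) differs from the one the update was based
// on, another CPU changed the count in between and the operation is retried.
//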
|
|
|
|
|
|
|
|
//
|
|
|
|
// References a locked page and updates the counter
|
|
|
|
// Used in MmProbeAndLockPages to handle different edge cases
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MiReferenceProbedPageAndBumpLockCount(IN PMMPFN Pfn1)
|
|
|
|
{
|
|
|
|
USHORT RefCount, OldRefCount;
|
|
|
|
|
|
|
|
/* Sanity check */
|
|
|
|
ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
|
|
|
|
|
|
|
|
/* Does ARM3 own the page? */
|
|
|
|
if (MI_IS_ROS_PFN(Pfn1))
|
|
|
|
{
|
|
|
|
/* ReactOS Mm doesn't track share count */
|
|
|
|
ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* On ARM3 pages, we should see a valid share count */
|
|
|
|
ASSERT((Pfn1->u2.ShareCount != 0) && (Pfn1->u3.e1.PageLocation == ActiveAndValid));
|
|
|
|
|
2012-09-02 21:46:50 +00:00
|
|
|
/* Is it a prototype PTE? */
|
|
|
|
if ((Pfn1->u3.e1.PrototypePte == 1) &&
|
|
|
|
(Pfn1->OriginalPte.u.Soft.Prototype == 1))
|
|
|
|
{
|
|
|
|
/* FIXME: We should charge commit */
|
|
|
|
DPRINT1("Not charging commit for prototype PTE\n");
|
|
|
|
}
|
2012-08-03 11:34:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* More locked pages! */
|
|
|
|
InterlockedIncrementSizeT(&MmSystemLockPagesCount);
|
|
|
|
|
|
|
|
/* Loop trying to update the reference count */
|
|
|
|
do
|
|
|
|
{
|
|
|
|
/* Get the current reference count, make sure it's valid */
|
|
|
|
OldRefCount = Pfn1->u3.e2.ReferenceCount;
|
|
|
|
ASSERT(OldRefCount != 0);
|
|
|
|
ASSERT(OldRefCount < 2500);
|
|
|
|
|
|
|
|
/* Bump it up by one */
|
|
|
|
RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
|
|
|
|
OldRefCount + 1,
|
|
|
|
OldRefCount);
|
|
|
|
ASSERT(RefCount != 0);
|
|
|
|
} while (OldRefCount != RefCount);
|
|
|
|
|
|
|
|
/* Was this the first lock attempt? If not, undo our bump */
|
|
|
|
if (OldRefCount != 1) InterlockedDecrementSizeT(&MmSystemLockPagesCount);
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// References a locked page and updates the counter
|
|
|
|
// Used in all other cases except MmProbeAndLockPages
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MiReferenceUsedPageAndBumpLockCount(IN PMMPFN Pfn1)
|
|
|
|
{
|
|
|
|
USHORT NewRefCount;
|
|
|
|
|
|
|
|
/* Is it a prototype PTE? */
|
|
|
|
if ((Pfn1->u3.e1.PrototypePte == 1) &&
|
|
|
|
(Pfn1->OriginalPte.u.Soft.Prototype == 1))
|
|
|
|
{
|
2012-09-02 20:56:55 +00:00
|
|
|
/* FIXME: We should charge commit */
|
|
|
|
DPRINT1("Not charging commit for prototype PTE\n");
|
2012-08-03 11:34:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* More locked pages! */
|
|
|
|
InterlockedIncrementSizeT(&MmSystemLockPagesCount);
|
|
|
|
|
|
|
|
/* Update the reference count */
|
|
|
|
NewRefCount = InterlockedIncrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
|
|
|
|
if (NewRefCount == 2)
|
|
|
|
{
|
|
|
|
/* Is it locked or shared? */
|
|
|
|
if (Pfn1->u2.ShareCount)
|
|
|
|
{
|
|
|
|
/* It's shared, so make sure it's active */
|
|
|
|
ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* It's locked, so we shouldn't lock again */
|
|
|
|
InterlockedDecrementSizeT(&MmSystemLockPagesCount);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Someone had already locked the page, so undo our bump */
|
|
|
|
ASSERT(NewRefCount < 2500);
|
|
|
|
InterlockedDecrementSizeT(&MmSystemLockPagesCount);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// References a locked page and updates the counter
|
|
|
|
// Used in all other cases except MmProbeAndLockPages
|
|
|
|
//
|
|
|
|
FORCEINLINE
|
|
|
|
VOID
|
|
|
|
MiReferenceUnusedPageAndBumpLockCount(IN PMMPFN Pfn1)
|
|
|
|
{
|
|
|
|
USHORT NewRefCount;
|
|
|
|
|
|
|
|
/* Make sure the page isn't used yet */
|
|
|
|
ASSERT(Pfn1->u2.ShareCount == 0);
|
|
|
|
ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
|
|
|
|
|
|
|
|
/* Is it a prototype PTE? */
|
|
|
|
if ((Pfn1->u3.e1.PrototypePte == 1) &&
|
|
|
|
(Pfn1->OriginalPte.u.Soft.Prototype == 1))
|
|
|
|
{
|
2012-09-02 20:56:55 +00:00
|
|
|
/* FIXME: We should charge commit */
|
|
|
|
DPRINT1("Not charging commit for prototype PTE\n");
|
2012-08-03 11:34:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* More locked pages! */
|
|
|
|
InterlockedIncrementSizeT(&MmSystemLockPagesCount);
|
|
|
|
|
|
|
|
/* Update the reference count */
|
|
|
|
NewRefCount = InterlockedIncrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
|
|
|
|
if (NewRefCount != 1)
|
|
|
|
{
|
|
|
|
/* Someone had already locked the page, so undo our bump */
|
|
|
|
ASSERT(NewRefCount < 2500);
|
|
|
|
InterlockedDecrementSizeT(&MmSystemLockPagesCount);
|
|
|
|
}
|
|
|
|
}

CODE_SEG("INIT")
BOOLEAN
NTAPI
MmArmInitSystem(
    IN ULONG Phase,
    IN PLOADER_PARAMETER_BLOCK LoaderBlock
);

CODE_SEG("INIT")
VOID
NTAPI
MiInitializeSessionSpaceLayout(VOID);

CODE_SEG("INIT")
NTSTATUS
NTAPI
MiInitMachineDependent(
    IN PLOADER_PARAMETER_BLOCK LoaderBlock
);

CODE_SEG("INIT")
VOID
NTAPI
MiComputeColorInformation(
    VOID
);

CODE_SEG("INIT")
VOID
NTAPI
MiMapPfnDatabase(
    IN PLOADER_PARAMETER_BLOCK LoaderBlock
);

CODE_SEG("INIT")
VOID
NTAPI
MiInitializeColorTables(
    VOID
);

CODE_SEG("INIT")
VOID
NTAPI
MiInitializePfnDatabase(
    IN PLOADER_PARAMETER_BLOCK LoaderBlock
);

VOID
NTAPI
MiInitializeSessionWsSupport(
    VOID
);

VOID
NTAPI
MiInitializeSessionIds(
    VOID
);

CODE_SEG("INIT")
BOOLEAN
NTAPI
MiInitializeMemoryEvents(
    VOID
);

CODE_SEG("INIT")
PFN_NUMBER
NTAPI
MxGetNextPage(
    IN PFN_NUMBER PageCount
);

CODE_SEG("INIT")
PPHYSICAL_MEMORY_DESCRIPTOR
NTAPI
MmInitializeMemoryLimits(
    IN PLOADER_PARAMETER_BLOCK LoaderBlock,
    IN PBOOLEAN IncludeType
);

PFN_NUMBER
NTAPI
MiPagesInLoaderBlock(
    IN PLOADER_PARAMETER_BLOCK LoaderBlock,
    IN PBOOLEAN IncludeType
);

VOID
FASTCALL
MiSyncARM3WithROS(
    IN PVOID AddressStart,
    IN PVOID AddressEnd
);

NTSTATUS
NTAPI
MiRosProtectVirtualMemory(
    IN PEPROCESS Process,
    IN OUT PVOID *BaseAddress,
    IN OUT PSIZE_T NumberOfBytesToProtect,
    IN ULONG NewAccessProtection,
    OUT PULONG OldAccessProtection OPTIONAL
);

NTSTATUS
NTAPI
MmArmAccessFault(
    IN ULONG FaultCode,
    IN PVOID Address,
    IN KPROCESSOR_MODE Mode,
    IN PVOID TrapInformation
);

NTSTATUS
FASTCALL
MiCheckPdeForPagedPool(
    IN PVOID Address
);

CODE_SEG("INIT")
VOID
NTAPI
MiInitializeNonPagedPoolThresholds(
    VOID
);

CODE_SEG("INIT")
VOID
NTAPI
MiInitializePoolEvents(
    VOID
);

CODE_SEG("INIT")
VOID                      //
NTAPI                     //
InitializePool(           //
    IN POOL_TYPE PoolType,// FIXFIX: This should go in ex.h after the pool merge
    IN ULONG Threshold    //
);                        //

// FIXFIX: THIS ONE TOO
CODE_SEG("INIT")
VOID
NTAPI
ExInitializePoolDescriptor(
    IN PPOOL_DESCRIPTOR PoolDescriptor,
    IN POOL_TYPE PoolType,
    IN ULONG PoolIndex,
    IN ULONG Threshold,
    IN PVOID PoolLock
);

NTSTATUS
NTAPI
MiInitializeSessionPool(
    VOID
);

CODE_SEG("INIT")
VOID
NTAPI
MiInitializeSystemPtes(
    IN PMMPTE StartingPte,
    IN ULONG NumberOfPtes,
    IN MMSYSTEM_PTE_POOL_TYPE PoolType
);

PMMPTE
NTAPI
MiReserveSystemPtes(
    IN ULONG NumberOfPtes,
    IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
);

VOID
NTAPI
MiReleaseSystemPtes(
    IN PMMPTE StartingPte,
    IN ULONG NumberOfPtes,
    IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
);

PFN_NUMBER
NTAPI
MiFindContiguousPages(
    IN PFN_NUMBER LowestPfn,
    IN PFN_NUMBER HighestPfn,
    IN PFN_NUMBER BoundaryPfn,
    IN PFN_NUMBER SizeInPages,
    IN MEMORY_CACHING_TYPE CacheType
);

PVOID
NTAPI
MiCheckForContiguousMemory(
    IN PVOID BaseAddress,
    IN PFN_NUMBER BaseAddressPages,
    IN PFN_NUMBER SizeInPages,
    IN PFN_NUMBER LowestPfn,
    IN PFN_NUMBER HighestPfn,
    IN PFN_NUMBER BoundaryPfn,
    IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute
);

PMDL
NTAPI
MiAllocatePagesForMdl(
    IN PHYSICAL_ADDRESS LowAddress,
    IN PHYSICAL_ADDRESS HighAddress,
    IN PHYSICAL_ADDRESS SkipBytes,
    IN SIZE_T TotalBytes,
    IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
    IN ULONG Flags
);

VOID
NTAPI
MiInsertPageInList(
    IN PMMPFNLIST ListHead,
    IN PFN_NUMBER PageFrameIndex
);

VOID
NTAPI
MiUnlinkFreeOrZeroedPage(
    IN PMMPFN Entry
);

VOID
NTAPI
MiUnlinkPageFromList(
    IN PMMPFN Pfn
);

VOID
NTAPI
MiInitializePfn(
    IN PFN_NUMBER PageFrameIndex,
    IN PMMPTE PointerPte,
    IN BOOLEAN Modified
);

NTSTATUS
NTAPI
MiInitializeAndChargePfn(
    OUT PPFN_NUMBER PageFrameIndex,
    IN PMMPDE PointerPde,
    IN PFN_NUMBER ContainingPageFrame,
    IN BOOLEAN SessionAllocation
);

VOID
NTAPI
MiInitializePfnAndMakePteValid(
    IN PFN_NUMBER PageFrameIndex,
    IN PMMPTE PointerPte,
    IN MMPTE TempPte
);

VOID
NTAPI
MiInitializePfnForOtherProcess(
    IN PFN_NUMBER PageFrameIndex,
    IN PVOID PteAddress,
    IN PFN_NUMBER PteFrame
);

VOID
NTAPI
MiDecrementShareCount(
    IN PMMPFN Pfn1,
    IN PFN_NUMBER PageFrameIndex
);

PFN_NUMBER
NTAPI
MiRemoveAnyPage(
    IN ULONG Color
);

PFN_NUMBER
NTAPI
MiRemoveZeroPage(
    IN ULONG Color
);

VOID
NTAPI
MiZeroPhysicalPage(
    IN PFN_NUMBER PageFrameIndex
);

VOID
NTAPI
MiInsertPageInFreeList(
    IN PFN_NUMBER PageFrameIndex
);

PFN_COUNT
NTAPI
MiDeleteSystemPageableVm(
    IN PMMPTE PointerPte,
    IN PFN_NUMBER PageCount,
    IN ULONG Flags,
    OUT PPFN_NUMBER ValidPages
);

ULONG
NTAPI
MiGetPageProtection(
    IN PMMPTE PointerPte
);

PLDR_DATA_TABLE_ENTRY
NTAPI
MiLookupDataTableEntry(
    IN PVOID Address
);

CODE_SEG("INIT")
VOID
NTAPI
MiInitializeDriverLargePageList(
    VOID
);

CODE_SEG("INIT")
VOID
NTAPI
MiInitializeLargePageSupport(
    VOID
);

CODE_SEG("INIT")
VOID
NTAPI
MiSyncCachedRanges(
    VOID
);

BOOLEAN
NTAPI
MiIsPfnInUse(
    IN PMMPFN Pfn1
);

PMMVAD
NTAPI
MiLocateAddress(
    IN PVOID VirtualAddress
);

TABLE_SEARCH_RESULT
NTAPI
MiCheckForConflictingNode(
    IN ULONG_PTR StartVpn,
    IN ULONG_PTR EndVpn,
    IN PMM_AVL_TABLE Table,
    OUT PMMADDRESS_NODE *NodeOrParent
);

TABLE_SEARCH_RESULT
NTAPI
MiFindEmptyAddressRangeDownTree(
    IN SIZE_T Length,
    IN ULONG_PTR BoundaryAddress,
    IN ULONG_PTR Alignment,
    IN PMM_AVL_TABLE Table,
    OUT PULONG_PTR Base,
    OUT PMMADDRESS_NODE *Parent
);

NTSTATUS
NTAPI
MiFindEmptyAddressRangeDownBasedTree(
    IN SIZE_T Length,
    IN ULONG_PTR BoundaryAddress,
    IN ULONG_PTR Alignment,
    IN PMM_AVL_TABLE Table,
    OUT PULONG_PTR Base
);

TABLE_SEARCH_RESULT
NTAPI
MiFindEmptyAddressRangeInTree(
    IN SIZE_T Length,
    IN ULONG_PTR Alignment,
    IN PMM_AVL_TABLE Table,
    OUT PMMADDRESS_NODE *PreviousVad,
    OUT PULONG_PTR Base
);

NTSTATUS
NTAPI
MiCheckSecuredVad(
    IN PMMVAD Vad,
    IN PVOID Base,
    IN SIZE_T Size,
    IN ULONG ProtectionMask
);

VOID
NTAPI
MiInsertVad(
    _Inout_ PMMVAD Vad,
    _Inout_ PMM_AVL_TABLE VadRoot);

NTSTATUS
NTAPI
MiInsertVadEx(
    _Inout_ PMMVAD Vad,
    _In_ ULONG_PTR *BaseAddress,
    _In_ SIZE_T ViewSize,
    _In_ ULONG_PTR HighestAddress,
    _In_ ULONG_PTR Alignment,
    _In_ ULONG AllocationType);

VOID
NTAPI
MiInsertBasedSection(
    IN PSECTION Section
);

NTSTATUS
NTAPI
MiUnmapViewOfSection(
    IN PEPROCESS Process,
    IN PVOID BaseAddress,
    IN ULONG Flags
);

NTSTATUS
NTAPI
MiRosUnmapViewOfSection(
    IN PEPROCESS Process,
    IN PVOID BaseAddress,
    IN BOOLEAN SkipDebuggerNotify
);

VOID
NTAPI
MiInsertNode(
    IN PMM_AVL_TABLE Table,
    IN PMMADDRESS_NODE NewNode,
    PMMADDRESS_NODE Parent,
    TABLE_SEARCH_RESULT Result
);

VOID
NTAPI
MiRemoveNode(
    IN PMMADDRESS_NODE Node,
    IN PMM_AVL_TABLE Table
);

PMMADDRESS_NODE
NTAPI
MiGetPreviousNode(
    IN PMMADDRESS_NODE Node
);

PMMADDRESS_NODE
NTAPI
MiGetNextNode(
    IN PMMADDRESS_NODE Node
);

BOOLEAN
NTAPI
MiInitializeSystemSpaceMap(
    IN PMMSESSION InputSession OPTIONAL
);

VOID
NTAPI
MiSessionRemoveProcess(
    VOID
);

VOID
NTAPI
MiReleaseProcessReferenceToSessionDataPage(
    IN PMM_SESSION_SPACE SessionGlobal
);

VOID
NTAPI
MiSessionAddProcess(
    IN PEPROCESS NewProcess
);

NTSTATUS
NTAPI
MiSessionCommitPageTables(
    IN PVOID StartVa,
    IN PVOID EndVa
);

ULONG
NTAPI
MiMakeProtectionMask(
    IN ULONG Protect
);

VOID
NTAPI
MiDeleteVirtualAddresses(
    IN ULONG_PTR Va,
    IN ULONG_PTR EndingAddress,
    IN PMMVAD Vad
);

VOID
NTAPI
MiDeletePte(
    IN PMMPTE PointerPte,
    IN PVOID VirtualAddress,
    IN PEPROCESS CurrentProcess,
    IN PMMPTE PrototypePte
);

ULONG
NTAPI
MiMakeSystemAddressValid(
    IN PVOID PageTableVirtualAddress,
    IN PEPROCESS CurrentProcess
);

ULONG
NTAPI
MiMakeSystemAddressValidPfn(
    IN PVOID VirtualAddress,
    IN KIRQL OldIrql
);

VOID
NTAPI
MiRemoveMappedView(
    IN PEPROCESS CurrentProcess,
    IN PMMVAD Vad
);

PSUBSECTION
NTAPI
MiLocateSubsection(
    IN PMMVAD Vad,
    IN ULONG_PTR Vpn
);

VOID
NTAPI
MiDeleteARM3Section(
    PVOID ObjectBody
);

NTSTATUS
NTAPI
MiQueryMemorySectionName(
    IN HANDLE ProcessHandle,
    IN PVOID BaseAddress,
    OUT PVOID MemoryInformation,
    IN SIZE_T MemoryInformationLength,
    OUT PSIZE_T ReturnLength
);

NTSTATUS
NTAPI
MiRosUnmapViewInSystemSpace(
    IN PVOID MappedBase
);

VOID
NTAPI
MiMakePdeExistAndMakeValid(
    IN PMMPDE PointerPde,
    IN PEPROCESS TargetProcess,
    IN KIRQL OldIrql
);

VOID
NTAPI
MiWriteProtectSystemImage(
    _In_ PVOID ImageBase);

//
// MiRemoveZeroPage will use inline code to zero out the page manually if only
// free pages are available. In some scenarios, we don't/can't run that piece of
// code and would rather only have a real zero page. If we can't have a zero page,
// then we'd like to have our own code to grab a free page and zero it out, by
// using MiRemoveAnyPage. This helper implements that fallback.
//
FORCEINLINE
PFN_NUMBER
MiRemoveZeroPageSafe(IN ULONG Color)
{
    if (MmFreePagesByColor[ZeroedPageList][Color].Flink != LIST_HEAD) return MiRemoveZeroPage(Color);
    return 0;
}
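
//
// Example (illustrative sketch, not part of the original header): how a caller
// can combine the helper above with MiRemoveAnyPage and MiZeroPhysicalPage, as
// the comment above describes, to always end up with a zeroed page. PFN lock
// handling around these calls is omitted.
//
//     PageFrameIndex = MiRemoveZeroPageSafe(Color);
//     if (!PageFrameIndex)
//     {
//         /* No zeroed page of this color, so grab any free page and zero it */
//         PageFrameIndex = MiRemoveAnyPage(Color);
//         MiZeroPhysicalPage(PageFrameIndex);
//     }
//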

#if (_MI_PAGING_LEVELS == 2)
FORCEINLINE
BOOLEAN
MiSynchronizeSystemPde(PMMPDE PointerPde)
{
    ULONG Index;

    /* Get the Index from the PDE */
    Index = ((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE);
    if (PointerPde->u.Hard.Valid != 0)
    {
        NT_ASSERT(PointerPde->u.Long == MmSystemPagePtes[Index].u.Long);
        return TRUE;
    }

    if (MmSystemPagePtes[Index].u.Hard.Valid == 0)
    {
        return FALSE;
    }

    /* Copy the PDE from the double-mapped system page directory */
    MI_WRITE_VALID_PDE(PointerPde, MmSystemPagePtes[Index]);

    /* Make sure we re-read the PDE and PTE */
    KeMemoryBarrierWithoutFence();

    /* Return success */
    return TRUE;
}
#endif
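
//
// Example (illustrative sketch, not part of the original header): on 2-level
// configurations a fault on a system address may only mean that the faulting
// process's page directory has not yet picked up a system PDE created after
// the process was. A fault path can therefore try to synchronize first and
// only treat the fault as real if that fails (MiAddressToPde is the usual
// address-to-PDE macro).
//
//     if (MiSynchronizeSystemPde(MiAddressToPde(Address)))
//     {
//         /* The PDE is valid now (or already was); just retry the access */
//     }
//     else
//     {
//         /* Genuinely not present; continue with regular fault handling */
//     }
//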

#if _MI_PAGING_LEVELS == 2
FORCEINLINE
USHORT
MiIncrementPageTableReferences(IN PVOID Address)
{
    PUSHORT RefCount;

    RefCount = &MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)];

    *RefCount += 1;
    ASSERT(*RefCount <= PTE_PER_PAGE);
    return *RefCount;
}

FORCEINLINE
USHORT
MiDecrementPageTableReferences(IN PVOID Address)
{
    PUSHORT RefCount;

    RefCount = &MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)];

    *RefCount -= 1;
    ASSERT(*RefCount < PTE_PER_PAGE);
    return *RefCount;
}
#else
FORCEINLINE
USHORT
MiIncrementPageTableReferences(IN PVOID Address)
{
    PMMPDE PointerPde = MiAddressToPde(Address);
    PMMPFN Pfn;

    /* We should not tinker with this one. */
    ASSERT(PointerPde != (PMMPDE)PXE_SELFMAP);
    DPRINT("Incrementing %p from %p\n", Address, _ReturnAddress());

    /* Make sure we're locked */
    ASSERT(PsGetCurrentThread()->OwnsProcessWorkingSetExclusive);

    /* If we're bumping refcount, then it must be valid! */
    ASSERT(PointerPde->u.Hard.Valid == 1);

    /* The counter lives in the PFN entry */
    Pfn = MiGetPfnEntry(PFN_FROM_PDE(PointerPde));
    Pfn->OriginalPte.u.Soft.UsedPageTableEntries++;

    ASSERT(Pfn->OriginalPte.u.Soft.UsedPageTableEntries <= PTE_PER_PAGE);

    return Pfn->OriginalPte.u.Soft.UsedPageTableEntries;
}

FORCEINLINE
USHORT
MiDecrementPageTableReferences(IN PVOID Address)
{
    PMMPDE PointerPde = MiAddressToPde(Address);
    PMMPFN Pfn;

    /* We should not tinker with this one. */
    ASSERT(PointerPde != (PMMPDE)PXE_SELFMAP);

    DPRINT("Decrementing %p from %p\n", PointerPde, _ReturnAddress());

    /* Make sure we're locked */
    ASSERT(PsGetCurrentThread()->OwnsProcessWorkingSetExclusive);

    /* If we're decreasing refcount, then it must be valid! */
    ASSERT(PointerPde->u.Hard.Valid == 1);

    /* The counter lives in the PFN entry */
    Pfn = MiGetPfnEntry(PFN_FROM_PDE(PointerPde));

    ASSERT(Pfn->OriginalPte.u.Soft.UsedPageTableEntries != 0);
    Pfn->OriginalPte.u.Soft.UsedPageTableEntries--;

    ASSERT(Pfn->OriginalPte.u.Soft.UsedPageTableEntries < PTE_PER_PAGE);

    return Pfn->OriginalPte.u.Soft.UsedPageTableEntries;
}
#endif
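
//
// Example (illustrative sketch, not part of the original header): the intended
// bookkeeping pattern for the two helpers above. Each non-zero PTE in a user
// page table bumps the count, wiping a PTE drops it, and once the count hits
// zero the page table itself can be torn down, e.g. with MiDeletePde below.
// The caller is assumed to hold the process working set lock, as the non-x86
// variants assert.
//
//     /* When a PTE for UserAddress is made non-zero */
//     MiIncrementPageTableReferences(UserAddress);
//
//     /* ...later, when that PTE is wiped... */
//     if (MiDecrementPageTableReferences(UserAddress) == 0)
//     {
//         /* Last PTE in this page table is gone, so the table can go too */
//         MiDeletePde(MiAddressToPde(UserAddress), PsGetCurrentProcess());
//     }
//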

#ifdef __cplusplus
} // extern "C"
#endif

FORCEINLINE
VOID
MiDeletePde(
    _In_ PMMPDE PointerPde,
    _In_ PEPROCESS CurrentProcess)
{
    /* Only for user-mode ones */
    ASSERT(MiIsUserPde(PointerPde));

    /* Kill this one as a PTE */
    MiDeletePte((PMMPTE)PointerPde, MiPdeToPte(PointerPde), CurrentProcess, NULL);
#if _MI_PAGING_LEVELS >= 3
    /* Cascade down */
    if (MiDecrementPageTableReferences(MiPdeToPte(PointerPde)) == 0)
    {
        MiDeletePte(MiPdeToPpe(PointerPde), PointerPde, CurrentProcess, NULL);
#if _MI_PAGING_LEVELS == 4
        if (MiDecrementPageTableReferences(PointerPde) == 0)
        {
            MiDeletePte(MiPdeToPxe(PointerPde), MiPdeToPpe(PointerPde), CurrentProcess, NULL);
        }
#endif
    }
#endif
}

/* EOF */