[NTOSKRNL]

Remove all #line 15 "ARM³::BLA"
Reasons:
- It doesn't provide any benefit; its only purpose was to "look cool"
- It never looked cool; instead, a mess of mis-encoded characters appeared
- It makes finding the related file harder, especially when the file is named differently than the description, or when multiple files share the same tag
- It effectively breaks Coverity scans
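For context, a minimal sketch (plain standard C, hypothetical file, not from this commit) of what the directive does: #line overrides the file name and line number the compiler reports from that point on, so diagnostics, __FILE__ and static-analysis results all point at a file that doesn't exist on disk.

/* sketch.c - hypothetical illustration */
#include <stdio.h>

int main(void)
{
#line 15 "ARM³::BLA"
    /* From here on, __FILE__/__LINE__ and all compiler diagnostics
       claim to come from "ARM³::BLA" instead of sketch.c. */
    printf("%s:%d\n", __FILE__, __LINE__);  /* prints "ARM³::BLA:17" */
    return 0;
}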

svn path=/trunk/; revision=50149
Timo Kreuzer 2010-12-26 15:23:03 +00:00
parent 9db755b143
commit 07bf61809e
25 changed files with 1256 additions and 1281 deletions

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::INIT"
#define MODULE_INVOLVED_IN_ARM3
#include "../../ARM3/miarm.h"

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::CONTMEM"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -32,7 +31,7 @@ MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
KIRQL OldIrql;
PAGED_CODE();
ASSERT(SizeInPages != 0);
//
// Convert the boundary PFN into an alignment mask
//
@ -40,7 +39,7 @@ MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
/* Disable APCs */
KeEnterGuardedRegion();
//
// Loop all the physical memory blocks
//
@ -51,23 +50,23 @@ MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
//
Page = MmPhysicalMemoryBlock->Run[i].BasePage;
PageCount = MmPhysicalMemoryBlock->Run[i].PageCount;
//
// Check how far this memory block will go
//
LastPage = Page + PageCount;
//
// Trim it down to only the PFNs we're actually interested in
//
if ((LastPage - 1) > HighestPfn) LastPage = HighestPfn + 1;
if (Page < LowestPfn) Page = LowestPfn;
//
// Skip this run if it's empty or fails to contain all the pages we need
//
if (!(PageCount) || ((Page + SizeInPages) > LastPage)) continue;
//
// Now scan all the relevant PFNs in this run
//
@ -82,7 +81,7 @@ MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
Length = 0;
continue;
}
//
// If we haven't chosen a start PFN yet and the caller specified an
// alignment, make sure the page matches the alignment restriction
@ -95,7 +94,7 @@ MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
//
continue;
}
//
// Increase the number of valid pages, and check if we have enough
//
@ -106,7 +105,7 @@ MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
//
Pfn1 -= (Length - 1);
Page -= (Length - 1);
//
// Acquire the PFN lock
//
@ -117,7 +116,7 @@ MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
// Things might've changed for us. Is the page still free?
//
if (MiIsPfnInUse(Pfn1)) break;
//
// So far so good. Is this the last confirmed valid page?
//
@ -127,7 +126,7 @@ MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
// Sanity check that we didn't go out of bounds
//
ASSERT(i != MmPhysicalMemoryBlock->NumberOfRuns);
//
// Loop until all PFN entries have been processed
//
@ -148,43 +147,43 @@ MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
Pfn1->u3.e1.PrototypePte = 0;
Pfn1->u4.VerifierAllocation = 0;
Pfn1->PteAddress = (PVOID)0xBAADF00D;
//
// Check if this is the last PFN, otherwise go on
//
if (Pfn1 == EndPfn) break;
Pfn1--;
} while (TRUE);
//
// Mark the first and last PFN so we can find them later
//
Pfn1->u3.e1.StartOfAllocation = 1;
(Pfn1 + SizeInPages - 1)->u3.e1.EndOfAllocation = 1;
//
// Now it's safe to let go of the PFN lock
//
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
//
// Quick sanity check that the last PFN is consistent
//
EndPfn = Pfn1 + SizeInPages;
ASSERT(EndPfn == MI_PFN_ELEMENT(Page + 1));
//
// Compute the first page, and make sure it's consistent
//
Page = Page - SizeInPages + 1;
ASSERT(Pfn1 == MI_PFN_ELEMENT(Page));
ASSERT(Page != 0);
/* Enable APCs and return the page */
KeLeaveGuardedRegion();
return Page;
}
//
// Keep going. The purpose of this loop is to reconfirm that
// after acquiring the PFN lock these pages are still usable
@ -192,7 +191,7 @@ MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
Pfn1++;
Page++;
} while (TRUE);
//
// If we got here, something changed while we hadn't acquired
// the PFN lock yet, so we'll have to restart
@ -202,11 +201,11 @@ MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
}
}
} while (++i != MmPhysicalMemoryBlock->NumberOfRuns);
//
// And if we get here, it means no suitable physical memory runs were found
//
return 0;
}
PVOID
@ -221,7 +220,7 @@ MiCheckForContiguousMemory(IN PVOID BaseAddress,
{
PMMPTE StartPte, EndPte;
PFN_NUMBER PreviousPage = 0, Page, HighPage, BoundaryMask, Pages = 0;
//
// Okay, first of all check if the PFNs match our restrictions
//
@ -229,13 +228,13 @@ MiCheckForContiguousMemory(IN PVOID BaseAddress,
if (LowestPfn + SizeInPages <= LowestPfn) return NULL;
if (LowestPfn + SizeInPages - 1 > HighestPfn) return NULL;
if (BaseAddressPages < SizeInPages) return NULL;
//
// This is the last page we need to get to and the boundary requested
//
HighPage = HighestPfn + 1 - SizeInPages;
BoundaryMask = ~(BoundaryPfn - 1);
//
// And here's the PTEs for this allocation. Let's go scan them.
//
@ -248,7 +247,7 @@ MiCheckForContiguousMemory(IN PVOID BaseAddress,
//
ASSERT (StartPte->u.Hard.Valid == 1);
Page = PFN_FROM_PTE(StartPte);
//
// Is this the beginning of our adventure?
//
@ -271,7 +270,7 @@ MiCheckForContiguousMemory(IN PVOID BaseAddress,
Pages++;
}
}
//
// Have we found all the pages we need by now?
// Incidentally, this means you only wanted one page
@ -297,7 +296,7 @@ MiCheckForContiguousMemory(IN PVOID BaseAddress,
Pages = 0;
continue;
}
//
// Otherwise, we're still in the game. Do we have all our pages?
//
@ -309,7 +308,7 @@ MiCheckForContiguousMemory(IN PVOID BaseAddress,
return MiPteToAddress(StartPte - Pages + 1);
}
}
//
// Try with the next PTE, remember this PFN
//
@ -317,7 +316,7 @@ MiCheckForContiguousMemory(IN PVOID BaseAddress,
StartPte++;
continue;
}
//
// All good returns are within the loop...
//
@ -349,14 +348,14 @@ MiFindContiguousMemory(IN PFN_NUMBER LowestPfn,
SizeInPages,
CacheType);
if (!Page) return NULL;
//
// We'll just piggyback on the I/O memory mapper
//
PhysicalAddress.QuadPart = Page << PAGE_SHIFT;
BaseAddress = MmMapIoSpace(PhysicalAddress, SizeInPages << PAGE_SHIFT, CacheType);
ASSERT(BaseAddress);
/* Loop the PFN entries */
Pfn1 = MiGetPfnEntry(Page);
EndPfn = Pfn1 + SizeInPages;
@ -367,7 +366,7 @@ MiFindContiguousMemory(IN PFN_NUMBER LowestPfn,
Pfn1->PteAddress = PointerPte;
Pfn1->u4.PteFrame = PFN_FROM_PTE(MiAddressToPte(PointerPte++));
} while (++Pfn1 < EndPfn);
/* Return the address */
return BaseAddress;
}
@ -389,12 +388,12 @@ MiAllocateContiguousMemory(IN SIZE_T NumberOfBytes,
//
ASSERT(NumberOfBytes != 0);
ASSERT(CacheType <= MmWriteCombined);
//
// Compute size requested
//
SizeInPages = BYTES_TO_PAGES(NumberOfBytes);
//
// Convert the cache attribute and check for cached requests
//
@ -409,7 +408,7 @@ MiAllocateContiguousMemory(IN SIZE_T NumberOfBytes,
NumberOfBytes,
'mCmM');
if (BaseAddress)
{
//
// Now make sure it's actually contiguous (if it came from expansion
// it might not be).
@ -427,20 +426,20 @@ MiAllocateContiguousMemory(IN SIZE_T NumberOfBytes,
//
return BaseAddress;
}
//
// No such luck
//
ExFreePool(BaseAddress);
}
//
// According to MSDN, the system won't try anything else if you're higher
// than APC level.
//
if (KeGetCurrentIrql() > APC_LEVEL) return NULL;
//
// Otherwise, we'll go try to find some
//
@ -460,7 +459,7 @@ MiFreeContiguousMemory(IN PVOID BaseAddress)
PMMPFN Pfn1, StartPfn;
PMMPTE PointerPte;
PAGED_CODE();
//
// First, check if the memory came from initial nonpaged pool, or expansion
//
@ -476,15 +475,15 @@ MiFreeContiguousMemory(IN PVOID BaseAddress)
ExFreePool(BaseAddress);
return;
}
/* Get the PTE and frame number for the allocation*/
PointerPte = MiAddressToPte(BaseAddress);
PageFrameIndex = PFN_FROM_PTE(PointerPte);
//
// Now get the PFN entry for this, and make sure it's the correct one
//
Pfn1 = MiGetPfnEntry(PageFrameIndex);
if ((!Pfn1) || (Pfn1->u3.e1.StartOfAllocation == 0))
{
//
@ -496,13 +495,13 @@ MiFreeContiguousMemory(IN PVOID BaseAddress)
0,
0);
}
//
// Now this PFN isn't the start of any allocation anymore, it's going out
//
StartPfn = Pfn1;
Pfn1->u3.e1.StartOfAllocation = 0;
/* Loop the PFNs until we find the one that marks the end of the allocation */
do
{
@ -513,35 +512,35 @@ MiFreeContiguousMemory(IN PVOID BaseAddress)
ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
ASSERT(Pfn1->u4.VerifierAllocation == 0);
ASSERT(Pfn1->u3.e1.PrototypePte == 0);
/* Set the special pending delete marker */
MI_SET_PFN_DELETED(Pfn1);
/* Keep going for assertions */
PointerPte++;
} while (Pfn1++->u3.e1.EndOfAllocation == 0);
//
// Found it, unmark it
//
Pfn1--;
Pfn1->u3.e1.EndOfAllocation = 0;
//
// Now compute how many pages this represents
//
PageCount = (ULONG)(Pfn1 - StartPfn + 1);
//
// So we can know how much to unmap (recall we piggyback on I/O mappings)
//
MmUnmapIoSpace(BaseAddress, PageCount << PAGE_SHIFT);
//
// Lock the PFN database
//
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
//
// Loop all the pages
//
@ -552,7 +551,7 @@ MiFreeContiguousMemory(IN PVOID BaseAddress)
/* Decrement the share count and move on */
MiDecrementShareCount(Pfn1++, PageFrameIndex++);
} while (PageFrameIndex < LastPage);
//
// Release the PFN lock
//
@ -579,33 +578,33 @@ MmAllocateContiguousMemorySpecifyCache(IN SIZE_T NumberOfBytes,
//
ASSERT(NumberOfBytes != 0);
ASSERT(CacheType <= MmWriteCombined);
//
// Convert the lowest address into a PFN
//
LowestPfn = (PFN_NUMBER)(LowestAcceptableAddress.QuadPart >> PAGE_SHIFT);
if (BYTE_OFFSET(LowestAcceptableAddress.LowPart)) LowestPfn++;
//
// Convert and validate the boundary address into a PFN
//
if (BYTE_OFFSET(BoundaryAddressMultiple.LowPart)) return NULL;
BoundaryPfn = (PFN_NUMBER)(BoundaryAddressMultiple.QuadPart >> PAGE_SHIFT);
//
// Convert the highest address into a PFN
//
HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
if (HighestPfn > MmHighestPhysicalPage) HighestPfn = MmHighestPhysicalPage;
//
// Validate the PFN bounds
//
if (LowestPfn > HighestPfn) return NULL;
//
// Let the contiguous memory allocator handle it
//
return MiAllocateContiguousMemory(NumberOfBytes,
LowestPfn,
HighestPfn,
@ -633,10 +632,10 @@ MmAllocateContiguousMemory(IN SIZE_T NumberOfBytes,
//
HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
if (HighestPfn > MmHighestPhysicalPage) HighestPfn = MmHighestPhysicalPage;
//
// Let the contiguous memory allocator handle it
//
return MiAllocateContiguousMemory(NumberOfBytes, 0, HighestPfn, 0, MmCached);
}
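As a hedged aside, the BoundaryPfn handling above relies on the boundary being a power of two; here is a standalone user-mode sketch of the mask trick (hypothetical code, not the kernel's):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t BoundaryPfn  = 16;                  /* must be a power of two */
    uint64_t BoundaryMask = ~(BoundaryPfn - 1);  /* 16 -> ...FFFFFFF0      */
    uint64_t Page         = 37;                  /* candidate start PFN    */

    /* Align the candidate up so the run cannot straddle a boundary. */
    if (Page & ~BoundaryMask)
        Page = (Page + BoundaryPfn - 1) & BoundaryMask;

    printf("aligned start PFN: %llu\n", (unsigned long long)Page); /* 48 */
    return 0;
}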

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::DRVMGMT"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -91,18 +90,18 @@ MmAddVerifierThunks(IN PVOID ThunkBuffer,
ULONG i;
NTSTATUS Status = STATUS_SUCCESS;
PAGED_CODE();
//
// Make sure the driver verifier is initialized
//
if (!MiVerifierDriverAddedThunkListHead.Flink) return STATUS_NOT_SUPPORTED;
//
// Get the thunk pairs and count them
//
ThunkCount = ThunkBufferSize / sizeof(DRIVER_VERIFIER_THUNK_PAIRS);
if (!ThunkCount) return STATUS_INVALID_PARAMETER_1;
//
// Now allocate our own thunk table
//
@ -112,7 +111,7 @@ MmAddVerifierThunks(IN PVOID ThunkBuffer,
sizeof(DRIVER_VERIFIER_THUNK_PAIRS),
'tVmM');
if (!DriverThunks) return STATUS_INSUFFICIENT_RESOURCES;
//
// Now copy the driver-fed part
//
@ -120,7 +119,7 @@ MmAddVerifierThunks(IN PVOID ThunkBuffer,
RtlCopyMemory(ThunkTable,
ThunkBuffer,
ThunkCount * sizeof(DRIVER_VERIFIER_THUNK_PAIRS));
//
// Acquire the system load lock
//
@ -130,7 +129,7 @@ MmAddVerifierThunks(IN PVOID ThunkBuffer,
KernelMode,
FALSE,
NULL);
//
// Get the loader entry
//
@ -143,13 +142,13 @@ MmAddVerifierThunks(IN PVOID ThunkBuffer,
Status = STATUS_INVALID_PARAMETER_2;
goto Cleanup;
}
//
// Get driver base and end
//
ModuleBase = LdrEntry->DllBase;
ModuleEnd = (PVOID)((ULONG_PTR)LdrEntry->DllBase + LdrEntry->SizeOfImage);
//
// Don't allow hooking the kernel or HAL
//
@ -161,7 +160,7 @@ MmAddVerifierThunks(IN PVOID ThunkBuffer,
Status = STATUS_INVALID_PARAMETER_2;
goto Cleanup;
}
//
// Loop all the thunks
//
@ -180,7 +179,7 @@ MmAddVerifierThunks(IN PVOID ThunkBuffer,
goto Cleanup;
}
}
//
// Otherwise, add this entry
//
@ -190,14 +189,14 @@ MmAddVerifierThunks(IN PVOID ThunkBuffer,
InsertTailList(&MiVerifierDriverAddedThunkListHead,
&DriverThunks->ListEntry);
DriverThunks = NULL;
Cleanup:
//
// Release the lock
//
KeReleaseMutant(&MmSystemLoadLock, 1, FALSE, FALSE);
KeLeaveCriticalRegion();
//
// Free the table if we failed and return status
//
@ -213,13 +212,13 @@ NTAPI
MmIsDriverVerifying(IN PDRIVER_OBJECT DriverObject)
{
PLDR_DATA_TABLE_ENTRY LdrEntry;
//
// Get the loader entry
//
LdrEntry = (PLDR_DATA_TABLE_ENTRY)DriverObject->DriverSection;
if (!LdrEntry) return FALSE;
//
// Check if we're verifying or not
//
@ -244,7 +243,7 @@ MmIsVerifierEnabled(OUT PULONG VerifierFlags)
*VerifierFlags = MmVerifierData.Level;
return STATUS_SUCCESS;
}
//
// Otherwise, we're disabled
//
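A hedged caller-side sketch for the verifier query above, using only the signature visible in this diff:

ULONG VerifierFlags;

if (NT_SUCCESS(MmIsVerifierEnabled(&VerifierFlags)))
{
    /* Verifier is active; VerifierFlags carries MmVerifierData.Level */
    DPRINT("Driver Verifier enabled, level %lx\n", VerifierFlags);
}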

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::DYNAMIC"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -74,7 +73,7 @@ NTAPI
MmGetPhysicalMemoryRanges(VOID)
{
ULONG Size, i;
PPHYSICAL_MEMORY_RANGE Entry, Buffer;
KIRQL OldIrql;
ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
@ -82,7 +81,7 @@ MmGetPhysicalMemoryRanges(VOID)
// Calculate how much memory we'll need
//
Size = sizeof(PHYSICAL_MEMORY_RANGE) * (MmPhysicalMemoryBlock->NumberOfRuns + 1);
//
// Allocate a copy
//
@ -93,13 +92,13 @@ MmGetPhysicalMemoryRanges(VOID)
// Lock the PFN database
//
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
//
// Make sure it hasn't changed before we acquired the lock
//
ASSERT(Size == (sizeof(PHYSICAL_MEMORY_RANGE) *
(MmPhysicalMemoryBlock->NumberOfRuns + 1)));
//
// Now loop our block
//
@ -112,7 +111,7 @@ MmGetPhysicalMemoryRanges(VOID)
Entry->NumberOfBytes.QuadPart = MmPhysicalMemoryBlock->Run[i].PageCount << PAGE_SHIFT;
Entry++;
}
//
// Last entry is empty
//
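A hedged usage sketch for MmGetPhysicalMemoryRanges above; the terminating all-zero entry matches the "Last entry is empty" comment, and the caller is assumed to free the buffer:

PPHYSICAL_MEMORY_RANGE Ranges = MmGetPhysicalMemoryRanges();
ULONG i;

if (Ranges)
{
    /* The array ends with an entry whose NumberOfBytes is zero. */
    for (i = 0; Ranges[i].NumberOfBytes.QuadPart != 0; i++)
    {
        DPRINT("Run %lu: base %I64x, %I64x bytes\n", i,
               Ranges[i].BaseAddress.QuadPart,
               Ranges[i].NumberOfBytes.QuadPart);
    }
    ExFreePool(Ranges);  /* caller owns the buffer */
}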

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::EXPOOL"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -53,14 +52,14 @@ PKGUARDED_MUTEX ExpPagedPoolMutex;
*/
PLIST_ENTRY
NTAPI
ExpDecodePoolLink(IN PLIST_ENTRY Link)
{
return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
}
PLIST_ENTRY
NTAPI
ExpEncodePoolLink(IN PLIST_ENTRY Link)
{
return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
}
@ -104,7 +103,7 @@ ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
Flink->Blink = ExpEncodePoolLink(Blink);
Blink->Flink = ExpEncodePoolLink(Flink);
}
PLIST_ENTRY
NTAPI
ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
@ -170,7 +169,7 @@ ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
{
/* Get it */
PreviousEntry = POOL_PREV_BLOCK(Entry);
/* The two blocks must be on the same page! */
if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
{
@ -251,31 +250,31 @@ ExpCheckPoolBlocks(IN PVOID Block)
BOOLEAN FoundBlock = FALSE;
SIZE_T Size = 0;
PPOOL_HEADER Entry;
/* Get the first entry for this page, make sure it really is the first */
Entry = PAGE_ALIGN(Block);
ASSERT(Entry->PreviousSize == 0);
/* Now scan each entry */
while (TRUE)
{
/* When we actually found our block, remember this */
if (Entry == Block) FoundBlock = TRUE;
/* Now validate this block header */
ExpCheckPoolHeader(Entry);
/* And go to the next one, keeping track of our size */
Size += Entry->BlockSize;
Entry = POOL_NEXT_BLOCK(Entry);
/* If we hit the last block, stop */
if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
/* If we hit the end of the page, stop */
if (PAGE_ALIGN(Entry) == Entry) break;
}
/* We must've found our block, and we must have hit the end of the page */
if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
{
@ -304,7 +303,7 @@ ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
PoolDescriptor->PoolIndex = PoolIndex;
PoolDescriptor->Threshold = Threshold;
PoolDescriptor->LockAddress = PoolLock;
//
// Initialize accounting data
//
@ -313,18 +312,18 @@ ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
PoolDescriptor->TotalPages = 0;
PoolDescriptor->TotalBytes = 0;
PoolDescriptor->TotalBigPages = 0;
//
// Nothing pending for now
//
PoolDescriptor->PendingFrees = NULL;
PoolDescriptor->PendingFreeDepth = 0;
//
// Loop all the descriptor's allocation lists and initialize them
//
NextEntry = PoolDescriptor->ListHeads;
LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
while (NextEntry < LastEntry)
{
ExpInitializePoolListHead(NextEntry);
@ -339,7 +338,7 @@ InitializePool(IN POOL_TYPE PoolType,
IN ULONG Threshold)
{
PPOOL_DESCRIPTOR Descriptor;
//
// Check what kind of pool this is
//
@ -375,7 +374,7 @@ InitializePool(IN POOL_TYPE PoolType,
-1,
-1);
}
//
// Setup the vector and guarded mutex for paged pool
//
@ -461,14 +460,14 @@ ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
ASSERT(Tag != 0);
ASSERT(Tag != ' GIB');
ASSERT(NumberOfBytes != 0);
//
// Get the pool type and its corresponding vector for this request
//
PoolType = PoolType & BASE_POOL_TYPE_MASK;
PoolDesc = PoolVector[PoolType];
ASSERT(PoolDesc != NULL);
//
// Check if this is a big page allocation
//
@ -479,13 +478,13 @@ ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
//
return MiAllocatePoolPages(PoolType, NumberOfBytes);
}
//
// Should never request 0 bytes from the pool, but since so many drivers do
// it, we'll just assume they want 1 byte, based on NT's similar behavior
//
if (!NumberOfBytes) NumberOfBytes = 1;
//
// A pool allocation is defined by its data, a linked list to connect it to
// the free list (if necessary), and a pool header to store accounting info.
@ -514,7 +513,7 @@ ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
// Acquire the pool lock now
//
OldIrql = ExLockPool(PoolDesc);
//
// And make sure the list still has entries
//
@ -530,7 +529,7 @@ ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
ListHead++;
continue;
}
//
// Remove a free entry from the list
// Note that due to the way we insert free blocks into multiple lists
@ -543,7 +542,7 @@ ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
ExpCheckPoolBlocks(Entry);
ASSERT(Entry->BlockSize >= i);
ASSERT(Entry->PoolType == 0);
//
// Check if this block is larger than what we need. The block could
// not possibly be smaller, due to the reason explained above (and
@ -563,12 +562,12 @@ ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
//
FragmentEntry = POOL_BLOCK(Entry, i);
FragmentEntry->BlockSize = Entry->BlockSize - i;
//
// And make it point back to us
//
FragmentEntry->PreviousSize = i;
//
// Now get the block that follows the new fragment and check
// if it's still on the same page as us (and not at the end)
@ -581,7 +580,7 @@ ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
// fragment block
//
NextEntry->PreviousSize = FragmentEntry->BlockSize;
}
}
else
{
@ -590,13 +589,13 @@ ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
// so we'll make this entry the fragment instead
//
FragmentEntry = Entry;
//
// And then we'll remove from it the actual size required.
// Now the entry is a leftover free fragment
//
Entry->BlockSize -= i;
//
// Now let's go to the next entry after the fragment (which
// used to point to our original free entry) and make it
@ -607,7 +606,7 @@ ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
//
Entry = POOL_NEXT_BLOCK(Entry);
Entry->PreviousSize = FragmentEntry->BlockSize;
//
// And now let's go to the entry after that one and check if
// it's still on the same page, and not at the end
@ -621,12 +620,12 @@ ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
NextEntry->PreviousSize = i;
}
}
//
// Now our (allocation) entry is the right size
//
Entry->BlockSize = i;
//
// And the next entry is now the free fragment which contains
// the remaining difference between how big the original entry
@ -634,10 +633,10 @@ ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
//
FragmentEntry->PoolType = 0;
BlockSize = FragmentEntry->BlockSize;
//
// Now check if enough free bytes remained for us to have a
// "full" entry, which contains enough bytes for a linked list
// and thus can be used for allocations (up to 8 bytes...)
//
ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
@ -651,7 +650,7 @@ ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
}
}
//
// We have found an entry for this allocation, so set the pool type
// and release the lock since we're done
@ -669,7 +668,7 @@ ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
return POOL_FREE_BLOCK(Entry);
}
} while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
//
// There were no free entries left, so we have to allocate a new fresh page
//
@ -678,7 +677,7 @@ ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
Entry->Ulong1 = 0;
Entry->BlockSize = i;
Entry->PoolType = PoolType + 1;
//
// This page will have two entries -- one for the allocation (which we just
// created above), and one for the remaining free bytes, which we're about
@ -690,7 +689,7 @@ ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
FragmentEntry->Ulong1 = 0;
FragmentEntry->BlockSize = BlockSize;
FragmentEntry->PreviousSize = i;
//
// Now check if enough free bytes remained for us to have a "full" entry,
// which contains enough bytes for a linked list and thus can be used for
@ -710,7 +709,7 @@ ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
POOL_FREE_BLOCK(FragmentEntry));
ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
//
// Release the pool lock
//
@ -763,14 +762,14 @@ ExFreePoolWithTag(IN PVOID P,
MiFreePoolPages(P);
return;
}
//
// Get the entry for this pool allocation
// The pointer math here may look wrong or confusing, but it is quite right
//
Entry = P;
Entry--;
//
// Get the size of the entry, and it's pool type, then load the descriptor
// for this pool type
@ -804,7 +803,7 @@ ExFreePoolWithTag(IN PVOID P,
// The next block is free, so we'll do a combine
//
Combined = TRUE;
//
// Make sure there's actual data in the block -- anything smaller
// than this means we only have the header, so there's no linked list
@ -821,7 +820,7 @@ ExFreePoolWithTag(IN PVOID P,
ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
}
//
// Our entry is now combined with the next entry
//
@ -844,7 +843,7 @@ ExFreePoolWithTag(IN PVOID P,
// It is, so we can do a combine
//
Combined = TRUE;
//
// Make sure there's actual data in the block -- anything smaller
// than this means we only have the header so there's no linked list
@ -861,20 +860,20 @@ ExFreePoolWithTag(IN PVOID P,
ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
}
//
// Combine our original block (which might've already been combined
// with the next block), into the previous block
//
NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
//
// And now we'll work with the previous block instead
//
Entry = NextEntry;
}
}
//
// By now, it may have been possible for our combined blocks to actually
// have made up a full page (if there were only 2-3 allocations on the
@ -897,7 +896,7 @@ ExFreePoolWithTag(IN PVOID P,
Entry->PoolType = 0;
BlockSize = Entry->BlockSize;
ASSERT(BlockSize != 1);
//
// Check if we actually did combine it with anyone
//
@ -908,14 +907,14 @@ ExFreePoolWithTag(IN PVOID P,
// the one after the original, depending if we combined with the previous)
//
NextEntry = POOL_NEXT_BLOCK(Entry);
//
// As long as the next block isn't on a page boundary, have it point
// back to us
//
if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
}
//
// Insert this new free block, and release the pool lock
//
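To make the split logic above concrete, here is a simplified sketch of the front-split (hypothetical HDR type with 8-byte block units; not the real POOL_HEADER layout):

typedef struct _HDR
{
    unsigned short PreviousSize;  /* size of the preceding block, in units */
    unsigned short BlockSize;     /* our own size, in units                */
} HDR;

/* Carve 'i' units off the front of a free block: the leftover becomes a
   smaller free fragment whose PreviousSize points back at the allocation. */
static HDR *SplitFront(HDR *Entry, unsigned short i)
{
    HDR *Fragment = (HDR *)((char *)Entry + i * 8);

    Fragment->BlockSize    = Entry->BlockSize - i;
    Fragment->PreviousSize = i;
    Entry->BlockSize       = i;
    return Fragment;  /* reinsert on the free list matching its new size */
}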

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::HYPERMAP"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -125,7 +124,7 @@ MiMapPagesToZeroInHyperSpace(IN PMMPFN Pfn1,
ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
ASSERT(NumberOfPages != 0);
ASSERT(NumberOfPages <= (MI_ZERO_PTES - 1));
//
// Pick the first zeroing PTE
//
@ -144,39 +143,39 @@ MiMapPagesToZeroInHyperSpace(IN PMMPFN Pfn1,
PointerPte->u.Hard.PageFrameNumber = Offset;
KeFlushProcessTb();
}
//
// Prepare the next PTE
//
PointerPte->u.Hard.PageFrameNumber = Offset - NumberOfPages;
/* Choose the correct PTE to use, and which template */
PointerPte += (Offset + 1);
TempPte = ValidKernelPte;
MI_MAKE_LOCAL_PAGE(&TempPte); // Hyperspace is local!
/* Make sure the list isn't empty and loop it */
ASSERT(Pfn1 != (PVOID)LIST_HEAD);
while (Pfn1 != (PVOID)LIST_HEAD)
{
/* Get the page index for this PFN */
PageFrameIndex = MiGetPfnEntryIndex(Pfn1);
//
// Write the PFN
//
TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
//
// Set the correct PTE to write to, and set its new value
//
PointerPte--;
MI_WRITE_VALID_PTE(PointerPte, TempPte);
/* Move to the next PFN */
Pfn1 = (PMMPFN)Pfn1->u1.Flink;
}
//
// Return the address
//
@ -189,14 +188,14 @@ MiUnmapPagesInZeroSpace(IN PVOID VirtualAddress,
IN PFN_NUMBER NumberOfPages)
{
PMMPTE PointerPte;
//
// Sanity checks
//
ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
ASSERT (NumberOfPages != 0);
ASSERT (NumberOfPages <= (MI_ZERO_PTES - 1));
//
// Get the first PTE for the mapped zero VA
//

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::INIT:X86"
#define MODULE_INVOLVED_IN_ARM3
#include "../../ARM3/miarm.h"
@ -28,7 +27,7 @@ MMPTE DemandZeroPte = {.u.Long = (MM_READWRITE << MM_PTE_SOFTWARE_PROTECTION_BI
/* Template PTE for prototype page */
MMPTE PrototypePte = {.u.Long = (MM_READWRITE << MM_PTE_SOFTWARE_PROTECTION_BITS) | PTE_PROTOTYPE | (MI_PTE_LOOKUP_NEEDED << PAGE_SHIFT)};
/* PRIVATE FUNCTIONS **********************************************************/
VOID
@ -37,7 +36,7 @@ INIT_FUNCTION
MiComputeNonPagedPoolVa(IN ULONG FreePages)
{
IN PFN_NUMBER PoolPages;
/* Check if this is a machine with less than 256MB of RAM, and no override */
if ((MmNumberOfPhysicalPages <= MI_MIN_PAGES_FOR_NONPAGED_POOL_TUNING) &&
!(MmSizeOfNonPagedPoolInBytes))
@ -45,17 +44,17 @@ MiComputeNonPagedPoolVa(IN ULONG FreePages)
/* Force the non paged pool to be 2MB so we can reduce RAM usage */
MmSizeOfNonPagedPoolInBytes = 2 * _1MB;
}
/* Hyperspace ends here */
MmHyperSpaceEnd = (PVOID)((ULONG_PTR)MmSystemCacheWorkingSetList - 1);
/* Check if the user gave a ridiculously large nonpaged pool RAM size */
if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) > (FreePages * 7 / 8))
{
/* More than 7/8ths of RAM was dedicated to nonpaged pool, ignore! */
MmSizeOfNonPagedPoolInBytes = 0;
}
/* Check if no registry setting was set, or if the setting was too low */
if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize)
{
@ -63,30 +62,30 @@ MiComputeNonPagedPoolVa(IN ULONG FreePages)
MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
MmSizeOfNonPagedPoolInBytes += (FreePages - 1024) / 256 * MmMinAdditionNonPagedPoolPerMb;
}
/* Check if the registry setting or our dynamic calculation was too high */
if (MmSizeOfNonPagedPoolInBytes > MI_MAX_INIT_NONPAGED_POOL_SIZE)
{
/* Set it to the maximum */
MmSizeOfNonPagedPoolInBytes = MI_MAX_INIT_NONPAGED_POOL_SIZE;
}
/* Check if a percentage cap was set through the registry */
if (MmMaximumNonPagedPoolPercent) UNIMPLEMENTED;
/* Page-align the nonpaged pool size */
MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);
/* Now, check if there was a registry size for the maximum size */
if (!MmMaximumNonPagedPoolInBytes)
{
/* Start with the default (1MB) */
MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;
/* Add space for PFN database */
MmMaximumNonPagedPoolInBytes += (ULONG)
PAGE_ALIGN((MmHighestPhysicalPage + 1) * sizeof(MMPFN));
/* Check if the machine has more than 512MB of free RAM */
if (FreePages >= 0x1F000)
{
@ -106,7 +105,7 @@ MiComputeNonPagedPoolVa(IN ULONG FreePages)
MmMaxAdditionNonPagedPoolPerMb;
}
}
/* Make sure there's at least 16 pages + the PFN available for expansion */
PoolPages = MmSizeOfNonPagedPoolInBytes + (PAGE_SIZE * 16) +
((ULONG)PAGE_ALIGN(MmHighestPhysicalPage + 1) * sizeof(MMPFN));
@ -115,17 +114,17 @@ MiComputeNonPagedPoolVa(IN ULONG FreePages)
/* The maximum should be at least high enough to cover all the above */
MmMaximumNonPagedPoolInBytes = PoolPages;
}
/* Systems with 2GB of kernel address space get double the size */
PoolPages = MI_MAX_NONPAGED_POOL_SIZE * 2;
/* On the other hand, make sure that PFN + nonpaged pool doesn't get too big */
if (MmMaximumNonPagedPoolInBytes > PoolPages)
{
/* Trim it down to the maximum architectural limit (256MB) */
MmMaximumNonPagedPoolInBytes = PoolPages;
}
/* Check if this is a system with > 128MB of non paged pool */
if (MmMaximumNonPagedPoolInBytes > MI_MAX_NONPAGED_POOL_SIZE)
{
@ -134,7 +133,7 @@ MiComputeNonPagedPoolVa(IN ULONG FreePages)
MI_MAX_NONPAGED_POOL_SIZE))
{
/* FIXME: Should check if the initial pool can be expanded */
/* Assume no expansion possible, check if the maximum is too large */
if (MmMaximumNonPagedPoolInBytes > (MmSizeOfNonPagedPoolInBytes +
MI_MAX_NONPAGED_POOL_SIZE))
@ -142,7 +141,7 @@ MiComputeNonPagedPoolVa(IN ULONG FreePages)
/* Set it to the initial value plus the boost */
MmMaximumNonPagedPoolInBytes = MmSizeOfNonPagedPoolInBytes +
MI_MAX_NONPAGED_POOL_SIZE;
}
}
}
}
@ -162,7 +161,7 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
KIRQL OldIrql;
PMMPFN Pfn1;
ULONG Flags;
/* Check for kernel stack size that's too big */
if (MmLargeStackSize > (KERNEL_LARGE_STACK_SIZE / _1KB))
{
@ -173,18 +172,18 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
/* Take the registry setting, and convert it into bytes */
MmLargeStackSize *= _1KB;
/* Now align it to a page boundary */
MmLargeStackSize = PAGE_ROUND_UP(MmLargeStackSize);
/* Sanity checks */
ASSERT(MmLargeStackSize <= KERNEL_LARGE_STACK_SIZE);
ASSERT((MmLargeStackSize & (PAGE_SIZE - 1)) == 0);
/* Make sure it's not too low */
if (MmLargeStackSize < KERNEL_STACK_SIZE) MmLargeStackSize = KERNEL_STACK_SIZE;
}
/* Check for global bit */
#if 0
if (KeFeatureBits & KF_GLOBAL_PAGE)
@ -197,21 +196,21 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
/* Now templates are ready */
TempPte = ValidKernelPte;
TempPde = ValidKernelPde;
//
// Set CR3 for the system process
//
PointerPte = MiAddressToPde(PDE_BASE);
PageFrameIndex = PFN_FROM_PTE(PointerPte) << PAGE_SHIFT;
PsGetCurrentProcess()->Pcb.DirectoryTableBase[0] = PageFrameIndex;
//
// Blow away user-mode
//
StartPde = MiAddressToPde(0);
EndPde = MiAddressToPde(KSEG0_BASE);
RtlZeroMemory(StartPde, (EndPde - StartPde) * sizeof(MMPTE));
//
// Loop the memory descriptors
//
@ -224,7 +223,7 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
MdBlock = CONTAINING_RECORD(NextEntry,
MEMORY_ALLOCATION_DESCRIPTOR,
ListEntry);
//
// Skip invisible memory
//
@ -243,7 +242,7 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
//
MmNumberOfPhysicalPages += MdBlock->PageCount;
}
//
// Check if this is the new lowest page
//
@ -254,7 +253,7 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
//
MmLowestPhysicalPage = MdBlock->BasePage;
}
//
// Check if this is the new highest page
//
@ -266,7 +265,7 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
//
MmHighestPhysicalPage = PageFrameIndex - 1;
}
//
// Check if this is free memory
//
@ -285,32 +284,32 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
//
MxFreeDescriptor = MdBlock;
}
//
// More free pages
//
FreePages += MdBlock->PageCount;
}
//
// Keep going
//
NextEntry = MdBlock->ListEntry.Flink;
}
//
// Save original values of the free descriptor, since it'll be
// altered by early allocations
//
MxOldFreeDescriptor = *MxFreeDescriptor;
/* Compute non paged pool limits and size */
MiComputeNonPagedPoolVa(FreePages);
/* Compute color information (L2 cache-separated paging lists) */
MiComputeColorInformation();
//
// Calculate the number of bytes for the PFN database
// then add the color tables and convert to pages
@ -318,7 +317,7 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
MxPfnAllocation = (MmHighestPhysicalPage + 1) * sizeof(MMPFN);
MxPfnAllocation += (MmSecondaryColors * sizeof(MMCOLOR_TABLES) * 2);
MxPfnAllocation >>= PAGE_SHIFT;
//
// We have to add one to the count here, because in the process of
// shifting down to the page size, we actually ended up getting the
@ -328,7 +327,7 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
// 0x60000 bytes.
//
MxPfnAllocation++;
//
// Now calculate the nonpaged pool expansion VA region
//
@ -339,7 +338,7 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
NonPagedPoolExpansionVa = MmNonPagedPoolStart;
DPRINT("NP Pool has been tuned to: %d bytes and %d bytes\n",
MmSizeOfNonPagedPoolInBytes, MmMaximumNonPagedPoolInBytes);
//
// Now calculate the nonpaged system VA region, which includes the
// nonpaged pool expansion (above) and the system PTEs. Note that it is
@ -349,7 +348,7 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
(MmNumberOfSystemPtes + 1) * PAGE_SIZE);
MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedSystemStart &
~(PDE_MAPPED_VA - 1));
//
// Don't let it go below the minimum
//
@ -359,7 +358,7 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
// This is a hard-coded limit in the Windows NT address space
//
MmNonPagedSystemStart = (PVOID)0xEB000000;
//
// Reduce the amount of system PTEs to reach this point
//
@ -369,7 +368,7 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
MmNumberOfSystemPtes--;
ASSERT(MmNumberOfSystemPtes > 1000);
}
//
// Check if we are in a situation where the size of the paged pool
// is so large that it overflows into nonpaged pool
@ -382,7 +381,7 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
//
DPRINT1("Paged pool is too big!\n");
}
//
// Normally, the PFN database should start after the loader images.
// This is already the case in ReactOS, but for now we want to co-exist
@ -391,7 +390,7 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
//
MmPfnDatabase = (PVOID)0xB0000000;
ASSERT(((ULONG_PTR)MmPfnDatabase & (PDE_MAPPED_VA - 1)) == 0);
//
// Non paged pool comes after the PFN database
//
@ -424,13 +423,13 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
//
TempPde.u.Hard.PageFrameNumber = MxGetNextPage(1);
MI_WRITE_VALID_PTE(StartPde, TempPde);
//
// Zero out the page table
//
PointerPte = MiPteToAddress(StartPde);
RtlZeroMemory(PointerPte, PAGE_SIZE);
//
// Next
//
@ -456,7 +455,7 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
//
PointerPte = MiPteToAddress(StartPde);
RtlZeroMemory(PointerPte, PAGE_SIZE);
//
// Next
//
@ -482,23 +481,23 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
TempPte.u.Hard.PageFrameNumber = PageFrameIndex++;
MI_WRITE_VALID_PTE(PointerPte++, TempPte);
}
//
// Sanity check: make sure we have properly defined the system PTE space
//
ASSERT(MiAddressToPte(MmNonPagedSystemStart) <
MiAddressToPte(MmNonPagedPoolExpansionStart));
/* Now go ahead and initialize the nonpaged pool */
MiInitializeNonPagedPool();
MiInitializeNonPagedPoolThresholds();
/* Map the PFN database pages */
MiMapPfnDatabase(LoaderBlock);
/* Initialize the color tables */
MiInitializeColorTables();
/* Build the PFN Database */
MiInitializePfnDatabase(LoaderBlock);
MmInitializeBalancer(MmAvailablePages, 0);
@ -507,12 +506,12 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
// Reset the descriptor back so we can create the correct memory blocks
//
*MxFreeDescriptor = MxOldFreeDescriptor;
//
// Initialize the nonpaged pool
//
InitializePool(NonPagedPool, 0);
//
// We PDE-aligned the nonpaged system start VA, so haul some extra PTEs!
//
@ -522,18 +521,18 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
MmNumberOfSystemPtes--;
DPRINT("Final System PTE count: %d (%d bytes)\n",
MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);
//
// Create the system PTE space
//
MiInitializeSystemPtes(PointerPte, MmNumberOfSystemPtes, SystemPteSpace);
/* Get the PDE For hyperspace */
StartPde = MiAddressToPde(HYPER_SPACE);
/* Lock PFN database */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
/* Allocate a page for hyperspace and create it */
MI_SET_USAGE(MI_USAGE_PAGE_TABLE);
MI_SET_PROCESS2("Kernel");
@ -541,26 +540,26 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
TempPde.u.Hard.Global = FALSE; // Hyperspace is local!
MI_WRITE_VALID_PTE(StartPde, TempPde);
/* Flush the TLB */
KeFlushCurrentTb();
/* Release the lock */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
//
// Zero out the page table now
//
PointerPte = MiAddressToPte(HYPER_SPACE);
RtlZeroMemory(PointerPte, PAGE_SIZE);
//
// Setup the mapping PTEs
//
MmFirstReservedMappingPte = MiAddressToPte(MI_MAPPING_RANGE_START);
MmLastReservedMappingPte = MiAddressToPte(MI_MAPPING_RANGE_END);
MmFirstReservedMappingPte->u.Hard.PageFrameNumber = MI_HYPERSPACE_PTES;
/* Set the working set address */
MmWorkingSetList = (PVOID)MI_WORKING_SET_LIST;
@ -570,39 +569,39 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
MiFirstReservedZeroingPte = MiReserveSystemPtes(MI_ZERO_PTES,
SystemPteSpace);
RtlZeroMemory(MiFirstReservedZeroingPte, MI_ZERO_PTES * sizeof(MMPTE));
//
// Set the counter to maximum to boot with
//
MiFirstReservedZeroingPte->u.Hard.PageFrameNumber = MI_ZERO_PTES - 1;
/* Lock PFN database */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
/* Reset the ref/share count so that MmInitializeProcessAddressSpace works */
Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(MiAddressToPde(PDE_BASE)));
Pfn1->u2.ShareCount = 0;
Pfn1->u3.e2.ReferenceCount = 0;
/* Get a page for the working set list */
MI_SET_USAGE(MI_USAGE_PAGE_TABLE);
MI_SET_PROCESS2("Kernel WS List");
PageFrameIndex = MiRemoveAnyPage(0);
TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
/* Map the working set list */
PointerPte = MiAddressToPte(MmWorkingSetList);
MI_WRITE_VALID_PTE(PointerPte, TempPte);
/* Zero it out, and save the frame index */
RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
PsGetCurrentProcess()->WorkingSetPage = PageFrameIndex;
/* Check for Pentium LOCK errata */
if (KiI386PentiumLockErrataPresent)
{
/* Mark the 1st IDT page as Write-Through to prevent a lockup
on a F00F instruction.
See http://www.rcollins.org/Errata/Dec97/F00FBug.html */
PointerPte = MiAddressToPte(KeGetPcr()->IDT);
PointerPte->u.Hard.WriteThrough = 1;
@ -614,7 +613,7 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
/* Initialize the bogus address space */
Flags = 0;
MmInitializeProcessAddressSpace(PsGetCurrentProcess(), NULL, NULL, &Flags, NULL);
/* Make sure the color lists are valid */
ASSERT(MmFreePagesByColor[0] < (PMMCOLOR_TABLES)PTE_BASE);
StartPde = MiAddressToPde(MmFreePagesByColor[0]);
@ -639,11 +638,11 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
Pfn1->u3.e1.PageLocation = ActiveAndValid;
Pfn1->u3.e1.CacheAttribute = MiCached;
}
/* Keep going */
PointerPte++;
}
/* All done */
return STATUS_SUCCESS;
}
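For reference, the PAGE_ROUND_UP applied to MmLargeStackSize above is the usual mask trick; a sketch of the definition (the authoritative one lives in the ReactOS headers):

/* Round x up to the next 4 KB page boundary. */
#define PAGE_ROUND_UP(x) \
    ((((ULONG_PTR)(x)) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))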

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::IOSUP"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -49,7 +48,7 @@ MmMapIoSpace(IN PHYSICAL_ADDRESS PhysicalAddress,
IN SIZE_T NumberOfBytes,
IN MEMORY_CACHING_TYPE CacheType)
{
PFN_NUMBER Pfn, PageCount;
PMMPTE PointerPte;
PVOID BaseAddress;
@ -78,13 +77,13 @@ MmMapIoSpace(IN PHYSICAL_ADDRESS PhysicalAddress,
//
CacheType &= 0xFF;
if (CacheType >= MmMaximumCacheType) return NULL;
//
// Calculate page count
//
PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(PhysicalAddress.LowPart,
NumberOfBytes);
//
// Compute the PFN and check if it's a known I/O mapping
// Also translate the cache attribute
@ -93,14 +92,14 @@ MmMapIoSpace(IN PHYSICAL_ADDRESS PhysicalAddress,
Pfn1 = MiGetPfnEntry(Pfn);
IsIoMapping = (Pfn1 == NULL) ? TRUE : FALSE;
CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];
//
// Now allocate system PTEs for the mapping, and get the VA
//
PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
if (!PointerPte) return NULL;
BaseAddress = MiPteToAddress(PointerPte);
//
// Check if this is uncached
//
@ -112,13 +111,13 @@ MmMapIoSpace(IN PHYSICAL_ADDRESS PhysicalAddress,
KeFlushEntireTb(TRUE, TRUE);
KeInvalidateAllCaches();
}
//
// Now compute the VA offset
//
BaseAddress = (PVOID)((ULONG_PTR)BaseAddress +
BYTE_OFFSET(PhysicalAddress.LowPart));
//
// Get the template and configure caching
//
@ -126,38 +125,38 @@ MmMapIoSpace(IN PHYSICAL_ADDRESS PhysicalAddress,
switch (CacheAttribute)
{
case MiNonCached:
//
// Disable the cache
//
MI_PAGE_DISABLE_CACHE(&TempPte);
MI_PAGE_WRITE_THROUGH(&TempPte);
break;
case MiCached:
//
// Leave defaults
//
break;
case MiWriteCombined:
//
// We don't support write combining yet
//
ASSERT(FALSE);
break;
default:
//
// Should never happen
//
ASSERT(FALSE);
break;
}
//
// Sanity check and re-flush
//
@ -165,7 +164,7 @@ MmMapIoSpace(IN PHYSICAL_ADDRESS PhysicalAddress,
ASSERT((Pfn1 == MiGetPfnEntry(Pfn)) || (Pfn1 == NULL));
KeFlushEntireTb(TRUE, TRUE);
KeInvalidateAllCaches();
//
// Do the mapping
//
@ -177,7 +176,7 @@ MmMapIoSpace(IN PHYSICAL_ADDRESS PhysicalAddress,
TempPte.u.Hard.PageFrameNumber = Pfn++;
MI_WRITE_VALID_PTE(PointerPte++, TempPte);
} while (--PageCount);
//
// We're done!
//
@ -191,26 +190,26 @@ VOID
NTAPI
MmUnmapIoSpace(IN PVOID BaseAddress,
IN SIZE_T NumberOfBytes)
{
PFN_NUMBER PageCount, Pfn;
PMMPTE PointerPte;
//
// Sanity check
//
ASSERT(NumberOfBytes != 0);
//
// Get the page count
//
PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(BaseAddress, NumberOfBytes);
//
// Get the PTE and PFN
//
PointerPte = MiAddressToPte(BaseAddress);
Pfn = PFN_FROM_PTE(PointerPte);
//
// Is this an I/O mapping?
//
@ -220,13 +219,13 @@ MmUnmapIoSpace(IN PVOID BaseAddress,
// Destroy the PTE
//
RtlZeroMemory(PointerPte, PageCount * sizeof(MMPTE));
//
// Blow the TLB
//
KeFlushEntireTb(TRUE, TRUE);
}
//
// Release the PTEs
//
@ -243,7 +242,7 @@ MmMapVideoDisplay(IN PHYSICAL_ADDRESS PhysicalAddress,
IN MEMORY_CACHING_TYPE CacheType)
{
PAGED_CODE();
//
// Call the real function
//
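Typical caller-side use of the pair implemented above, as a hedged sketch (the physical address is made up):

PHYSICAL_ADDRESS Phys;
PVOID Registers;

Phys.QuadPart = 0xFEC00000;  /* hypothetical device register base */
Registers = MmMapIoSpace(Phys, PAGE_SIZE, MmNonCached);
if (Registers)
{
    /* ... touch the device registers through Registers ... */
    MmUnmapIoSpace(Registers, PAGE_SIZE);
}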

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::LARGEPAGE"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -46,7 +45,7 @@ MiInitializeLargePageSupport(VOID)
/* Initialize the process tracking list, and insert the system process */
InitializeListHead(&MmProcessList);
InsertTailList(&MmProcessList, &PsGetCurrentProcess()->MmProcessLinks);
#endif
}
VOID
@ -89,7 +88,7 @@ MiInitializeDriverLargePageList(VOID)
p++;
continue;
}
/* A star means everything */
if (*p == L'*')
{
@ -97,7 +96,7 @@ MiInitializeDriverLargePageList(VOID)
MiLargePageAllDrivers = TRUE;
break;
}
DPRINT1("Large page drivers not supported\n");
ASSERT(FALSE);
}

View file

@ -12,12 +12,11 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::MDLSUP"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
/* GLOBALS ********************************************************************/
BOOLEAN MmTrackPtes;
BOOLEAN MmTrackLockedPages;
SIZE_T MmSystemLockPagesCount;
@ -34,7 +33,7 @@ MmCreateMdl(IN PMDL Mdl,
IN SIZE_T Length)
{
SIZE_T Size;
//
// Check if we don't have an MDL built
//
@ -47,7 +46,7 @@ MmCreateMdl(IN PMDL Mdl,
Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
if (!Mdl) return NULL;
}
//
// Initialize it
//
@ -81,7 +80,7 @@ MmBuildMdlForNonPagedPool(IN PMDL Mdl)
PFN_NUMBER Pfn, PageCount;
PVOID Base;
PMMPTE PointerPte;
//
// Sanity checks
//
@ -89,19 +88,19 @@ MmBuildMdlForNonPagedPool(IN PMDL Mdl)
ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
MDL_MAPPED_TO_SYSTEM_VA |
MDL_SOURCE_IS_NONPAGED_POOL |
MDL_PARTIAL)) == 0);
//
// We know the MDL isn't associated to a process now
//
Mdl->Process = NULL;
//
// Get page and VA information
//
MdlPages = (PPFN_NUMBER)(Mdl + 1);
Base = Mdl->StartVa;
//
// Set the system address and now get the page count
//
@ -110,7 +109,7 @@ MmBuildMdlForNonPagedPool(IN PMDL Mdl)
Mdl->ByteCount);
ASSERT(PageCount != 0);
EndPage = MdlPages + PageCount;
//
// Loop the PTEs
//
@ -123,12 +122,12 @@ MmBuildMdlForNonPagedPool(IN PMDL Mdl)
Pfn = PFN_FROM_PTE(PointerPte++);
*MdlPages++ = Pfn;
} while (MdlPages < EndPage);
//
// Set the nonpaged pool flag
//
Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;
//
// Check if this is an I/O mapping
//
@ -169,7 +168,7 @@ MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
IN ULONG Flags)
{
MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
//
// Check for invalid cache type
//
@ -187,7 +186,7 @@ MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
//
CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
}
//
// Only these flags are allowed
//
@ -198,7 +197,7 @@ MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
//
return NULL;
}
//
// Call the internal routine
//
@ -223,14 +222,14 @@ MmFreePagesFromMdl(IN PMDL Mdl)
PMMPFN Pfn1;
KIRQL OldIrql;
DPRINT("Freeing MDL: %p\n", Mdl);
//
// Sanity checks
//
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
//
// Get address and page information
//
@ -260,19 +259,19 @@ MmFreePagesFromMdl(IN PMDL Mdl)
ASSERT(Pfn1);
ASSERT(Pfn1->u2.ShareCount == 1);
ASSERT(MI_IS_PFN_DELETED(Pfn1) == TRUE);
if (Pfn1->u4.PteFrame != 0x1FFEDCB)
{
/* Corrupted PFN entry or invalid free */
KeBugCheckEx(MEMORY_MANAGEMENT, 0x1236, (ULONG_PTR)Mdl, (ULONG_PTR)Pages, *Pages);
}
//
// Clear it
//
Pfn1->u3.e1.StartOfAllocation = 0;
Pfn1->u3.e1.EndOfAllocation = 0;
Pfn1->u2.ShareCount = 0;
//
// Dereference it
//
@ -287,7 +286,7 @@ MmFreePagesFromMdl(IN PMDL Mdl)
/* We'll be nuking the whole page */
MiDecrementReferenceCount(Pfn1, *Pages);
}
//
// Clear this page and move on
//
@ -324,17 +323,17 @@ MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
PMMPTE PointerPte;
MMPTE TempPte;
//
// Sanity check
//
ASSERT(Mdl->ByteCount != 0);
//
// Get the base
//
Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
//
// Handle kernel case first
//
@ -346,7 +345,7 @@ MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
MdlPages = (PPFN_NUMBER)(Mdl + 1);
PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
LastPage = MdlPages + PageCount;
//
// Sanity checks
//
@ -354,13 +353,13 @@ MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
MDL_SOURCE_IS_NONPAGED_POOL |
MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);
//
// Get the correct cache type
//
IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];
//
// Reserve the PTEs
//
@ -371,23 +370,23 @@ MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
// If it can fail, return NULL
//
if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;
//
// Should we bugcheck?
//
if (!BugCheckOnFailure) return NULL;
//
// Yes, crash the system
//
KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
}
//
// Get the mapped address
//
Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset);
//
// Get the template
//
@ -395,30 +394,30 @@ MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
switch (CacheAttribute)
{
case MiNonCached:
//
// Disable caching
//
MI_PAGE_DISABLE_CACHE(&TempPte);
MI_PAGE_WRITE_THROUGH(&TempPte);
break;
case MiWriteCombined:
//
// Enable write combining
//
MI_PAGE_DISABLE_CACHE(&TempPte);
MI_PAGE_WRITE_COMBINED(&TempPte);
break;
default:
//
// Nothing to do
//
break;
}
//
// Loop all PTEs
//
@ -428,21 +427,21 @@ MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
// We're done here
//
if (*MdlPages == LIST_HEAD) break;
//
// Write the PTE
//
TempPte.u.Hard.PageFrameNumber = *MdlPages;
MI_WRITE_VALID_PTE(PointerPte++, TempPte);
} while (++MdlPages < LastPage);
//
// Mark it as mapped
//
ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
Mdl->MappedSystemVa = Base;
Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
//
// Check if it was partial
//
@ -453,13 +452,13 @@ MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
//
Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
}
//
// Return the mapped address
//
return Base;
}
UNIMPLEMENTED;
return NULL;
}
@ -495,12 +494,12 @@ MmUnmapLockedPages(IN PVOID BaseAddress,
PFN_NUMBER PageCount;
PPFN_NUMBER MdlPages;
PMMPTE PointerPte;
//
// Sanity check
//
ASSERT(Mdl->ByteCount != 0);
//
// Check if this is a kernel request
//
@ -511,14 +510,14 @@ MmUnmapLockedPages(IN PVOID BaseAddress,
//
Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
//
// Sanity checks
//
ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
ASSERT(PageCount != 0);
ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);
//
// Get the PTE
//
@ -530,7 +529,7 @@ MmUnmapLockedPages(IN PVOID BaseAddress,
ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
ASSERT(PointerPte->u.Hard.Valid == 1);
//
// Check if the caller wants us to free advanced pages
//
@ -541,7 +540,7 @@ MmUnmapLockedPages(IN PVOID BaseAddress,
//
MdlPages = (PPFN_NUMBER)(Mdl + 1);
MdlPages += PageCount;
//
// Do the math
//
@ -549,21 +548,21 @@ MmUnmapLockedPages(IN PVOID BaseAddress,
PointerPte -= *MdlPages;
ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
//
// Get the new base address
//
BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
((*MdlPages) << PAGE_SHIFT));
}
//
// Remove flags
//
Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
MDL_PARTIAL_HAS_BEEN_MAPPED |
MDL_FREE_EXTRA_PTES);
//
// Release the system PTEs
//
@ -598,7 +597,7 @@ MmProbeAndLockPages(IN PMDL Mdl,
USHORT OldRefCount, RefCount;
PMMPFN Pfn1;
DPRINT("Probing MDL: %p\n", Mdl);
//
// Sanity checks
//
@ -610,13 +609,13 @@ MmProbeAndLockPages(IN PMDL Mdl,
MDL_SOURCE_IS_NONPAGED_POOL |
MDL_PARTIAL |
MDL_IO_SPACE)) == 0);
//
// Get page and base information
//
MdlPages = (PPFN_NUMBER)(Mdl + 1);
Base = (PVOID)Mdl->StartVa;
//
// Get the addresses and how many pages we span (and need to lock)
//
@ -624,7 +623,7 @@ MmProbeAndLockPages(IN PMDL Mdl,
LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
ASSERT(LockPages != 0);
/* Block invalid access */
if ((AccessMode != KernelMode) &&
((LastAddress > (PVOID)MM_USER_PROBE_ADDRESS) || (Address >= LastAddress)))
@ -633,7 +632,7 @@ MmProbeAndLockPages(IN PMDL Mdl,
*MdlPages = LIST_HEAD;
ExRaiseStatus(STATUS_ACCESS_VIOLATION);
}
//
// Get the process
//
@ -651,16 +650,16 @@ MmProbeAndLockPages(IN PMDL Mdl,
//
CurrentProcess = NULL;
}
//
// Save the number of pages we'll have to lock, and the start address
//
TotalPages = LockPages;
StartAddress = Address;
/* Large pages not supported */
ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address));
//
// Now probe them
//
@ -676,12 +675,12 @@ MmProbeAndLockPages(IN PMDL Mdl,
// Assume failure
//
*MdlPages = LIST_HEAD;
//
// Read
//
*(volatile CHAR*)Address;
//
// Check if this is write access (only probe for user-mode)
//
@ -693,19 +692,19 @@ MmProbeAndLockPages(IN PMDL Mdl,
//
ProbeForWriteChar(Address);
}
//
// Next address...
//
Address = PAGE_ALIGN((ULONG_PTR)Address + PAGE_SIZE);
//
// Next page...
//
LockPages--;
MdlPages++;
} while (Address < LastAddress);
//
// Reset back to the original page
//
@ -720,7 +719,7 @@ MmProbeAndLockPages(IN PMDL Mdl,
ProbeStatus = _SEH2_GetExceptionCode();
}
_SEH2_END;
//
// So how did that go?
//
@ -733,7 +732,7 @@ MmProbeAndLockPages(IN PMDL Mdl,
Mdl->Process = NULL;
ExRaiseStatus(ProbeStatus);
}
//
// Get the PTE and PDE
//
@ -743,12 +742,12 @@ MmProbeAndLockPages(IN PMDL Mdl,
DPRINT1("PAE/x64 Not Implemented\n");
ASSERT(FALSE);
#endif
//
// Sanity check
//
ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));
//
// Check what kind of operation this is
//
@ -766,12 +765,12 @@ MmProbeAndLockPages(IN PMDL Mdl,
//
Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
}
//
// Mark the MDL as locked *now*
//
Mdl->MdlFlags |= MDL_PAGES_LOCKED;
//
// Check if this came from kernel mode
//
@ -782,12 +781,12 @@ MmProbeAndLockPages(IN PMDL Mdl,
//
ASSERT(CurrentProcess == NULL);
Mdl->Process = NULL;
//
// In kernel mode, we don't need to check for write access
//
Operation = IoReadAccess;
//
// Use the PFN lock
//
@ -801,29 +800,29 @@ MmProbeAndLockPages(IN PMDL Mdl,
//
ASSERT(TotalPages != 0);
ASSERT(CurrentProcess == PsGetCurrentProcess());
//
// Track locked pages
//
InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
TotalPages);
//
// Save the process
//
Mdl->Process = CurrentProcess;
/* Lock the process working set */
MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
UsePfnLock = FALSE;
OldIrql = MM_NOIRQL;
}
//
// Get the last PTE
//
LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));
//
// Loop the pages
//
@ -855,7 +854,7 @@ MmProbeAndLockPages(IN PMDL Mdl,
/* Release process working set */
MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
}
//
// Access the page
//
@ -869,7 +868,7 @@ MmProbeAndLockPages(IN PMDL Mdl,
DPRINT1("Access fault failed\n");
goto Cleanup;
}
//
// What lock should we use?
//
@ -886,7 +885,7 @@ MmProbeAndLockPages(IN PMDL Mdl,
MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
}
}
//
// Check if this was a write or modify
//
@ -923,7 +922,7 @@ MmProbeAndLockPages(IN PMDL Mdl,
/* Release process working set */
MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
}
//
// Access the page
//
@ -936,7 +935,7 @@ MmProbeAndLockPages(IN PMDL Mdl,
DPRINT1("Access fault failed\n");
goto Cleanup;
}
//
// Re-acquire the lock
//
@ -952,14 +951,14 @@ MmProbeAndLockPages(IN PMDL Mdl,
/* Lock the process working set */
MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
}
//
// Start over
//
continue;
}
}
//
// Fail, since we won't allow this
//
@ -967,7 +966,7 @@ MmProbeAndLockPages(IN PMDL Mdl,
goto CleanupWithLock;
}
}
//
// Grab the PFN
//
@ -977,10 +976,10 @@ MmProbeAndLockPages(IN PMDL Mdl,
{
/* Either this is for kernel-mode, or the working set is held */
ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));
/* No Physical VADs supported yet */
if (CurrentProcess) ASSERT(CurrentProcess->PhysicalVadRoot == NULL);
/* This address should already exist and be fully valid */
ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
if (MI_IS_ROS_PFN(Pfn1))
@ -992,7 +991,7 @@ MmProbeAndLockPages(IN PMDL Mdl,
{
/* On ARM3 pages, we should see a valid share count */
ASSERT((Pfn1->u2.ShareCount != 0) && (Pfn1->u3.e1.PageLocation == ActiveAndValid));
/* We don't support mapping a prototype page yet */
ASSERT((Pfn1->u3.e1.PrototypePte == 0) && (Pfn1->OriginalPte.u.Soft.Prototype == 0));
}
@ -1014,7 +1013,7 @@ MmProbeAndLockPages(IN PMDL Mdl,
OldRefCount);
ASSERT(RefCount != 0);
} while (OldRefCount != RefCount);
/* Was this the first lock attempt? */
if (OldRefCount != 1)
{
@ -1029,17 +1028,17 @@ MmProbeAndLockPages(IN PMDL Mdl,
//
Mdl->MdlFlags |= MDL_IO_SPACE;
}
//
// Write the page and move on
//
*MdlPages++ = PageFrameIndex;
PointerPte++;
/* Check if we're on a PDE boundary */
if (!((ULONG_PTR)PointerPte & (PD_SIZE - 1))) PointerPde++;
} while (PointerPte <= LastPte);
//
// What kind of lock were we using?
//
@ -1055,19 +1054,19 @@ MmProbeAndLockPages(IN PMDL Mdl,
/* Release process working set */
MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
}
//
// Sanity check
//
ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
return;
CleanupWithLock:
//
// This is the failure path
//
ASSERT(!NT_SUCCESS(Status));
//
// What kind of lock were we using?
//
@ -1089,7 +1088,7 @@ Cleanup:
//
ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
MmUnlockPages(Mdl);
//
// Raise the error
//
@ -1111,7 +1110,7 @@ MmUnlockPages(IN PMDL Mdl)
USHORT RefCount, OldRefCount;
PMMPFN Pfn1;
DPRINT("Unlocking MDL: %p\n", Mdl);
//
// Sanity checks
//
@ -1119,13 +1118,13 @@ MmUnlockPages(IN PMDL Mdl)
ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
ASSERT(Mdl->ByteCount != 0);
//
// Get the process associated and capture the flags which are volatile
//
Process = Mdl->Process;
Flags = Mdl->MdlFlags;
//
// Automagically undo any calls to MmGetSystemAddressForMdl for this MDL
//
@ -1136,7 +1135,7 @@ MmUnlockPages(IN PMDL Mdl)
//
MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
}
//
// Get the page count
//
@ -1144,22 +1143,22 @@ MmUnlockPages(IN PMDL Mdl)
Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
ASSERT(PageCount != 0);
//
// We don't support AWE
//
if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);
//
// Check if the buffer is mapped I/O space
//
if (Flags & MDL_IO_SPACE)
{
//
// Acquire PFN lock
//
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
//
// Loop every page
//
@ -1170,7 +1169,7 @@ MmUnlockPages(IN PMDL Mdl)
// Last page, break out
//
if (*MdlPages == LIST_HEAD) break;
//
// Check if this page is in the PFN database
//
@ -1188,14 +1187,14 @@ MmUnlockPages(IN PMDL Mdl)
ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
ASSERT(Pfn1->u2.ShareCount == 0);
/* Not supported yet */
ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
(Pfn1->OriginalPte.u.Soft.Prototype == 0)));
/* One less page */
InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
/* Do the last dereference, we're done here */
MiDecrementReferenceCount(Pfn1, *MdlPages);
}
@ -1236,12 +1235,12 @@ MmUnlockPages(IN PMDL Mdl)
}
}
} while (++MdlPages < LastPage);
//
// Release the lock
//
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
//
// Check if we have a process
//
@ -1254,7 +1253,7 @@ MmUnlockPages(IN PMDL Mdl)
InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
-PageCount);
}
//
// We're done
//
@ -1262,7 +1261,7 @@ MmUnlockPages(IN PMDL Mdl)
Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
return;
}
//
// Check if we have a process
//
@ -1275,7 +1274,7 @@ MmUnlockPages(IN PMDL Mdl)
InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
-PageCount);
}
//
// Loop every page
//
@ -1298,24 +1297,24 @@ MmUnlockPages(IN PMDL Mdl)
Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
return;
}
//
// Otherwise, stop here
//
LastPage = MdlPages;
break;
}
/* Save the PFN entry instead for the secondary loop */
*MdlPages = (PFN_NUMBER)MiGetPfnEntry(*MdlPages);
ASSERT((*MdlPages) != 0);
} while (++MdlPages < LastPage);
//
// Reset pointer
//
MdlPages = (PPFN_NUMBER)(Mdl + 1);
//
// Now grab the PFN lock for the actual unlock and dereference
//
@ -1334,14 +1333,14 @@ MmUnlockPages(IN PMDL Mdl)
ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
ASSERT(Pfn1->u2.ShareCount == 0);
/* Not supported yet */
ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
(Pfn1->OriginalPte.u.Soft.Prototype == 0)));
/* One less page */
InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
/* Do the last dereference, we're done here */
MiDecrementReferenceCount(Pfn1, MiGetPfnEntryIndex(Pfn1));
}
@ -1381,12 +1380,12 @@ MmUnlockPages(IN PMDL Mdl)
}
}
} while (++MdlPages < LastPage);
//
// Release the lock
//
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
//
// We're done
//
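
A minimal caller-side sketch of the pairing the two routines above expect — UserBuffer, Length, and the write-access direction are assumptions for illustration, but the call sequence itself is the documented WDK pattern (note that MmUnlockPages also undoes the system mapping, as the code above shows):

PMDL Mdl;
PVOID SystemVa;

Mdl = IoAllocateMdl(UserBuffer, Length, FALSE, FALSE, NULL);
if (Mdl)
{
    __try
    {
        /* Probes each VA, faults the pages in, and references every PFN */
        MmProbeAndLockPages(Mdl, UserMode, IoWriteAccess);

        /* Map into system space; MmUnlockPages undoes this automatically */
        SystemVa = MmGetSystemAddressForMdlSafe(Mdl, NormalPagePriority);
        if (SystemVa) RtlZeroMemory(SystemVa, Length);

        /* Drops the per-page references taken by the probe */
        MmUnlockPages(Mdl);
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        /* MmProbeAndLockPages raises on an invalid user buffer */
    }
    IoFreeMdl(Mdl);
}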

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 16 "ARM³::DEBUGSUP"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -47,7 +46,7 @@ MiDbgTranslatePhysicalAddress(IN ULONG64 PhysicalAddress,
PVOID MappingBaseAddress;
//
// Check if we are called too early
//
if (MmDebugPte == NULL)
{
@ -133,7 +132,7 @@ MiDbgUnTranslatePhysicalAddress(VOID)
//
ASSERT(MmIsAddressValid(MappingBaseAddress));
//
// Clear the mapping PTE and invalidate its TLB entry
//
MmDebugPte->u.Long = 0;

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::INIT"
#define MODULE_INVOLVED_IN_ARM3
#include "miarm.h"
@ -38,7 +37,7 @@ PFN_NUMBER MmMaximumNonPagedPoolInPages;
//
SIZE_T MmMinimumNonPagedPoolSize = 256 * 1024;
ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
SIZE_T MmDefaultMaximumNonPagedPool = 1024 * 1024;
ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;
//
@ -58,7 +57,7 @@ ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;
// Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
// the PFN database (which starts at 0xB0000000).
//
// The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
// for a 1GB system). On ARM³ however, it is currently capped at 128MB.
//
// The address where the initial nonpaged pool starts is aptly named
@ -78,7 +77,7 @@ ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;
// a System PTE, it is always valid, until the System PTE is torn down.
//
// System PTEs are actually composed of two "spaces", the system space proper,
// and the nonpaged pool expansion space. The latter, as we've already seen,
// begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
// that the system will support, the remaining address space below this address
// is used to hold the system space PTEs. This address, in turn, is held in the
@ -116,7 +115,7 @@ PFN_NUMBER MmSizeOfPagedPoolInPages = MI_MIN_INIT_PAGED_POOLSIZE / PAGE_SIZE;
// drivers, followed by a 4MB area containing the session's working set. This is
// then followed by a 20MB mapped view area and finally by the session's paged
// pool, by default 16MB.
//
// On a normal system, this results in session space occupying the region from
// 0xBD000000 to 0xC0000000
//
@ -307,7 +306,7 @@ PFN_NUMBER MiHighNonPagedPoolThreshold;
*/
PFN_NUMBER MmMinimumFreePages = 26;
/*
* This number indicates how many pages we consider to be a low limit of having
* "plenty" of free memory.
*
@ -369,7 +368,7 @@ INIT_FUNCTION
MxGetNextPage(IN PFN_NUMBER PageCount)
{
PFN_NUMBER Pfn;
/* Make sure we have enough pages */
if (PageCount > MxFreeDescriptor->PageCount)
{
@ -380,7 +379,7 @@ MxGetNextPage(IN PFN_NUMBER PageCount)
MxOldFreeDescriptor.PageCount,
PageCount);
}
/* Use our lowest usable free pages */
Pfn = MxFreeDescriptor->BasePage;
MxFreeDescriptor->BasePage += PageCount;
@ -394,18 +393,18 @@ INIT_FUNCTION
MiComputeColorInformation(VOID)
{
ULONG L2Associativity;
/* Check if no setting was provided already */
if (!MmSecondaryColors)
{
/* Get L2 cache information */
L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
/* The number of colors is the number of cache bytes by set/way */
MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
if (L2Associativity) MmSecondaryColors /= L2Associativity;
}
/* Now convert cache bytes into pages */
MmSecondaryColors >>= PAGE_SHIFT;
if (!MmSecondaryColors)
@ -421,14 +420,14 @@ MiComputeColorInformation(VOID)
/* Set the maximum */
MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
}
/* Make sure there aren't too little colors */
if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
{
/* Set the default */
MmSecondaryColors = MI_SECONDARY_COLORS;
}
/* Finally make sure the colors are a power of two */
if (MmSecondaryColors & (MmSecondaryColors - 1))
{
@ -436,10 +435,10 @@ MiComputeColorInformation(VOID)
MmSecondaryColors = MI_SECONDARY_COLORS;
}
}
/* Compute the mask and store it */
MmSecondaryColorMask = MmSecondaryColors - 1;
KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
}
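
A worked example of the computation above, with assumed cache figures: a 512 KB, 8-way set-associative L2 with 4 KB pages gives (512 KB / 8) >> PAGE_SHIFT = 16 secondary colors, so the mask is 0xF.

ULONG CacheSize = 512 * 1024;  /* illustrative L2 size in bytes */
ULONG Ways = 8;                /* illustrative associativity */
ULONG Colors = (CacheSize / Ways) >> PAGE_SHIFT;  /* 65536 >> 12 = 16 */
ULONG ColorMask = Colors - 1;                     /* 0xF */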
VOID
@ -450,10 +449,10 @@ MiInitializeColorTables(VOID)
ULONG i;
PMMPTE PointerPte, LastPte;
MMPTE TempPte = ValidKernelPte;
/* The color table starts after the ARM3 PFN database */
MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1];
/* Loop the PTEs. We have two color tables for each secondary color */
PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
@ -471,14 +470,14 @@ MiInitializeColorTables(VOID)
/* Zero out the page */
RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
}
/* Next */
PointerPte++;
}
/* Now set the address of the next list, right after this one */
MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
/* Now loop the lists to set them up */
for (i = 0; i < MmSecondaryColors; i++)
{
@ -561,12 +560,12 @@ MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
PMMPTE PointerPte, LastPte;
MMPTE TempPte = ValidKernelPte;
/* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
FreePage = MxFreeDescriptor->BasePage;
FreePageCount = MxFreeDescriptor->PageCount;
PagesLeft = 0;
/* Loop the memory descriptors */
NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
@ -583,7 +582,7 @@ MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
NextEntry = MdBlock->ListEntry.Flink;
continue;
}
/* Next, check if this is our special free descriptor we've found */
if (MdBlock == MxFreeDescriptor)
{
@ -597,12 +596,12 @@ MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
BasePage = MdBlock->BasePage;
PageCount = MdBlock->PageCount;
}
/* Get the PTEs for this range */
PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
/* Loop them */
while (PointerPte <= LastPte)
{
@ -612,7 +611,7 @@ MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
/* Use the next free page */
TempPte.u.Hard.PageFrameNumber = FreePage;
ASSERT(FreePageCount != 0);
/* Consume free pages */
FreePage++;
FreePageCount--;
@ -625,15 +624,15 @@ MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
MxOldFreeDescriptor.PageCount,
1);
}
/* Write out this PTE */
PagesLeft++;
MI_WRITE_VALID_PTE(PointerPte, TempPte);
/* Zero this page */
RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
}
/* Next! */
PointerPte++;
}
@ -641,7 +640,7 @@ MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
/* Do the next address range */
NextEntry = MdBlock->ListEntry.Flink;
}
/* Now update the free descriptors to consume the pages we used up during the PFN allocation loop */
MxFreeDescriptor->BasePage = FreePage;
MxFreeDescriptor->PageCount = FreePageCount;
@ -658,10 +657,10 @@ MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
PFN_NUMBER PageFrameIndex, StartupPdIndex, PtePageIndex;
PMMPFN Pfn1, Pfn2;
ULONG_PTR BaseAddress = 0;
/* PFN of the startup page directory */
StartupPdIndex = PFN_FROM_PTE(MiAddressToPde(PDE_BASE));
/* Start with the first PDE and scan them all */
PointerPde = MiAddressToPde(NULL);
Count = PD_COUNT * PDE_COUNT;
@ -672,7 +671,7 @@ MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
/* Get the PFN from it */
PageFrameIndex = PFN_FROM_PTE(PointerPde);
/* Do we want a PFN entry for this page? */
if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
{
@ -694,7 +693,7 @@ MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
/* No PFN entry */
Pfn1 = NULL;
}
/* Now get the PTE and scan the pages */
PointerPte = MiAddressToPte(BaseAddress);
for (j = 0; j < PTE_COUNT; j++)
@ -705,7 +704,7 @@ MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
/* Increase the shared count of the PFN entry for the PDE */
ASSERT(Pfn1 != NULL);
Pfn1->u2.ShareCount++;
/* Now check if the PTE is valid memory too */
PtePageIndex = PFN_FROM_PTE(PointerPte);
if (MiIsRegularMemory(LoaderBlock, PtePageIndex))
@ -739,7 +738,7 @@ MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
}
}
}
/* Next PTE */
PointerPte++;
BaseAddress += PAGE_SIZE;
@ -750,7 +749,7 @@ MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
/* Next PDE mapped address */
BaseAddress += PDE_MAPPED_VA;
}
/* Next PTE */
PointerPde++;
}
@ -763,7 +762,7 @@ MiBuildPfnDatabaseZeroPage(VOID)
{
PMMPFN Pfn1;
PMMPDE PointerPde;
/* Grab the lowest page and check if it has no real references */
Pfn1 = MiGetPfnEntry(MmLowestPhysicalPage);
if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
@ -776,7 +775,7 @@ MiBuildPfnDatabaseZeroPage(VOID)
Pfn1->u3.e2.ReferenceCount = 0xFFF0;
Pfn1->u3.e1.PageLocation = ActiveAndValid;
Pfn1->u3.e1.CacheAttribute = MiNonCached;
}
}
VOID
@ -792,7 +791,7 @@ MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
PMMPTE PointerPte;
PMMPDE PointerPde;
KIRQL OldIrql;
/* Now loop through the descriptors */
NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
@ -819,7 +818,7 @@ MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
/* In which case we'll trim the descriptor to go as high as we can */
PageCount = MmHighestPhysicalPage + 1 - PageFrameIndex;
MdBlock->PageCount = PageCount;
/* But if there's nothing left to trim, we got too high, so quit */
if (!PageCount) break;
}
@ -829,7 +828,7 @@ MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
/* Check for bad RAM */
case LoaderBad:
DPRINT1("You either have specified /BURNMEMORY or damaged RAM modules.\n");
break;
@ -842,7 +841,7 @@ MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
/* Get the last page of this descriptor. Note we loop backwards */
PageFrameIndex += PageCount - 1;
Pfn1 = MiGetPfnEntry(PageFrameIndex);
/* Lock the PFN Database */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
while (PageCount--)
@ -859,10 +858,10 @@ MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
Pfn1--;
PageFrameIndex--;
}
/* Release PFN database */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
/* Done with this block */
break;
@ -895,7 +894,7 @@ MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
#if MI_TRACE_PFNS
Pfn1->PfnUsage = MI_USAGE_BOOT_DRIVER;
#endif
/* Check for RAM disk page */
if (MdBlock->MemoryType == LoaderXIPRom)
{
@ -909,7 +908,7 @@ MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
Pfn1->u3.e1.PrototypePte = 1;
}
}
/* Advance page structures */
Pfn1++;
PageFrameIndex++;
@ -930,7 +929,7 @@ MiBuildPfnDatabaseSelf(VOID)
{
PMMPTE PointerPte, LastPte;
PMMPFN Pfn1;
/* Loop the PFN database page */
PointerPte = MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage));
LastPte = MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage));
@ -947,7 +946,7 @@ MiBuildPfnDatabaseSelf(VOID)
Pfn1->PfnUsage = MI_USAGE_PFN_DATABASE;
#endif
}
/* Next */
PointerPte++;
}
@ -960,14 +959,14 @@ MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
/* Scan memory and start setting up PFN entries */
MiBuildPfnDatabaseFromPages(LoaderBlock);
/* Add the zero page */
MiBuildPfnDatabaseZeroPage();
/* Scan the loader block and build the rest of the PFN database */
MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock);
/* Finally add the pages for the PFN database itself */
MiBuildPfnDatabaseSelf();
}
@ -977,7 +976,7 @@ INIT_FUNCTION
MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client)
{
/* This function needs to do more work; for now, we only tune page minimums */
/* Check for a system with around 64MB RAM or more */
if (MmNumberOfPhysicalPages >= (63 * _1MB) / PAGE_SIZE)
{
@ -1044,35 +1043,35 @@ MiCreateMemoryEvent(IN PUNICODE_STRING Name,
/* Setup the ACL inside it */
Status = RtlCreateAcl(Dacl, DaclLength, ACL_REVISION);
if (!NT_SUCCESS(Status)) goto CleanUp;
/* Add query rights for everyone */
Status = RtlAddAccessAllowedAce(Dacl,
ACL_REVISION,
SYNCHRONIZE | EVENT_QUERY_STATE | READ_CONTROL,
SeWorldSid);
if (!NT_SUCCESS(Status)) goto CleanUp;
/* Full rights for the admin */
Status = RtlAddAccessAllowedAce(Dacl,
ACL_REVISION,
EVENT_ALL_ACCESS,
SeAliasAdminsSid);
if (!NT_SUCCESS(Status)) goto CleanUp;
/* As well as full rights for the system */
Status = RtlAddAccessAllowedAce(Dacl,
ACL_REVISION,
EVENT_ALL_ACCESS,
SeLocalSystemSid);
if (!NT_SUCCESS(Status)) goto CleanUp;
/* Set this DACL inside the SD */
Status = RtlSetDaclSecurityDescriptor(&SecurityDescriptor,
TRUE,
Dacl,
FALSE);
if (!NT_SUCCESS(Status)) goto CleanUp;
/* Setup the event attributes, making sure it's a permanent one */
InitializeObjectAttributes(&ObjectAttributes,
Name,
@ -1100,7 +1099,7 @@ CleanUp:
KernelMode,
(PVOID*)Event,
NULL);
ZwClose (EventHandle);
}
/* Return status */
@ -1297,30 +1296,30 @@ MmDumpArmPfnDatabase(IN BOOLEAN StatusOnly)
switch (Pfn1->u3.e1.PageLocation)
{
case ActiveAndValid:
Consumer = "Active and Valid";
ActivePages++;
break;
case ZeroedPageList:
Consumer = "Zero Page List";
FreePages++;
break;//continue;
case FreePageList:
Consumer = "Free Page List";
FreePages++;
break;//continue;
default:
Consumer = "Other (ASSERT!)";
OtherPages++;
break;
}
#if MI_TRACE_PFNS
/* Add into bucket */
UsageBucket[Pfn1->PfnUsage]++;
@ -1343,7 +1342,7 @@ MmDumpArmPfnDatabase(IN BOOLEAN StatusOnly)
"is disabled");
#endif
}
DbgPrint("Active: %5d pages\t[%6d KB]\n", ActivePages, (ActivePages << PAGE_SHIFT) / 1024);
DbgPrint("Free: %5d pages\t[%6d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
DbgPrint("-----------------------------------------\n");
@ -1380,7 +1379,7 @@ MiPagesInLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
PLIST_ENTRY NextEntry;
PFN_NUMBER PageCount = 0;
PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
//
// Now loop through the descriptors
//
@ -1401,13 +1400,13 @@ MiPagesInLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
//
PageCount += MdBlock->PageCount;
}
//
// Try the next descriptor
//
NextEntry = MdBlock->ListEntry.Flink;
}
//
// Return the total
//
@ -1425,7 +1424,7 @@ MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
PFN_NUMBER NextPage = -1, PageCount = 0;
PPHYSICAL_MEMORY_DESCRIPTOR Buffer, NewBuffer;
PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
//
// Scan the memory descriptors
//
@ -1438,7 +1437,7 @@ MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
InitialRuns++;
NextEntry = NextEntry->Flink;
}
//
// Allocate the maximum we'll ever need
//
@ -1453,7 +1452,7 @@ MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
// For now that's how many runs we have
//
Buffer->NumberOfRuns = InitialRuns;
//
// Now loop through the descriptors again
//
@ -1473,10 +1472,10 @@ MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
// Add this to our running total
//
PageCount += MdBlock->PageCount;
//
// Check if the next page is described by the next descriptor
//
if (MdBlock->BasePage == NextPage)
{
//
@ -1494,20 +1493,20 @@ MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
Buffer->Run[Run].BasePage = MdBlock->BasePage;
Buffer->Run[Run].PageCount = MdBlock->PageCount;
NextPage = Buffer->Run[Run].BasePage + Buffer->Run[Run].PageCount;
//
// And in this case, increase the number of runs
//
Run++;
}
}
//
// Try the next descriptor
//
NextEntry = MdBlock->ListEntry.Flink;
}
//
// We should not have been able to go past our initial estimate
//
@ -1535,14 +1534,14 @@ MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
Buffer->Run,
sizeof(PHYSICAL_MEMORY_RUN) * Run);
ExFreePool(Buffer);
//
// Now use the new buffer
//
Buffer = NewBuffer;
}
}
//
// Write the final numbers, and return it
//
@ -1570,7 +1569,7 @@ MiBuildPagedPool(VOID)
PointerPte = MiAddressToPte(PDE_BASE);
ASSERT(PD_COUNT == 1);
MmSystemPageDirectory[0] = PFN_FROM_PTE(PointerPte);
//
// Allocate a system PTE which will hold a copy of the page directory
//
@ -1628,7 +1627,7 @@ MiBuildPagedPool(VOID)
//
// Let's be really sure this doesn't overflow into nonpaged system VA
//
ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
(ULONG_PTR)MmNonPagedSystemStart);
//
@ -1641,7 +1640,7 @@ MiBuildPagedPool(VOID)
// So now get the PDE for paged pool and zero it out
//
PointerPde = MiAddressToPde(MmPagedPoolStart);
#if (_MI_PAGING_LEVELS >= 3)
/* On these systems, there's no double-mapping, so instead, the PPE and PXEs
* are setup to span the entire paged pool area, so there's no need for the
@ -1759,7 +1758,7 @@ MiBuildPagedPool(VOID)
MiHighPagedPoolThreshold = (60 * _1MB) >> PAGE_SHIFT;
MiHighPagedPoolThreshold = min(MiHighPagedPoolThreshold, (Size * 2) / 5);
ASSERT(MiLowPagedPoolThreshold < MiHighPagedPoolThreshold);
/* Setup the global session space */
MiInitializeSystemSpaceMap(NULL);
}
@ -1802,7 +1801,7 @@ MiDbgDumpMemoryDescriptors(VOID)
"LoaderReserve ",
"LoaderXIPRom "
};
DPRINT1("Base\t\tLength\t\tType\n");
for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
@ -1827,10 +1826,10 @@ MmArmInitSystem(IN ULONG Phase,
PVOID Bitmap;
PPHYSICAL_MEMORY_RUN Run;
PFN_NUMBER PageCount;
/* Dump memory descriptors */
if (MiDbgEnableMdDump) MiDbgDumpMemoryDescriptors();
//
// Instantiate memory that we don't consider RAM/usable
// We use the same exclusions that Windows does, in order to try to be
@ -1853,14 +1852,14 @@ MmArmInitSystem(IN ULONG Phase,
MiHighPagedPoolEvent = &MiTempEvent;
MiLowNonPagedPoolEvent = &MiTempEvent;
MiHighNonPagedPoolEvent = &MiTempEvent;
//
// Define the basic user vs. kernel address space separation
//
MmSystemRangeStart = (PVOID)KSEG0_BASE;
MmUserProbeAddress = (ULONG_PTR)MmSystemRangeStart - 0x10000;
MmHighestUserAddress = (PVOID)(MmUserProbeAddress - 1);
/* Highest PTE and PDE based on the addresses above */
MiHighestUserPte = MiAddressToPte(MmHighestUserAddress);
MiHighestUserPde = MiAddressToPde(MmHighestUserAddress);
@ -1878,7 +1877,7 @@ MmArmInitSystem(IN ULONG Phase,
MmBootImageSize *= PAGE_SIZE;
MmBootImageSize = (MmBootImageSize + PDE_MAPPED_VA - 1) & ~(PDE_MAPPED_VA - 1);
ASSERT((MmBootImageSize % PDE_MAPPED_VA) == 0);
//
// Set the size of session view, pool, and image
//
@ -1886,54 +1885,54 @@ MmArmInitSystem(IN ULONG Phase,
MmSessionViewSize = MI_SESSION_VIEW_SIZE;
MmSessionPoolSize = MI_SESSION_POOL_SIZE;
MmSessionImageSize = MI_SESSION_IMAGE_SIZE;
//
// Set the size of system view
//
MmSystemViewSize = MI_SYSTEM_VIEW_SIZE;
//
// This is where it all ends
//
MiSessionImageEnd = (PVOID)PTE_BASE;
//
// This is where we will load Win32k.sys and the video driver
//
MiSessionImageStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
MmSessionImageSize);
//
// So the view starts right below the session working set (itself below
// the image area)
//
MiSessionViewStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
MmSessionImageSize -
MI_SESSION_WORKING_SET_SIZE -
MmSessionViewSize);
//
// Session pool follows
//
MiSessionPoolEnd = MiSessionViewStart;
MiSessionPoolStart = (PVOID)((ULONG_PTR)MiSessionPoolEnd -
MmSessionPoolSize);
//
// And it all begins here
//
MmSessionBase = MiSessionPoolStart;
//
// Sanity check that our math is correct
//
ASSERT((ULONG_PTR)MmSessionBase + MmSessionSize == PTE_BASE);
//
// Session space ends wherever image session space ends
//
MiSessionSpaceEnd = MiSessionImageEnd;
//
// System view space ends at session space, so now that we know where
// this is, we can compute the base address of system view space itself.
@ -1946,25 +1945,25 @@ MmArmInitSystem(IN ULONG Phase,
MiSessionImagePteEnd = MiAddressToPte(MiSessionImageEnd);
MiSessionBasePte = MiAddressToPte(MmSessionBase);
MiSessionLastPte = MiAddressToPte(MiSessionSpaceEnd);
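
Plugging the defaults quoted earlier in this file into the chain of subtractions above gives a concrete picture (a worked example: the 8 MB image figure is inferred from the 48 MB total minus the 4 + 20 + 16 MB areas, and all values are the x86 defaults, so treat them as illustrative):

/*
 * MiSessionImageEnd   = PTE_BASE                 = 0xC0000000
 * MiSessionImageStart = 0xC0000000 - 8 MB        = 0xBF800000
 * MiSessionViewStart  = 0xC0000000 - (8+4+20) MB = 0xBE000000
 * MmSessionBase       = 0xBE000000 - 16 MB       = 0xBD000000
 *
 * which is exactly the 0xBD000000..0xC0000000 region described above,
 * and satisfies the earlier ASSERT: 0xBD000000 + 48 MB == PTE_BASE.
 */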
/* Initialize the user mode image list */
InitializeListHead(&MmLoadedUserImageList);
/* Initialize the paged pool mutex */
KeInitializeGuardedMutex(&MmPagedPoolMutex);
/* Initialize the Loader Lock */
KeInitializeMutant(&MmSystemLoadLock, FALSE);
/* Set the zero page event */
KeInitializeEvent(&MmZeroingPageEvent, SynchronizationEvent, FALSE);
MmZeroingPageThreadActive = FALSE;
//
// Count physical pages on the system
//
PageCount = MiPagesInLoaderBlock(LoaderBlock, IncludeType);
//
// Check if this is a machine with less than 19MB of RAM
//
@ -1989,17 +1988,17 @@ MmArmInitSystem(IN ULONG Phase,
MmNumberOfSystemPtes <<= 1;
}
}
DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);
/* Initialize the working set lock */
ExInitializePushLock((PULONG_PTR)&MmSystemCacheWs.WorkingSetMutex);
/* Set commit limit */
MmTotalCommitLimit = 2 * _1GB;
MmTotalCommitLimitMaximum = MmTotalCommitLimit;
/* Has the allocation fragment been setup? */
if (!MmAllocationFragment)
{
@ -2021,17 +2020,17 @@ MmArmInitSystem(IN ULONG Phase,
/* Convert from 1KB fragments to pages */
MmAllocationFragment *= _1KB;
MmAllocationFragment = ROUND_TO_PAGES(MmAllocationFragment);
/* Don't let it past the maximum */
MmAllocationFragment = min(MmAllocationFragment,
MI_MAX_ALLOCATION_FRAGMENT);
/* Don't let it too small either */
MmAllocationFragment = max(MmAllocationFragment,
MI_MIN_ALLOCATION_FRAGMENT);
}
/* Initialize the platform-specific parts */
MiInitMachineDependent(LoaderBlock);
//
@ -2039,7 +2038,7 @@ MmArmInitSystem(IN ULONG Phase,
//
MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
IncludeType);
//
// Allocate enough buffer for the PFN bitmap
// Align it up to a 32-bit boundary
@ -2058,7 +2057,7 @@ MmArmInitSystem(IN ULONG Phase,
MmHighestPhysicalPage,
0x101);
}
//
// Initialize it and clear all the bits to begin with
//
@ -2066,7 +2065,7 @@ MmArmInitSystem(IN ULONG Phase,
Bitmap,
MmHighestPhysicalPage + 1);
RtlClearAllBits(&MiPfnBitMap);
//
// Loop physical memory runs
//
@ -2091,7 +2090,7 @@ MmArmInitSystem(IN ULONG Phase,
RtlSetBits(&MiPfnBitMap, Run->BasePage, Run->PageCount);
}
}
/* Look for large page cache entries that need caching */
MiSyncCachedRanges();
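
Once the bitmap is populated, a PFN can be tested for being backed by RAM with the standard bitmap API — a one-line sketch, where SomePfn is an illustrative variable:

BOOLEAN IsRam = RtlCheckBit(&MiPfnBitMap, (ULONG)SomePfn);  /* SomePfn is hypothetical */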
@ -2130,7 +2129,7 @@ MmArmInitSystem(IN ULONG Phase,
MmSystemSize = MmMediumSystem;
MmSystemCacheWsMinimum += 400;
}
/* Check for less than 24MB */
if (MmNumberOfPhysicalPages < ((24 * _1MB) / PAGE_SIZE))
{
@ -2157,14 +2156,14 @@ MmArmInitSystem(IN ULONG Phase,
}
}
}
/* Check for more than 33 MB */
if (MmNumberOfPhysicalPages > ((33 * _1MB) / PAGE_SIZE))
{
/* Add another 500 pages to the cache */
MmSystemCacheWsMinimum += 500;
}
/* Now setup the shared user data fields */
ASSERT(SharedUserData->NumberOfPhysicalPages == 0);
SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages;
@ -2209,25 +2208,25 @@ MmArmInitSystem(IN ULONG Phase,
DPRINT1("System cache working set too big\n");
return FALSE;
}
/* Initialize the system cache */
//MiInitializeSystemCache(MmSystemCacheWsMinimum, MmAvailablePages);
/* Update the commit limit */
MmTotalCommitLimit = MmAvailablePages;
if (MmTotalCommitLimit > 1024) MmTotalCommitLimit -= 1024;
MmTotalCommitLimitMaximum = MmTotalCommitLimit;
/* Size up paged pool and build the shadow system page directory */
MiBuildPagedPool();
/* Debugger physical memory support is now ready to be used */
MmDebugPte = MiAddressToPte(MiDebugMapping);
/* Initialize the loaded module list */
MiInitializeLoadedModuleList(LoaderBlock);
}
//
// Always return success for now
//

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::MMSUP"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -122,7 +121,7 @@ NTAPI
MmIsRecursiveIoFault(VOID)
{
PETHREAD Thread = PsGetCurrentThread();
//
// If any of these is true, this is a recursive fault
//

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::NCACHE"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -29,25 +28,25 @@ MmAllocateNonCachedMemory(IN SIZE_T NumberOfBytes)
PHYSICAL_ADDRESS LowAddress, HighAddress, SkipBytes;
MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
PMDL Mdl;
PVOID BaseAddress;
PPFN_NUMBER MdlPages;
PMMPTE PointerPte;
MMPTE TempPte;
//
// Get the page count
//
ASSERT(NumberOfBytes != 0);
PageCount = BYTES_TO_PAGES(NumberOfBytes);
//
// Use the MDL allocator for simplicity, so setup the parameters
//
LowAddress.QuadPart = 0;
HighAddress.QuadPart = -1;
SkipBytes.QuadPart = 0;
CacheAttribute = MiPlatformCacheAttributes[0][MmNonCached];
//
// Now call the MDL allocator
//
@ -58,7 +57,7 @@ MmAllocateNonCachedMemory(IN SIZE_T NumberOfBytes)
CacheAttribute,
0);
if (!Mdl) return NULL;
//
// Get the MDL VA and check how many pages we got (could be partial)
//
@ -74,12 +73,12 @@ MmAllocateNonCachedMemory(IN SIZE_T NumberOfBytes)
ExFreePool(Mdl);
return NULL;
}
//
// Allocate system PTEs for the base address
// We use an extra page to store the actual MDL pointer for the free later
//
PointerPte = MiReserveSystemPtes(PageCount + 1, SystemPteSpace);
if (!PointerPte)
{
//
@ -89,57 +88,57 @@ MmAllocateNonCachedMemory(IN SIZE_T NumberOfBytes)
ExFreePool(Mdl);
return NULL;
}
//
// Store the MDL pointer
//
*(PMDL*)PointerPte++ = Mdl;
//
// Okay, now see what range we got
//
BaseAddress = MiPteToAddress(PointerPte);
//
// This is our array of pages
//
MdlPages = (PPFN_NUMBER)(Mdl + 1);
//
// Setup the template PTE
//
TempPte = ValidKernelPte;
//
// Now check what kind of caching we should use
//
switch (CacheAttribute)
{
case MiNonCached:
//
// Disable caching
//
MI_PAGE_DISABLE_CACHE(&TempPte);
MI_PAGE_WRITE_THROUGH(&TempPte);
break;
case MiWriteCombined:
//
// Enable write combining
//
MI_PAGE_DISABLE_CACHE(&TempPte);
MI_PAGE_WRITE_COMBINED(&TempPte);
break;
default:
//
// Nothing to do
//
break;
}
//
// Now loop the MDL pages
//
@ -149,19 +148,19 @@ MmAllocateNonCachedMemory(IN SIZE_T NumberOfBytes)
// Get the PFN
//
PageFrameIndex = *MdlPages++;
//
// Set the PFN in the page and write it
//
TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
MI_WRITE_VALID_PTE(PointerPte++, TempPte);
} while (--PageCount);
//
// Return the base address
//
return BaseAddress;
}
/*
@ -175,34 +174,34 @@ MmFreeNonCachedMemory(IN PVOID BaseAddress,
PMDL Mdl;
PMMPTE PointerPte;
PFN_NUMBER PageCount;
//
// Sanity checks
//
ASSERT(NumberOfBytes != 0);
ASSERT(PAGE_ALIGN(BaseAddress) == BaseAddress);
//
// Get the page count
//
PageCount = BYTES_TO_PAGES(NumberOfBytes);
//
// Get the first PTE
//
PointerPte = MiAddressToPte(BaseAddress);
//
// Remember this is where we store the shadow MDL pointer
//
Mdl = *(PMDL*)(--PointerPte);
//
// Kill the MDL (and underlying pages)
//
MmFreePagesFromMdl(Mdl);
ExFreePool(Mdl);
//
// Now free the system PTEs for the underlying VA
//
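
A minimal usage sketch of the pair above — the size is illustrative, and note the free call must receive the same byte count as the allocation, since both sides derive the page count (and the shadow MDL pointer's location) from it:

PVOID Buffer = MmAllocateNonCachedMemory(2 * PAGE_SIZE);  /* illustrative size */
if (Buffer)
{
    RtlZeroMemory(Buffer, 2 * PAGE_SIZE);          /* uncached access */
    MmFreeNonCachedMemory(Buffer, 2 * PAGE_SIZE);  /* same size as allocated */
}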

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::PAGFAULT"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -32,10 +31,10 @@ MiCheckVirtualAddress(IN PVOID VirtualAddress,
{
PMMVAD Vad;
PMMPTE PointerPte;
/* No prototype/section support for now */
*ProtoVad = NULL;
/* Check if this is a page table address */
if (MI_IS_PAGE_TABLE_ADDRESS(VirtualAddress))
{
@ -47,15 +46,15 @@ MiCheckVirtualAddress(IN PVOID VirtualAddress,
*ProtectCode = MM_NOACCESS;
return NULL;
}
/* Return full access rights */
*ProtectCode = MM_READWRITE;
return NULL;
}
/* Should not be a session address */
ASSERT(MI_IS_SESSION_ADDRESS(VirtualAddress) == FALSE);
/* Special case for shared data */
if (PAGE_ALIGN(VirtualAddress) == (PVOID)USER_SHARED_DATA)
{
@ -63,7 +62,7 @@ MiCheckVirtualAddress(IN PVOID VirtualAddress,
*ProtectCode = MM_READONLY;
return MmSharedUserDataPte;
}
/* Find the VAD, it might not exist if the address is bogus */
Vad = MiLocateAddress(VirtualAddress);
if (!Vad)
@ -75,7 +74,7 @@ MiCheckVirtualAddress(IN PVOID VirtualAddress,
/* This must be a VM VAD */
ASSERT(Vad->u.VadFlags.VadType == VadNone);
/* Check if it's a section, or just an allocation */
if (Vad->u.VadFlags.PrivateMemory == TRUE)
{
@ -89,29 +88,29 @@ MiCheckVirtualAddress(IN PVOID VirtualAddress,
/* Return the proto VAD */
ASSERT(Vad->u2.VadFlags2.ExtendableFile == 0);
*ProtoVad = Vad;
/* Get the prototype PTE for this page */
PointerPte = (((ULONG_PTR)VirtualAddress >> PAGE_SHIFT) - Vad->StartingVpn) + Vad->FirstPrototypePte;
ASSERT(PointerPte <= Vad->LastContiguousPte);
ASSERT(PointerPte != NULL);
/* Return the Prototype PTE and the protection for the page mapping */
*ProtectCode = Vad->u.VadFlags.Protection;
return PointerPte;
}
}
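
A worked instance of the pointer arithmetic above, with assumed addresses:

/* Illustrative lookup: VirtualAddress = 0x20003000, Vad->StartingVpn = 0x20000
 *   VPN   = 0x20003000 >> PAGE_SHIFT = 0x20003
 *   index = 0x20003 - 0x20000        = 3
 *   result: Vad->FirstPrototypePte + 3 (the fourth prototype PTE of the section)
 */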
NTSTATUS
FASTCALL
MiCheckPdeForPagedPool(IN PVOID Address)
{
PMMPDE PointerPde;
NTSTATUS Status = STATUS_SUCCESS;
/* No session support in ReactOS yet */
ASSERT(MI_IS_SESSION_ADDRESS(Address) == FALSE);
ASSERT(MI_IS_SESSION_PTE(Address) == FALSE);
//
// Check if this is a fault while trying to access the page table itself
//
@ -129,7 +128,7 @@ MiCheckPdeForPagedPool(IN PVOID Address)
//
// This is totally illegal
//
return STATUS_ACCESS_VIOLATION;
}
else
{
@ -138,7 +137,7 @@ MiCheckPdeForPagedPool(IN PVOID Address)
//
PointerPde = MiAddressToPde(Address);
}
//
// Check if it's not valid
//
@ -154,7 +153,7 @@ MiCheckPdeForPagedPool(IN PVOID Address)
MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)].u.Long);
#endif
}
//
// Return status
//
@ -169,15 +168,15 @@ MiZeroPfn(IN PFN_NUMBER PageFrameNumber)
MMPTE TempPte;
PMMPFN Pfn1;
PVOID ZeroAddress;
/* Get the PFN for this page */
Pfn1 = MiGetPfnEntry(PageFrameNumber);
ASSERT(Pfn1);
/* Grab a system PTE we can use to zero the page */
ZeroPte = MiReserveSystemPtes(1, SystemPteSpace);
ASSERT(ZeroPte);
/* Initialize the PTE for it */
TempPte = ValidKernelPte;
TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
@ -221,7 +220,7 @@ MiResolveDemandZeroFault(IN PVOID Address,
DPRINT("ARM3 Demand Zero Page Fault Handler for address: %p in process: %p\n",
Address,
Process);
/* Must currently only be called by paging path */
if ((Process) && (OldIrql == MM_NOIRQL))
{
@ -230,11 +229,11 @@ MiResolveDemandZeroFault(IN PVOID Address,
/* No forking yet */
ASSERT(Process->ForkInProgress == NULL);
/* Get process color */
Color = MI_GET_NEXT_PROCESS_COLOR(Process);
ASSERT(Color != 0xFFFFFFFF);
/* We'll need a zero page */
NeedZero = TRUE;
}
@ -242,7 +241,7 @@ MiResolveDemandZeroFault(IN PVOID Address,
{
/* Check if we need a zero page */
NeedZero = (OldIrql != MM_NOIRQL);
/* Get the next system page color */
Color = MI_GET_NEXT_COLOR();
}
@ -254,10 +253,10 @@ MiResolveDemandZeroFault(IN PVOID Address,
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
HaveLock = TRUE;
}
/* We either manually locked the PFN DB, or already came with it locked */
ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
/* Do we need a zero page? */
ASSERT(PointerPte->u.Hard.Valid == 0);
#if MI_TRACE_PFNS
@ -291,21 +290,21 @@ MiResolveDemandZeroFault(IN PVOID Address,
/* System wants a zero page, obtain one */
PageFrameNumber = MiRemoveZeroPage(Color);
}
/* Initialize it */
MiInitializePfn(PageFrameNumber, PointerPte, TRUE);
/* Release PFN lock if needed */
if (HaveLock) KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
//
// Increment demand zero faults
//
InterlockedIncrement(&KeGetCurrentPrcb()->MmDemandZeroCount);
/* Zero the page if need be */
if (NeedZero) MiZeroPfn(PageFrameNumber);
/* Build the PTE */
if (PointerPte <= MiHighestUserPte)
{
@ -323,10 +322,10 @@ MiResolveDemandZeroFault(IN PVOID Address,
PointerPte->u.Soft.Protection,
PageFrameNumber);
}
/* Set it dirty if it's a writable page */
if (MI_IS_PAGE_WRITEABLE(&TempPte)) MI_MAKE_DIRTY_PAGE(&TempPte);
/* Write it */
MI_WRITE_VALID_PTE(PointerPte, TempPte);
@ -350,20 +349,20 @@ MiCompleteProtoPteFault(IN BOOLEAN StoreInstruction,
PMMPTE OriginalPte;
ULONG Protection;
PFN_NUMBER PageFrameIndex;
/* Must be called with a valid prototype PTE, with the PFN lock held */
ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
ASSERT(PointerProtoPte->u.Hard.Valid == 1);
/* Get the page */
PageFrameIndex = PFN_FROM_PTE(PointerProtoPte);
/* Get the PFN entry and set it as a prototype PTE */
Pfn1 = MiGetPfnEntry(PageFrameIndex);
Pfn1->u3.e1.PrototypePte = 1;
/* FIXME: Increment the share count for the page table */
/* Check where we should be getting the protection information from */
if (PointerPte->u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)
{
@ -379,10 +378,10 @@ MiCompleteProtoPteFault(IN BOOLEAN StoreInstruction,
/* Release the PFN lock */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
/* Remove caching bits */
Protection &= ~(MM_NOCACHE | MM_NOACCESS);
/* Check if this is a kernel or user address */
if (Address < MmSystemRangeStart)
{
@ -394,7 +393,7 @@ MiCompleteProtoPteFault(IN BOOLEAN StoreInstruction,
/* Build the kernel PTE */
MI_MAKE_HARDWARE_PTE(&TempPte, PointerPte, Protection, PageFrameIndex);
}
/* Write the PTE */
MI_WRITE_VALID_PTE(PointerPte, TempPte);
@ -419,7 +418,7 @@ MiResolveProtoPteFault(IN BOOLEAN StoreInstruction,
PMMPFN Pfn1;
PFN_NUMBER PageFrameIndex;
NTSTATUS Status;
/* Must be called with an invalid prototype PTE, with the PFN lock held */
ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
ASSERT(PointerPte->u.Hard.Valid == 0);
@ -433,10 +432,10 @@ MiResolveProtoPteFault(IN BOOLEAN StoreInstruction,
PageFrameIndex = PFN_FROM_PTE(&TempPte);
Pfn1 = MiGetPfnEntry(PageFrameIndex);
Pfn1->u2.ShareCount++;
/* Call it a transition */
InterlockedIncrement(&KeGetCurrentPrcb()->MmTransitionCount);
/* Complete the prototype PTE fault -- this will release the PFN lock */
return MiCompleteProtoPteFault(StoreInstruction,
Address,
@ -445,7 +444,7 @@ MiResolveProtoPteFault(IN BOOLEAN StoreInstruction,
OldIrql,
NULL);
}
/* Make sure there's some protection mask */
if (TempPte.u.Long == 0)
{
@ -454,7 +453,7 @@ MiResolveProtoPteFault(IN BOOLEAN StoreInstruction,
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
return STATUS_ACCESS_VIOLATION;
}
/* This is the only thing we support right now */
ASSERT(TempPte.u.Soft.PageFileHigh == 0);
ASSERT(TempPte.u.Proto.ReadOnly == 0);
@ -465,7 +464,7 @@ MiResolveProtoPteFault(IN BOOLEAN StoreInstruction,
/* Resolve the demand zero fault */
Status = MiResolveDemandZeroFault(Address, PointerProtoPte, Process, OldIrql);
ASSERT(NT_SUCCESS(Status));
/* Complete the prototype PTE fault -- this will release the PFN lock */
ASSERT(PointerPte->u.Hard.Valid == 0);
return MiCompleteProtoPteFault(StoreInstruction,
@ -494,32 +493,32 @@ MiDispatchFault(IN BOOLEAN StoreInstruction,
DPRINT("ARM3 Page Fault Dispatcher for address: %p in process: %p\n",
Address,
Process);
//
// Make sure APCs are off and we're not at dispatch
//
OldIrql = KeGetCurrentIrql();
ASSERT(OldIrql <= APC_LEVEL);
ASSERT(KeAreAllApcsDisabled() == TRUE);
//
// Grab a copy of the PTE
//
TempPte = *PointerPte;
/* Do we have a prototype PTE? */
if (PointerProtoPte)
{
/* This should never happen */
ASSERT(!MI_IS_PHYSICAL_ADDRESS(PointerProtoPte));
/* Check if this is a kernel-mode address */
SuperProtoPte = MiAddressToPte(PointerProtoPte);
if (Address >= MmSystemRangeStart)
{
/* Lock the PFN database */
LockIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
/* Has the PTE been made valid yet? */
if (!SuperProtoPte->u.Hard.Valid)
{
@ -554,10 +553,10 @@ MiDispatchFault(IN BOOLEAN StoreInstruction,
/* We currently only handle very limited paths */
ASSERT(PointerPte->u.Soft.Prototype == 1);
ASSERT(PointerPte->u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED);
/* Lock the PFN database */
LockIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
/* For our current usage, this should be true */
ASSERT(SuperProtoPte->u.Hard.Valid == 1);
ASSERT(TempPte.u.Hard.Valid == 0);
@ -582,20 +581,20 @@ MiDispatchFault(IN BOOLEAN StoreInstruction,
return STATUS_PAGE_FAULT_TRANSITION;
}
}
//
// The PTE must be invalid, but not totally blank
//
ASSERT(TempPte.u.Hard.Valid == 0);
ASSERT(TempPte.u.Long != 0);
//
// No prototype, transition or page file software PTEs in ARM3 yet
//
ASSERT(TempPte.u.Soft.Prototype == 0);
ASSERT(TempPte.u.Soft.Transition == 0);
ASSERT(TempPte.u.Soft.PageFileHigh == 0);
//
// If we got this far, the PTE can only be a demand zero PTE, which is what
// we want. Go handle it!
@ -641,7 +640,7 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
PFN_NUMBER PageFrameIndex;
ULONG Color;
DPRINT("ARM3 FAULT AT: %p\n", Address);
//
// Get the PTE and PDE
//
@ -665,7 +664,7 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
OldIrql);
ASSERT(OldIrql <= APC_LEVEL);
}
//
// Check for kernel fault
//
@ -675,7 +674,7 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
// What are you even DOING here?
//
if (Mode == UserMode) return STATUS_ACCESS_VIOLATION;
#if (_MI_PAGING_LEVELS >= 3)
/* Need to check PXE and PDE validity */
ASSERT(FALSE);
@ -690,7 +689,7 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
// Debug spew (eww!)
//
DPRINT("Invalid PDE\n");
#if (_MI_PAGING_LEVELS == 2)
//
// Handle mapping in "Special" PDE directories
//
@ -704,7 +703,7 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
//
// FIXFIX: Do the S-LIST hack
//
//
// Kill the system
//
@ -715,7 +714,7 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
2);
}
}
//
// The PDE is valid, so read the PTE
//
@ -733,19 +732,19 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
DPRINT1("Should NEVER happen on ARM3!!!\n");
return STATUS_ACCESS_VIOLATION;
}
//
// Otherwise, the PDE was probably invalid, and all is good now
//
return STATUS_SUCCESS;
}
//
// Check for a fault on the page table or hyperspace itself
//
if (MI_IS_PAGE_TABLE_OR_HYPER_ADDRESS(Address))
{
#if (_MI_PAGING_LEVELS == 2)
/* Could be paged pool access from a new process -- synchronize the page directories */
if (MiCheckPdeForPagedPool(Address) == STATUS_WAIT_1)
{
@ -756,15 +755,15 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
/* Otherwise this could be a commit of a virtual address */
break;
}
/* In this path, we are using the system working set */
CurrentThread = PsGetCurrentThread();
WorkingSet = &MmSystemCacheWs;
/* Acquire it */
KeRaiseIrql(APC_LEVEL, &LockIrql);
MiLockWorkingSet(CurrentThread, WorkingSet);
//
// Re-read PTE now that the IRQL has been raised
//
@ -782,17 +781,17 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
DPRINT1("Should NEVER happen on ARM3!!!\n");
return STATUS_ACCESS_VIOLATION;
}
/* Release the working set */
MiUnlockWorkingSet(CurrentThread, WorkingSet);
KeLowerIrql(LockIrql);
//
// Otherwise, the PDE was probably invalid, and all is good now
//
return STATUS_SUCCESS;
}
/* Check one kind of prototype PTE */
if (TempPte.u.Soft.Prototype)
{
@ -811,17 +810,17 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
Mode,
4);
}
/* Get the prototype PTE! */
ProtoPte = MiProtoPteToPte(&TempPte);
}
else
{
//
// We don't implement transition PTEs
//
ASSERT(TempPte.u.Soft.Transition == 0);
/* Check for no-access PTE */
if (TempPte.u.Soft.Protection == MM_NOACCESS)
{
@ -833,7 +832,7 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
1);
}
}
/* Check for demand page */
if ((StoreInstruction) && !(ProtoPte) && !(TempPte.u.Hard.Valid))
{
@ -848,7 +847,7 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
14);
}
}
//
// Now do the real fault handling
//
@ -865,21 +864,21 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
ASSERT(KeAreAllApcsDisabled() == TRUE);
MiUnlockWorkingSet(CurrentThread, WorkingSet);
KeLowerIrql(LockIrql);
//
// We are done!
//
DPRINT("Fault resolved with status: %lx\n", Status);
return Status;
}
/* This is a user fault */
CurrentThread = PsGetCurrentThread();
CurrentProcess = PsGetCurrentProcess();
/* Lock the working set */
MiLockProcessWorkingSet(CurrentProcess, CurrentThread);
#if (_MI_PAGING_LEVELS >= 3)
/* Need to check/handle PPE and PXE validity too */
ASSERT(FALSE);
@ -894,7 +893,7 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
/* Check if this address range belongs to a valid allocation (VAD) */
MiCheckVirtualAddress(Address, &ProtectionCode, &Vad);
/* Right now, we expect a valid protection mask on the VAD */
ASSERT(ProtectionCode != MM_NOACCESS);
@ -928,7 +927,7 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
/* Now capture the PTE. Ignore virtual faults for now */
TempPte = *PointerPte;
ASSERT(TempPte.u.Hard.Valid == 0);
/* Quick check for demand-zero */
if (TempPte.u.Long == (MM_READWRITE << MM_PTE_SOFTWARE_PROTECTION_BITS))
{
@ -942,19 +941,19 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
return STATUS_PAGE_FAULT_DEMAND_ZERO;
}
/* Get protection and check if it's a prototype PTE */
ProtectionCode = TempPte.u.Soft.Protection;
ASSERT(TempPte.u.Soft.Prototype == 0);
/* Check for non-demand zero PTE */
if (TempPte.u.Long != 0)
{
/* This is a page fault, check for valid protection */
ASSERT(ProtectionCode != 0x100);
/* FIXME: Run MiAccessCheck */
/* Dispatch the fault */
Status = MiDispatchFault(StoreInstruction,
Address,
@ -964,7 +963,7 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
PsGetCurrentProcess(),
TrapInformation,
NULL);
/* Return the status */
ASSERT(NT_SUCCESS(Status));
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
@ -979,14 +978,14 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
{
/* This is a bogus VA */
Status = STATUS_ACCESS_VIOLATION;
/* Could be a not-yet-mapped paged pool page table */
#if (_MI_PAGING_LEVELS == 2)
MiCheckPdeForPagedPool(Address);
#endif
/* See if that fixed it */
if (PointerPte->u.Hard.Valid == 1) Status = STATUS_SUCCESS;
/* Return the status */
MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
return Status;
@ -999,7 +998,7 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]++;
ASSERT(MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] <= PTE_COUNT);
}
/* Did we get a prototype PTE back? */
if (!ProtoPte)
{
@ -1008,7 +1007,7 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
/* Lock the PFN database since we're going to grab a page */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
/* Try to get a zero page */
MI_SET_USAGE(MI_USAGE_PEB_TEB);
MI_SET_PROCESS2(CurrentProcess->ImageFileName);
@ -1062,7 +1061,7 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
/* And now write down the PTE, making the address valid */
MI_WRITE_VALID_PTE(PointerPte, TempPte);
/* Demand zero */
Status = STATUS_PAGE_FAULT_DEMAND_ZERO;
}
@ -1071,12 +1070,12 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
/* No guard page support yet */
ASSERT((ProtectionCode & MM_DECOMMIT) == 0);
ASSERT(ProtectionCode != 0x100);
/* Write the prototype PTE */
TempPte = PrototypePte;
TempPte.u.Soft.Protection = ProtectionCode;
MI_WRITE_INVALID_PTE(PointerPte, TempPte);
/* Handle the fault */
Status = MiDispatchFault(StoreInstruction,
Address,
@ -1090,7 +1089,7 @@ MmArmAccessFault(IN BOOLEAN StoreInstruction,
ASSERT(PointerPte->u.Hard.Valid == 1);
ASSERT(PointerPte->u.Hard.PageFrameNumber != 0);
}
/* Release the working set */
MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
return Status;

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::PFNLIST"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -86,14 +85,14 @@ MiUnlinkFreeOrZeroedPage(IN PMMPFN Entry)
ULONG Color;
PMMCOLOR_TABLES ColorTable;
PMMPFN Pfn1;
/* Make sure the PFN lock is held */
ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
/* Make sure the PFN entry isn't in-use */
ASSERT(Entry->u3.e1.WriteInProgress == 0);
ASSERT(Entry->u3.e1.ReadInProgress == 0);
/* Find the list for this entry, make sure it's the free or zero list */
ListHead = MmPageLocationList[Entry->u3.e1.PageLocation];
ListName = ListHead->ListName;
@ -104,11 +103,11 @@ MiUnlinkFreeOrZeroedPage(IN PMMPFN Entry)
/* Remove one count */
ASSERT(ListHead->Total != 0);
ListHead->Total--;
/* Get the forward and back pointers */
OldFlink = Entry->u1.Flink;
OldBlink = Entry->u2.Blink;
/* Check if the next entry is the list head */
if (OldFlink != LIST_HEAD)
{
@ -120,7 +119,7 @@ MiUnlinkFreeOrZeroedPage(IN PMMPFN Entry)
/* Set the list head's backlink instead */
ListHead->Blink = OldBlink;
}
/* Check if the back entry is the list head */
if (OldBlink != LIST_HEAD)
{
@ -183,7 +182,7 @@ MiUnlinkFreeOrZeroedPage(IN PMMPFN Entry)
/* One less colored page */
ASSERT(ColorTable->Count >= 1);
ColorTable->Count--;
/* ReactOS Hack */
Entry->OriginalPte.u.Long = 0;
@ -202,13 +201,13 @@ MiUnlinkFreeOrZeroedPage(IN PMMPFN Entry)
/* Signal the low memory event */
KeSetEvent(MiLowMemoryEvent, 0, FALSE);
}
/* One less page */
if (--MmAvailablePages < MmMinimumFreePages)
{
/* FIXME: Should wake up the MPW and working set manager, if we had one */
}
#if MI_TRACE_PFNS
ASSERT(MI_PFN_CURRENT_USAGE != MI_USAGE_NOT_SET);
Entry->PfnUsage = MI_PFN_CURRENT_USAGE;
@ -238,7 +237,7 @@ MiRemovePageByColor(IN PFN_NUMBER PageIndex,
Pfn1 = MI_PFN_ELEMENT(PageIndex);
ASSERT(Pfn1->u3.e1.RemovalRequested == 0);
ASSERT(Pfn1->u3.e1.Rom == 0);
/* Capture data for later */
OldColor = Pfn1->u3.e1.PageColor;
OldCache = Pfn1->u3.e1.CacheAttribute;
@ -248,14 +247,14 @@ MiRemovePageByColor(IN PFN_NUMBER PageIndex,
ASSERT_LIST_INVARIANT(ListHead);
ListName = ListHead->ListName;
ASSERT(ListName <= FreePageList);
/* Remove a page */
ListHead->Total--;
/* Get the forward and back pointers */
OldFlink = Pfn1->u1.Flink;
OldBlink = Pfn1->u2.Blink;
/* Check if the next entry is the list head */
if (OldFlink != LIST_HEAD)
{
@ -267,7 +266,7 @@ MiRemovePageByColor(IN PFN_NUMBER PageIndex,
/* Set the list head's backlink instead */
ListHead->Blink = OldBlink;
}
/* Check if the back entry is the list head */
if (OldBlink != LIST_HEAD)
{
@ -279,11 +278,11 @@ MiRemovePageByColor(IN PFN_NUMBER PageIndex,
/* Set the list head's backlink instead */
ListHead->Flink = OldFlink;
}
/* We are not on a list anymore */
ASSERT_LIST_INVARIANT(ListHead);
Pfn1->u1.Flink = Pfn1->u2.Blink = 0;
/* Zero flags but restore color and cache */
Pfn1->u3.e2.ShortFlags = 0;
Pfn1->u3.e1.PageColor = OldColor;
@ -293,25 +292,25 @@ MiRemovePageByColor(IN PFN_NUMBER PageIndex,
ASSERT(Color < MmSecondaryColors);
ColorTable = &MmFreePagesByColor[ListName][Color];
ASSERT(ColorTable->Count >= 1);
/* Set the forward link to whoever we were pointing to */
ColorTable->Flink = Pfn1->OriginalPte.u.Long;
/* Get the first page on the color list */
if (ColorTable->Flink == LIST_HEAD)
{
/* The color list is now empty, so set the sentinel backlink as well */
ColorTable->Blink = (PVOID)LIST_HEAD;
}
else
{
/* The list is not empty, so the next page becomes the head of the color list */
MI_PFN_ELEMENT(ColorTable->Flink)->u4.PteFrame = COLORED_LIST_HEAD;
}
/* One less page */
ColorTable->Count--;
/* ReactOS Hack */
Pfn1->OriginalPte.u.Long = 0;
@ -326,7 +325,7 @@ MiRemovePageByColor(IN PFN_NUMBER PageIndex,
/* Signal the low memory event */
KeSetEvent(MiLowMemoryEvent, 0, FALSE);
}
/* One less page */
if (--MmAvailablePages < MmMinimumFreePages)
{
@ -396,7 +395,7 @@ MiRemoveAnyPage(IN ULONG Color)
ASSERT(Pfn1->u2.ShareCount == 0);
ASSERT_LIST_INVARIANT(&MmFreePageListHead);
ASSERT_LIST_INVARIANT(&MmZeroedPageListHead);
/* Return the page */
return PageIndex;
}
@ -457,10 +456,10 @@ MiRemoveZeroPage(IN ULONG Color)
/* Remove the page from its list */
PageIndex = MiRemovePageByColor(PageIndex, Color);
ASSERT(Pfn1 == MI_PFN_ELEMENT(PageIndex));
/* Zero it, if needed */
if (Zero) MiZeroPhysicalPage(PageIndex);
/* Sanity checks */
ASSERT(Pfn1->u3.e2.ReferenceCount == 0);
ASSERT(Pfn1->u2.ShareCount == 0);
@ -562,18 +561,18 @@ MiInsertPageInFreeList(IN PFN_NUMBER PageFrameIndex)
{
/* Get the previous page */
Blink = (PMMPFN)ColorTable->Blink;
/* Make it link to us, and link back to it */
Blink->OriginalPte.u.Long = PageFrameIndex;
Pfn1->u4.PteFrame = MiGetPfnEntryIndex(Blink);
}
/* Now initialize our own list pointers */
ColorTable->Blink = Pfn1;
/* This page is now the last */
Pfn1->OriginalPte.u.Long = LIST_HEAD;
/* And increase the count in the colored list */
ColorTable->Count++;
@ -584,7 +583,7 @@ MiInsertPageInFreeList(IN PFN_NUMBER PageFrameIndex)
MmZeroingPageThreadActive = TRUE;
KeSetEvent(&MmZeroingPageEvent, IO_NO_INCREMENT, FALSE);
}
#if MI_TRACE_PFNS
Pfn1->PfnUsage = MI_USAGE_FREE_PAGE;
RtlZeroMemory(Pfn1->ProcessName, 16);
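
For context, the consumer of the event set above is the zero-page thread. A conceptual sketch of its loop follows, under assumed helper names — only MiZeroPhysicalPage, the event, and the active flag are taken from this file; the hypothetical helpers stand in for the real list handling, which lives elsewhere in the tree:

PFN_NUMBER Page;

for (;;)
{
    /* Sleep until MiInsertPageInFreeList signals that free pages arrived */
    KeWaitForSingleObject(&MmZeroingPageEvent, WrFreePage, KernelMode, FALSE, NULL);

    /* Drain the free list: zero each page and refile it on the zeroed list */
    while ((Page = MiHypotheticalRemoveFreePage()) != 0)   /* hypothetical helper */
    {
        MiZeroPhysicalPage(Page);
        MiHypotheticalInsertZeroedPage(Page);              /* hypothetical helper */
    }
    MmZeroingPageThreadActive = FALSE;
}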
@ -608,12 +607,12 @@ MiInsertPageInList(IN PMMPFNLIST ListHead,
/* Make sure the lock is held */
ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
/* Make sure the PFN is valid */
ASSERT((PageFrameIndex) &&
(PageFrameIndex <= MmHighestPhysicalPage) &&
(PageFrameIndex >= MmLowestPhysicalPage));
/* Page should be unused */
Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
ASSERT(Pfn1->u3.e2.ReferenceCount == 0);
@ -653,7 +652,7 @@ MiInsertPageInList(IN PMMPFNLIST ListHead,
/* One more page on the system */
MmAvailablePages++;
/* Check if we've reached the configured low memory threshold */
if (MmAvailablePages == MmLowMemoryThreshold)
{
@ -701,7 +700,7 @@ MiInsertPageInList(IN PMMPFNLIST ListHead,
/* One more paged on the colored list */
ColorHead->Count++;
#if MI_TRACE_PFNS
//ASSERT(MI_PFN_CURRENT_USAGE == MI_USAGE_NOT_SET);
Pfn1->PfnUsage = MI_USAGE_FREE_PAGE;
@ -730,7 +729,7 @@ MiInitializePfn(IN PFN_NUMBER PageFrameIndex,
{
/* Only valid from MmCreateProcessAddressSpace path */
ASSERT(PsGetCurrentProcess()->Vm.WorkingSetSize == 0);
/* Make this a demand zero PTE */
MI_MAKE_SOFTWARE_PTE(&Pfn1->OriginalPte, MM_READWRITE);
}
@ -788,20 +787,20 @@ MiAllocatePfn(IN PMMPTE PointerPte,
/* Sanity check that we aren't passed a valid PTE */
ASSERT(PointerPte->u.Hard.Valid == 0);
/* Make an empty software PTE */
MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);
/* Lock the PFN database */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
/* Check if we're running low on pages */
if (MmAvailablePages < 128)
{
DPRINT1("Warning, running low on memory: %d pages left\n", MmAvailablePages);
//MiEnsureAvailablePageOrWait(NULL, OldIrql);
}
/* Grab a page */
ASSERT_LIST_INVARIANT(&MmFreePageListHead);
ASSERT_LIST_INVARIANT(&MmZeroedPageListHead);
@ -810,10 +809,10 @@ MiAllocatePfn(IN PMMPTE PointerPte,
/* Write the software PTE */
MI_WRITE_INVALID_PTE(PointerPte, TempPte);
PointerPte->u.Soft.Protection |= Protection;
/* Initialize its PFN entry */
MiInitializePfn(PageFrameIndex, PointerPte, TRUE);
/* Release the PFN lock and return the page */
ASSERT_LIST_INVARIANT(&MmFreePageListHead);
ASSERT_LIST_INVARIANT(&MmZeroedPageListHead);
@ -852,10 +851,10 @@ MiDecrementShareCount(IN PMMPFN Pfn1,
/* Put the page in transition */
Pfn1->u3.e1.PageLocation = TransitionPage;
/* PFN lock must be held */
ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
/* Page should at least have one reference */
ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
if (Pfn1->u3.e2.ReferenceCount == 1)
@ -931,14 +930,14 @@ MiInitializePfnForOtherProcess(IN PFN_NUMBER PageFrameIndex,
IN PFN_NUMBER PteFrame)
{
PMMPFN Pfn1;
/* Setup the PTE */
Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
Pfn1->PteAddress = PointerPte;
/* Make this a software PTE */
MI_MAKE_SOFTWARE_PTE(&Pfn1->OriginalPte, MM_READWRITE);
/* Setup the page */
ASSERT(Pfn1->u3.e2.ReferenceCount == 0);
Pfn1->u3.e2.ReferenceCount = 1;
@ -946,14 +945,14 @@ MiInitializePfnForOtherProcess(IN PFN_NUMBER PageFrameIndex,
Pfn1->u3.e1.PageLocation = ActiveAndValid;
Pfn1->u3.e1.Modified = TRUE;
Pfn1->u4.InPageError = FALSE;
/* Did we get a PFN for the page table */
if (PteFrame)
{
/* Store it */
Pfn1->u4.PteFrame = PteFrame;
/* Increase its share count so we don't get rid of it */
Pfn1 = MI_PFN_ELEMENT(PteFrame);
Pfn1->u2.ShareCount++;
}

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::POOL"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -55,7 +54,7 @@ MiProtectFreeNonPagedPool(IN PVOID VirtualAddress,
TempPte.u.Soft.Prototype = 1;
MI_WRITE_INVALID_PTE(PointerPte, TempPte);
} while (++PointerPte < LastPte);
/* Flush the TLB */
KeFlushEntireTb(TRUE, TRUE);
}
@ -71,11 +70,11 @@ MiUnProtectFreeNonPagedPool(IN PVOID VirtualAddress,
/* If pool is physical, can't protect PTEs */
if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return FALSE;
/* Get, and capture the PTE */
PointerPte = MiAddressToPte(VirtualAddress);
TempPte = *PointerPte;
/* Loop protected PTEs */
while ((TempPte.u.Hard.Valid == 0) && (TempPte.u.Soft.Prototype == 1))
{
@ -83,14 +82,14 @@ MiUnProtectFreeNonPagedPool(IN PVOID VirtualAddress,
TempPte.u.Hard.Valid = 1;
TempPte.u.Soft.Prototype = 0;
MI_WRITE_VALID_PTE(PointerPte, TempPte);
/* One more page */
if (++UnprotectedPages == PageCount) break;
/* Capture next PTE */
TempPte = *(++PointerPte);
}
/* Return if any pages were unprotected */
return UnprotectedPages ? TRUE : FALSE;
}
@ -103,27 +102,27 @@ MiProtectedPoolUnProtectLinks(IN PLIST_ENTRY Links,
{
BOOLEAN Safe;
PVOID PoolVa;
/* Initialize variables */
*PoolFlink = *PoolBlink = NULL;
/* Check if the list has entries */
if (IsListEmpty(Links) == FALSE)
{
/* We are going to need to forward link to do an insert */
PoolVa = Links->Flink;
/* So make it safe to access */
Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
if (Safe) *PoolFlink = PoolVa;
}
/* Are we going to need a backward link too? */
if (Links != Links->Blink)
{
/* Get the head's backward link for the insert */
PoolVa = Links->Blink;
/* Make it safe to access */
Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
if (Safe) *PoolBlink = PoolVa;
@ -147,13 +146,13 @@ MiProtectedPoolInsertList(IN PLIST_ENTRY ListHead,
IN BOOLEAN Critical)
{
PVOID PoolFlink, PoolBlink;
/* Make the list accessible */
MiProtectedPoolUnProtectLinks(ListHead, &PoolFlink, &PoolBlink);
/* Now insert in the right position */
Critical ? InsertHeadList(ListHead, Entry) : InsertTailList(ListHead, Entry);
/* And reprotect the pages containing the free links */
MiProtectedPoolProtectLinks(PoolFlink, PoolBlink);
}
@ -163,13 +162,13 @@ NTAPI
MiProtectedPoolRemoveEntryList(IN PLIST_ENTRY Entry)
{
PVOID PoolFlink, PoolBlink;
/* Make the list accessible */
MiProtectedPoolUnProtectLinks(Entry, &PoolFlink, &PoolBlink);
/* Now remove */
RemoveEntryList(Entry);
/* And reprotect the pages containing the free links */
if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
@ -294,7 +293,7 @@ MiInitializeNonPagedPool(VOID)
//
PoolPages = BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
MmNumberOfFreeNonPagedPool = PoolPages;
//
// Initialize the first free entry
//
@ -309,7 +308,7 @@ MiInitializeNonPagedPool(VOID)
//
InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
&FreeEntry->List);
//
// Now create free entries for every single other page
//
@ -329,37 +328,37 @@ MiInitializeNonPagedPool(VOID)
PointerPte = MiAddressToPte(MmNonPagedPoolStart);
ASSERT(PointerPte->u.Hard.Valid == 1);
MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);
//
// Keep track of where initial nonpaged pool ends
//
MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
MmSizeOfNonPagedPoolInBytes);
//
// Validate and remember last allocated pool page
//
PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
ASSERT(PointerPte->u.Hard.Valid == 1);
MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);
//
// Validate the first nonpaged pool expansion page (which is a guard page)
//
PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
ASSERT(PointerPte->u.Hard.Valid == 0);
//
// Calculate the size of the expansion region alone
//
MiExpansionPoolPagesInitialCharge =
BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);
//
// Remove 2 pages, since there's a guard page on top and on the bottom
//
MiExpansionPoolPagesInitialCharge -= 2;
//
// Now initialize the nonpaged pool expansion PTE space. Remember there's a
// guard page on top so make sure to skip it. The bottom guard page will be
@ -388,12 +387,12 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
PVOID BaseVa, BaseVaStart;
PMMFREE_POOL_ENTRY FreeEntry;
PKSPIN_LOCK_QUEUE LockQueue;
//
// Figure out how big the allocation is in pages
//
SizeInPages = BYTES_TO_PAGES(SizeInBytes);
//
// Handle paged pool
//
@ -403,7 +402,7 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
// Lock the paged pool mutex
//
KeAcquireGuardedMutex(&MmPagedPoolMutex);
//
// Find some empty allocation space
//
@ -417,7 +416,7 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
//
i = ((SizeInPages - 1) / PTE_COUNT) + 1;
DPRINT1("Paged pool expansion: %d %x\n", i, SizeInPages);
//
// Check if there is enough paged pool expansion space left
//
@ -431,17 +430,17 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
KeReleaseGuardedMutex(&MmPagedPoolMutex);
return NULL;
}
//
// Check if we'll have to expand past the last PTE we have available
//
if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
(PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
{
//
// We can only support this much then
//
PageTableCount = (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool) -
MmPagedPoolInfo.NextPdeForPagedPoolExpansion +
1;
ASSERT(PageTableCount < i);
@ -454,30 +453,30 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
//
PageTableCount = i;
}
//
// Get the template PDE we'll use to expand
//
TempPde = ValidKernelPde;
//
// Get the first PTE in expansion space
//
PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
BaseVa = MiPdeToAddress(PointerPde);
BaseVaStart = BaseVa;
//
// Lock the PFN database and loop pages
//
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
do
{
//
// It should not already be valid
//
ASSERT(PointerPde->u.Hard.Valid == 0);
/* Request a page */
MI_SET_USAGE(MI_USAGE_PAGED_POOL);
MI_SET_PROCESS2("Kernel");
@ -491,15 +490,15 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
// Save it into our double-buffered system page directory
//
MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde;
/* Initialize the PFN */
MiInitializePfnForOtherProcess(PageFrameNumber,
(PMMPTE)PointerPde,
MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_COUNT]);
/* Write the actual PDE now */
MI_WRITE_VALID_PDE(PointerPde, TempPde);
#endif
//
// Move on to the next expansion address
//
@ -507,12 +506,12 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
i--;
} while (i > 0);
//
// Release the PFN database lock
//
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
//
// These pages are now available, clear their availability bits
//
@ -522,24 +521,24 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
EndAllocation,
PageTableCount * PTE_COUNT);
//
// Update the next expansion location
//
MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount;
//
// Zero out the newly available memory
//
RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE);
//
// Now try consuming the pages again
//
i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
SizeInPages,
0);
if (i == 0xFFFFFFFF)
{
//
// Out of memory!
@ -549,37 +548,37 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
return NULL;
}
}
//
// Update the pool hint if the request was just one page
//
if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;
//
// Update the end bitmap so we know the bounds of this allocation when
// the time comes to free it
//
EndAllocation = i + SizeInPages - 1;
RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);
//
// Now we can release the lock (it mainly protects the bitmap)
//
KeReleaseGuardedMutex(&MmPagedPoolMutex);
//
// Now figure out where this allocation starts
//
BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));
//
// Flush the TLB
//
KeFlushEntireTb(TRUE, TRUE);
/* Setup a demand-zero writable PTE */
MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);
//
// Find the first and last PTE, then loop them all
//
@ -592,25 +591,25 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
//
MI_WRITE_INVALID_PTE(PointerPte, TempPte);
} while (++PointerPte < StartPte);
//
// Return the allocation address to the caller
//
return BaseVa;
}
}
//
// Allocations of less than 4 pages go into their individual buckets
//
i = SizeInPages - 1;
if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
//
// Loop through all the free page lists based on the page index
//
NextHead = &MmNonPagedPoolFreeListHead[i];
LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
//
// Acquire the nonpaged pool lock
//
@ -629,7 +628,7 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
/* We need to be able to touch this page, unprotect it */
MiUnProtectFreeNonPagedPool(NextEntry, 0);
}
//
// Grab the entry and see if it can handle our allocation
//
@ -647,12 +646,12 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
//
BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
(FreeEntry->Size << PAGE_SHIFT));
/* Remove the item from the list, depending if pool is protected */
MmProtectFreedNonPagedPool ?
MiProtectedPoolRemoveEntryList(&FreeEntry->List) :
RemoveEntryList(&FreeEntry->List);
//
// However, check if it's still got space left
//
@ -666,7 +665,7 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
MmProtectFreedNonPagedPool ?
MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE) :
InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
/* Is freed non paged pool protected? */
if (MmProtectFreedNonPagedPool)
{
@ -674,28 +673,28 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
}
}
//
// Grab the PTE for this allocation
//
PointerPte = MiAddressToPte(BaseVa);
ASSERT(PointerPte->u.Hard.Valid == 1);
//
// Grab the PFN NextEntry and index
//
Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
//
// Now mark it as the beginning of an allocation
//
ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
Pfn1->u3.e1.StartOfAllocation = 1;
/* Mark it as special pool if needed */
ASSERT(Pfn1->u4.VerifierAllocation == 0);
if (PoolType & 64) Pfn1->u4.VerifierAllocation = 1;
//
// Check if the allocation is larger than one page
//
@ -708,25 +707,25 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
ASSERT(PointerPte->u.Hard.Valid == 1);
Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
}
//
// Mark this PFN as the last (might be the same as the first)
//
ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
Pfn1->u3.e1.EndOfAllocation = 1;
//
// Release the nonpaged pool lock, and return the allocation
//
KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
return BaseVa;
}
//
// Try the next free page entry
//
NextEntry = FreeEntry->List.Flink;
/* Is freed non paged pool protected? */
if (MmProtectFreedNonPagedPool)
{
@ -735,7 +734,7 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
}
}
} while (++NextHead < LastHead);
//
// If we got here, we're out of space.
// Start by releasing the lock
@ -755,18 +754,18 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
DPRINT1("Out of NP Expansion Pool\n");
return NULL;
}
//
// Acquire the pool lock now
//
OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
//
// Lock the PFN database too
//
LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
//
// Loop the pages
//
@ -777,7 +776,7 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
MI_SET_USAGE(MI_USAGE_PAGED_POOL);
MI_SET_PROCESS2("Kernel");
PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
/* Get the PFN entry for it and fill it out */
Pfn1 = MiGetPfnEntry(PageFrameNumber);
Pfn1->u3.e2.ReferenceCount = 1;
@ -785,33 +784,33 @@ MiAllocatePoolPages(IN POOL_TYPE PoolType,
Pfn1->PteAddress = PointerPte;
Pfn1->u3.e1.PageLocation = ActiveAndValid;
Pfn1->u4.VerifierAllocation = 0;
/* Write the PTE for it */
TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
MI_WRITE_VALID_PTE(PointerPte++, TempPte);
} while (--SizeInPages > 0);
//
// This is the last page
//
Pfn1->u3.e1.EndOfAllocation = 1;
//
// Get the first page and mark it as such
//
Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
Pfn1->u3.e1.StartOfAllocation = 1;
/* Mark it as a verifier allocation if needed */
ASSERT(Pfn1->u4.VerifierAllocation == 0);
if (PoolType & 64) Pfn1->u4.VerifierAllocation = 1;
//
// Release the PFN and nonpaged pool lock
//
KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
//
// Return the address
//
@ -828,7 +827,7 @@ MiFreePoolPages(IN PVOID StartingVa)
KIRQL OldIrql;
PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
ULONG i, End;
//
// Handle paged pool
//
@ -840,56 +839,56 @@ MiFreePoolPages(IN PVOID StartingVa)
//
i = ((ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart) >> PAGE_SHIFT;
End = i;
//
// Now use the end bitmap to scan until we find a set bit, meaning that
// this allocation finishes here
//
while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;
//
// Now calculate the total number of pages this allocation spans
//
NumberOfPages = End - i + 1;
/* Delete the actual pages */
PointerPte = MmPagedPoolInfo.FirstPteForPagedPool + i;
FreePages = MiDeleteSystemPageableVm(PointerPte, NumberOfPages, 0, NULL);
ASSERT(FreePages == NumberOfPages);
//
// Acquire the paged pool lock
//
KeAcquireGuardedMutex(&MmPagedPoolMutex);
//
// Clear the allocation and free bits
//
RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, i);
RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);
//
// Update the hint if we need to
//
if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;
//
// Release the lock protecting the bitmaps
//
KeReleaseGuardedMutex(&MmPagedPoolMutex);
//
// And finally return the number of pages freed
//
return NumberOfPages;
}
//
// Get the first PTE and its corresponding PFN entry
//
StartPte = PointerPte = MiAddressToPte(StartingVa);
StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
//
// Loop until we find the last PTE
//
@ -901,33 +900,33 @@ MiFreePoolPages(IN PVOID StartingVa)
PointerPte++;
Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
}
//
// Now we know how many pages we have
//
NumberOfPages = PointerPte - StartPte + 1;
//
// Acquire the nonpaged pool lock
//
OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
//
// Mark the first and last PTEs as not part of an allocation anymore
//
StartPfn->u3.e1.StartOfAllocation = 0;
Pfn1->u3.e1.EndOfAllocation = 0;
//
// Assume we will free as many pages as the allocation was
//
FreePages = NumberOfPages;
//
// Peek one page past the end of the allocation
//
PointerPte++;
//
// Guard against going past initial nonpaged pool
//
@ -942,16 +941,16 @@ MiFreePoolPages(IN PVOID StartingVa)
{
/* Sanity check */
ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);
/* Check if protected pool is enabled */
if (MmProtectFreedNonPagedPool)
{
/* The freed block will be merged, it must be made accessible */
MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0);
}
//
// Otherwise, our entire allocation must've fit within the initial non
// paged pool, or the expansion nonpaged pool, so get the PFN entry of
// the next allocation
//
@ -960,7 +959,7 @@ MiFreePoolPages(IN PVOID StartingVa)
//
// It's either expansion or initial: get the PFN entry
//
Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
}
else
{
@ -970,9 +969,9 @@ MiFreePoolPages(IN PVOID StartingVa)
//
Pfn1 = NULL;
}
}
//
// Check if this allocation actually exists
//
@ -985,21 +984,21 @@ MiFreePoolPages(IN PVOID StartingVa)
(NumberOfPages << PAGE_SHIFT));
ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
ASSERT(FreeEntry->Owner == FreeEntry);
/* Consume this entry's pages */
FreePages += FreeEntry->Size;
/* Remove the item from the list, depending if pool is protected */
MmProtectFreedNonPagedPool ?
MiProtectedPoolRemoveEntryList(&FreeEntry->List) :
RemoveEntryList(&FreeEntry->List);
}
//
// Now get the official free entry we'll create for the caller's allocation
//
FreeEntry = StartingVa;
//
// Check if our allocation is the very first page
//
@ -1016,14 +1015,14 @@ MiFreePoolPages(IN PVOID StartingVa)
// Otherwise, get the PTE for the page right before our allocation
//
PointerPte -= NumberOfPages + 1;
/* Check if protected pool is enabled */
if (MmProtectFreedNonPagedPool)
{
/* The freed block will be merged, it must be made accessible */
MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0);
}
/* Check if this is valid pool, or a guard page */
if (PointerPte->u.Hard.Valid == 1)
{
@ -1040,7 +1039,7 @@ MiFreePoolPages(IN PVOID StartingVa)
Pfn1 = NULL;
}
}
//
// Check if there is a valid PFN entry for the page before the allocation
// and then check if this page was actually the end of an allocation.
@ -1054,14 +1053,14 @@ MiFreePoolPages(IN PVOID StartingVa)
FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
FreeEntry = FreeEntry->Owner;
/* Check if protected pool is enabled */
if (MmProtectFreedNonPagedPool)
{
/* The freed block will be merged, it must be made accessible */
MiUnProtectFreeNonPagedPool(FreeEntry, 0);
}
//
// Check if the entry is small enough to be indexed on a free list
// If it is, we'll want to re-insert it, since we're about to
@ -1073,18 +1072,18 @@ MiFreePoolPages(IN PVOID StartingVa)
MmProtectFreedNonPagedPool ?
MiProtectedPoolRemoveEntryList(&FreeEntry->List) :
RemoveEntryList(&FreeEntry->List);
//
// Update its size
//
FreeEntry->Size += FreePages;
//
// And now find the new appropriate list to place it in
//
i = (ULONG)(FreeEntry->Size - 1);
if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
/* Insert the entry into the free list head, check for prot. pool */
MmProtectFreedNonPagedPool ?
MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE) :
@ -1098,7 +1097,7 @@ MiFreePoolPages(IN PVOID StartingVa)
FreeEntry->Size += FreePages;
}
}
//
// Check if we were unable to do any compaction, and we'll stick with this
//
@ -1109,30 +1108,30 @@ MiFreePoolPages(IN PVOID StartingVa)
// pages, at best we have our pages plus whatever entry came after us
//
FreeEntry->Size = FreePages;
//
// Find the appropriate list we should be on
//
i = FreeEntry->Size - 1;
if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
/* Insert the entry into the free list head, check for prot. pool */
MmProtectFreedNonPagedPool ?
MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE) :
InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
}
//
// Just a sanity check
//
ASSERT(FreePages != 0);
//
// Get all the pages between our allocation and its end. These will all now
// become free page chunks.
//
NextEntry = StartingVa;
LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
do
{
//
@ -1142,14 +1141,14 @@ MiFreePoolPages(IN PVOID StartingVa)
NextEntry->Signature = MM_FREE_POOL_SIGNATURE;
NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
} while (NextEntry != LastEntry);
/* Is freed non paged pool protected? */
if (MmProtectFreedNonPagedPool)
{
/* Protect the freed pool! */
MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
}
//
// We're done, release the lock and let the caller know how much we freed
//
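The paged-pool branch of MiFreePoolPages recovers the allocation size from two bitmaps: the freed address gives the start index, and the end-of-allocation bitmap is scanned forward for the next set bit, so NumberOfPages = End - i + 1. A minimal sketch of that scan, assuming a plain byte-array bitmap instead of RTL_BITMAP:

static unsigned SizeFromEndBitmap(const unsigned char *EndBitmap,
                                  unsigned StartIndex)
{
    unsigned End = StartIndex;

    /* Walk forward until the end-of-allocation bit is found */
    while (!(EndBitmap[End / 8] & (1u << (End % 8)))) End++;
    return End - StartIndex + 1;
}

/* For a 3-page allocation at index 5, bit 7 is set in the end bitmap,
   so SizeFromEndBitmap(Bitmap, 5) walks 5, 6, 7 and returns 3. */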

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::PROCSUP"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -60,11 +59,11 @@ MiCreatePebOrTeb(IN PEPROCESS Process,
LARGE_INTEGER CurrentTime;
TABLE_SEARCH_RESULT Result = TableFoundNode;
PMMADDRESS_NODE Parent;
/* Allocate a VAD */
Vad = ExAllocatePoolWithTag(NonPagedPool, sizeof(MMVAD_LONG), 'ldaV');
if (!Vad) return STATUS_NO_MEMORY;
/* Setup the primary flags with the size, and make it committed, private, RW */
Vad->u.LongFlags = 0;
Vad->u.VadFlags.CommitCharge = BYTES_TO_PAGES(Size);
@ -72,13 +71,13 @@ MiCreatePebOrTeb(IN PEPROCESS Process,
Vad->u.VadFlags.PrivateMemory = TRUE;
Vad->u.VadFlags.Protection = MM_READWRITE;
Vad->u.VadFlags.NoChange = TRUE;
/* Setup the secondary flags to make it a secured, writable, long VAD */
Vad->u2.LongFlags2 = 0;
Vad->u2.VadFlags2.OneSecured = TRUE;
Vad->u2.VadFlags2.LongVad = TRUE;
Vad->u2.VadFlags2.ReadOnly = FALSE;
/* Lock the process address space */
KeAcquireGuardedMutex(&Process->AddressCreationLock);
@ -120,23 +119,23 @@ MiCreatePebOrTeb(IN PEPROCESS Process,
/* Bail out, if still nothing free was found */
if (Result == TableFoundNode) return STATUS_NO_MEMORY;
}
/* Validate that it came from the VAD ranges */
ASSERT(*Base >= (ULONG_PTR)MI_LOWEST_VAD_ADDRESS);
/* Build the rest of the VAD now */
Vad->StartingVpn = (*Base) >> PAGE_SHIFT;
Vad->EndingVpn = ((*Base) + Size - 1) >> PAGE_SHIFT;
Vad->u3.Secured.StartVpn = *Base;
Vad->u3.Secured.EndVpn = (Vad->EndingVpn << PAGE_SHIFT) | (PAGE_SIZE - 1);
Vad->u1.Parent = NULL;
/* FIXME: Should setup VAD bitmap */
Status = STATUS_SUCCESS;
/* Pretend as if we own the working set */
MiLockProcessWorkingSet(Process, Thread);
/* Insert the VAD */
ASSERT(Vad->EndingVpn >= Vad->StartingVpn);
Process->VadRoot.NodeHint = Vad;
@ -166,20 +165,20 @@ MmDeleteTeb(IN PEPROCESS Process,
PMMVAD Vad;
PMM_AVL_TABLE VadTree = &Process->VadRoot;
DPRINT("Deleting TEB: %p in %16s\n", Teb, Process->ImageFileName);
/* TEB is one page */
TebEnd = (ULONG_PTR)Teb + ROUND_TO_PAGES(sizeof(TEB)) - 1;
/* Attach to the process */
KeAttachProcess(&Process->Pcb);
/* Lock the process address space */
KeAcquireGuardedMutex(&Process->AddressCreationLock);
/* Find the VAD, make sure it's a TEB VAD */
Vad = MiLocateAddress(Teb);
DPRINT("Removing node for VAD: %lx %lx\n", Vad->StartingVpn, Vad->EndingVpn);
ASSERT(Vad != NULL);
if (Vad->StartingVpn != ((ULONG_PTR)Teb >> PAGE_SHIFT))
{
/* Bug in the AVL code? */
@ -200,17 +199,17 @@ MmDeleteTeb(IN PEPROCESS Process,
/* Remove this VAD from the tree */
ASSERT(VadTree->NumberGenericTableElements >= 1);
MiRemoveNode((PMMADDRESS_NODE)Vad, VadTree);
/* Release the working set */
MiUnlockProcessWorkingSet(Process, Thread);
/* Remove the VAD */
ExFreePool(Vad);
}
/* Release the address space lock */
KeReleaseGuardedMutex(&Process->AddressCreationLock);
/* Detach */
KeDetachProcess();
}
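The StartingVpn comparison above is plain shift arithmetic: a virtual page number is the address shifted right by PAGE_SHIFT (12 for 4 KB pages). A tiny worked example with a hypothetical TEB address:

#include <stdio.h>

int main(void)
{
    unsigned long Teb = 0x7FFDE000UL;  /* hypothetical TEB base */
    printf("VPN = %#lx\n", Teb >> 12); /* prints VPN = 0x7ffde */
    return 0;
}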
@ -225,22 +224,22 @@ MmDeleteKernelStack(IN PVOID StackBase,
PMMPFN Pfn1;//, Pfn2;
ULONG i;
KIRQL OldIrql;
//
// This should be the guard page, so decrement by one
//
PointerPte = MiAddressToPte(StackBase);
PointerPte--;
//
// Calculate pages used
//
StackPages = BYTES_TO_PAGES(GuiStack ?
KERNEL_LARGE_STACK_SIZE : KERNEL_STACK_SIZE);
/* Acquire the PFN lock */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
//
// Loop them
//
@ -258,28 +257,28 @@ MmDeleteKernelStack(IN PVOID StackBase,
/* Now get the page of the page table mapping it */
PageTableFrameNumber = Pfn1->u4.PteFrame;
Pfn2 = MiGetPfnEntry(PageTableFrameNumber);
/* Remove a shared reference, since the page is going away */
MiDecrementShareCount(Pfn2, PageTableFrameNumber);
#endif
/* Set the special pending delete marker */
MI_SET_PFN_DELETED(Pfn1);
/* And now delete the actual stack page */
MiDecrementShareCount(Pfn1, PageFrameNumber);
}
//
// Next one
//
PointerPte--;
}
//
// We should be at the guard page now
//
ASSERT(PointerPte->u.Hard.Valid == 0);
/* Release the PFN lock */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
@ -301,7 +300,7 @@ MmCreateKernelStack(IN BOOLEAN GuiStack,
KIRQL OldIrql;
PFN_NUMBER PageFrameIndex;
ULONG i;
//
// Calculate pages needed
//
@ -312,7 +311,7 @@ MmCreateKernelStack(IN BOOLEAN GuiStack,
//
StackPtes = BYTES_TO_PAGES(KERNEL_LARGE_STACK_SIZE);
StackPages = BYTES_TO_PAGES(KERNEL_LARGE_STACK_COMMIT);
}
else
{
@ -322,37 +321,37 @@ MmCreateKernelStack(IN BOOLEAN GuiStack,
StackPtes = BYTES_TO_PAGES(KERNEL_STACK_SIZE);
StackPages = StackPtes;
}
//
// Reserve stack pages, plus a guard page
//
StackPte = MiReserveSystemPtes(StackPtes + 1, SystemPteSpace);
if (!StackPte) return NULL;
//
// Get the stack address
//
BaseAddress = MiPteToAddress(StackPte + StackPtes + 1);
//
// Select the right PTE address where we actually start committing pages
//
PointerPte = StackPte;
if (GuiStack) PointerPte += BYTES_TO_PAGES(KERNEL_LARGE_STACK_SIZE -
KERNEL_LARGE_STACK_COMMIT);
/* Setup the temporary invalid PTE */
MI_MAKE_SOFTWARE_PTE(&InvalidPte, MM_NOACCESS);
/* Setup the template stack PTE */
MI_MAKE_HARDWARE_PTE_KERNEL(&TempPte, PointerPte + 1, MM_READWRITE, 0);
//
// Acquire the PFN DB lock
//
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
//
// Loop each stack page
//
@ -362,7 +361,7 @@ MmCreateKernelStack(IN BOOLEAN GuiStack,
// Next PTE
//
PointerPte++;
/* Get a page and write the current invalid PTE */
MI_SET_USAGE(MI_USAGE_KERNEL_STACK);
MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
@ -371,7 +370,7 @@ MmCreateKernelStack(IN BOOLEAN GuiStack,
/* Initialize the PFN entry for this page */
MiInitializePfn(PageFrameIndex, PointerPte, 1);
/* Write the valid PTE */
TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
MI_WRITE_VALID_PTE(PointerPte, TempPte);
@ -381,7 +380,7 @@ MmCreateKernelStack(IN BOOLEAN GuiStack,
// Release the PFN lock
//
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
//
// Return the stack address
//
@ -398,25 +397,25 @@ MmGrowKernelStackEx(IN PVOID StackPointer,
KIRQL OldIrql;
MMPTE TempPte, InvalidPte;
PFN_NUMBER PageFrameIndex;
//
// Make sure the stack did not overflow
//
ASSERT(((ULONG_PTR)Thread->StackBase - (ULONG_PTR)Thread->StackLimit) <=
(KERNEL_LARGE_STACK_SIZE + PAGE_SIZE));
//
// Get the current stack limit
//
LimitPte = MiAddressToPte(Thread->StackLimit);
ASSERT(LimitPte->u.Hard.Valid == 1);
//
// Get the new one and make sure this isn't a bogus request
//
NewLimitPte = MiAddressToPte((PVOID)((ULONG_PTR)StackPointer - GrowSize));
if (NewLimitPte == LimitPte) return STATUS_SUCCESS;
//
// Now make sure you're not going past the reserved space
//
@ -430,15 +429,15 @@ MmGrowKernelStackEx(IN PVOID StackPointer,
DPRINT1("Thread wants too much stack\n");
return STATUS_STACK_OVERFLOW;
}
//
// Calculate the number of new pages
//
LimitPte--;
/* Setup the temporary invalid PTE */
MI_MAKE_SOFTWARE_PTE(&InvalidPte, MM_NOACCESS);
//
// Acquire the PFN DB lock
//
@ -457,19 +456,19 @@ MmGrowKernelStackEx(IN PVOID StackPointer,
/* Initialize the PFN entry for this page */
MiInitializePfn(PageFrameIndex, LimitPte, 1);
/* Setup the template stack PTE */
MI_MAKE_HARDWARE_PTE_KERNEL(&TempPte, LimitPte, MM_READWRITE, PageFrameIndex);
/* Write the valid PTE */
MI_WRITE_VALID_PTE(LimitPte--, TempPte);
}
//
// Release the PFN lock
//
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
//
// Set the new limit
//
@ -493,7 +492,7 @@ MmSetMemoryPriorityProcess(IN PEPROCESS Process,
IN UCHAR MemoryPriority)
{
UCHAR OldPriority;
//
// Check if we have less than 16MB of Physical Memory
//
@ -505,13 +504,13 @@ MmSetMemoryPriorityProcess(IN PEPROCESS Process,
//
MemoryPriority = MEMORY_PRIORITY_BACKGROUND;
}
//
// Save the old priority and update it
//
OldPriority = (UCHAR)Process->Vm.Flags.MemoryPriority;
Process->Vm.Flags.MemoryPriority = MemoryPriority;
//
// Return the old priority
//
@ -524,12 +523,12 @@ MmGetSessionLocaleId(VOID)
{
PEPROCESS Process;
PAGED_CODE();
//
// Get the current process
//
Process = PsGetCurrentProcess();
//
// Check if it's the Session Leader
//
@ -548,7 +547,7 @@ MmGetSessionLocaleId(VOID)
#endif
}
}
//
// Not a session leader, return the default
//
@ -572,12 +571,12 @@ MmCreatePeb(IN PEPROCESS Process,
KAFFINITY ProcessAffinityMask = 0;
SectionOffset.QuadPart = (ULONGLONG)0;
*BasePeb = NULL;
//
// Attach to Process
//
KeAttachProcess(&Process->Pcb);
//
// Allocate the PEB
//
@ -598,7 +597,7 @@ MmCreatePeb(IN PEPROCESS Process,
MEM_TOP_DOWN,
PAGE_READONLY);
if (!NT_SUCCESS(Status)) return Status;
//
// Use SEH in case we can't load the PEB
//
@ -608,7 +607,7 @@ MmCreatePeb(IN PEPROCESS Process,
// Initialize the PEB
//
RtlZeroMemory(Peb, sizeof(PEB));
//
// Set up data
//
@ -616,14 +615,14 @@ MmCreatePeb(IN PEPROCESS Process,
Peb->InheritedAddressSpace = InitialPeb->InheritedAddressSpace;
Peb->Mutant = InitialPeb->Mutant;
Peb->ImageUsesLargePages = InitialPeb->ImageUsesLargePages;
//
// NLS
//
Peb->AnsiCodePageData = (PCHAR)TableBase + ExpAnsiCodePageDataOffset;
Peb->OemCodePageData = (PCHAR)TableBase + ExpOemCodePageDataOffset;
Peb->UnicodeCaseTableData = (PCHAR)TableBase + ExpUnicodeCaseTableDataOffset;
//
// Default Version Data (could get changed below)
//
@ -632,7 +631,7 @@ MmCreatePeb(IN PEPROCESS Process,
Peb->OSBuildNumber = (USHORT)(NtBuildNumber & 0x3FFF);
Peb->OSPlatformId = 2; /* VER_PLATFORM_WIN32_NT */
Peb->OSCSDVersion = (USHORT)CmNtCSDVersion;
//
// Heap and Debug Data
//
@ -648,7 +647,7 @@ MmCreatePeb(IN PEPROCESS Process,
*/
Peb->MaximumNumberOfHeaps = (PAGE_SIZE - sizeof(PEB)) / sizeof(PVOID);
Peb->ProcessHeaps = (PVOID*)(Peb + 1);
//
// Session ID
//
@ -663,7 +662,7 @@ MmCreatePeb(IN PEPROCESS Process,
_SEH2_YIELD(return _SEH2_GetExceptionCode());
}
_SEH2_END;
//
// Use SEH in case we can't load the image
//
@ -684,7 +683,7 @@ MmCreatePeb(IN PEPROCESS Process,
_SEH2_YIELD(return STATUS_INVALID_IMAGE_PROTECT);
}
_SEH2_END;
//
// Parse the headers
//
@ -711,7 +710,7 @@ MmCreatePeb(IN PEPROCESS Process,
sizeof(IMAGE_LOAD_CONFIG_DIRECTORY),
sizeof(ULONG));
}
//
// Write subsystem data
//
@ -732,7 +731,7 @@ MmCreatePeb(IN PEPROCESS Process,
Peb->OSBuildNumber = (NtHeaders->OptionalHeader.Win32VersionValue >> 16) & 0x3FFF;
Peb->OSPlatformId = (NtHeaders->OptionalHeader.Win32VersionValue >> 30) ^ 2;
}
//
// Process the image config data overrides if specified
//
@ -748,7 +747,7 @@ MmCreatePeb(IN PEPROCESS Process,
//
Peb->OSCSDVersion = ImageConfigData->CSDVersion;
}
//
// Process affinity mask override
//
@ -760,7 +759,7 @@ MmCreatePeb(IN PEPROCESS Process,
ProcessAffinityMask = ImageConfigData->ProcessAffinityMask;
}
}
//
// Check if this is a UP image
//
if (Characteristics & IMAGE_FILE_UP_SYSTEM_ONLY)
@ -788,7 +787,7 @@ MmCreatePeb(IN PEPROCESS Process,
}
_SEH2_END;
}
//
// Detach from the Process
//
@ -807,18 +806,18 @@ MmCreateTeb(IN PEPROCESS Process,
PTEB Teb;
NTSTATUS Status = STATUS_SUCCESS;
*BaseTeb = NULL;
//
// Attach to Target
//
KeAttachProcess(&Process->Pcb);
//
// Allocate the TEB
//
Status = MiCreatePebOrTeb(Process, sizeof(TEB), (PULONG_PTR)&Teb);
ASSERT(NT_SUCCESS(Status));
//
// Use SEH in case we can't load the TEB
//
@ -828,18 +827,18 @@ MmCreateTeb(IN PEPROCESS Process,
// Initialize the TEB
//
RtlZeroMemory(Teb, sizeof(TEB));
//
// Set TIB Data
//
Teb->NtTib.ExceptionList = EXCEPTION_CHAIN_END;
Teb->NtTib.Self = (PNT_TIB)Teb;
//
// Identify this as an OS/2 V3.0 ("Cruiser") TIB
//
Teb->NtTib.Version = 30 << 8;
//
// Set TEB Data
//
@ -847,7 +846,7 @@ MmCreateTeb(IN PEPROCESS Process,
Teb->RealClientId = *ClientId;
Teb->ProcessEnvironmentBlock = Process->Peb;
Teb->CurrentLocale = PsDefaultThreadLocaleId;
//
// Check if we have a grandparent TEB
//
@ -912,7 +911,7 @@ MiInitializeWorkingSetList(IN PEPROCESS CurrentProcess)
MmWorkingSetList->FirstDynamic = 2;
MmWorkingSetList->NextSlot = 3;
MmWorkingSetList->LastInitializedWsle = 4;
/* The rule is that the owner process is always in the FLINK of the PDE's PFN entry */
Pfn1 = MiGetPfnEntry(MiAddressToPte(PDE_BASE)->u.Hard.PageFrameNumber);
ASSERT(Pfn1->u4.PteFrame == MiGetPfnEntryIndex(Pfn1));
@ -940,14 +939,14 @@ MmInitializeProcessAddressSpace(IN PEPROCESS Process,
PCHAR Destination;
USHORT Length = 0;
MMPTE TempPte;
/* We should have a PDE */
ASSERT(Process->Pcb.DirectoryTableBase[0] != 0);
ASSERT(Process->PdeUpdateNeeded == FALSE);
/* Attach to the process */
KeAttachProcess(&Process->Pcb);
/* The address space should now been in phase 1 or 0 */
ASSERT(Process->AddressSpaceInitialized <= 1);
Process->AddressSpaceInitialized = 2;
@ -972,7 +971,7 @@ MmInitializeProcessAddressSpace(IN PEPROCESS Process,
PointerPde = MiAddressToPde(HYPER_SPACE);
PageFrameNumber = PFN_FROM_PTE(PointerPde);
MiInitializePfn(PageFrameNumber, (PMMPTE)PointerPde, TRUE);
/* Setup the PFN for the PTE for the working set */
PointerPte = MiAddressToPte(MI_WORKING_SET_LIST);
MI_MAKE_HARDWARE_PTE(&TempPte, PointerPte, MM_READWRITE, 0);
@ -1057,7 +1056,7 @@ MmInitializeProcessAddressSpace(IN PEPROCESS Process,
/* Save the pointer */
Process->SectionBaseAddress = ImageBase;
}
/* Be nice and detach */
KeDetachProcess();
@ -1092,7 +1091,7 @@ NTAPI
INIT_FUNCTION
MmInitializeHandBuiltProcess2(IN PEPROCESS Process)
{
/* Lock the VAD, ARM3-owned ranges away */
MiRosTakeOverPebTebRanges(Process);
return STATUS_SUCCESS;
}
@ -1116,13 +1115,13 @@ MmCreateProcessAddressSpace(IN ULONG MinWs,
/* Choose a process color */
Process->NextPageColor = RtlRandom(&MmProcessColorSeed);
/* Setup the hyperspace lock */
KeInitializeSpinLock(&Process->HyperSpaceLock);
/* Lock PFN database */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
/* Get a zero page for the PDE, if possible */
Color = MI_GET_NEXT_PROCESS_COLOR(Process);
MI_SET_USAGE(MI_USAGE_PAGE_DIRECTORY);
@ -1131,13 +1130,13 @@ MmCreateProcessAddressSpace(IN ULONG MinWs,
{
/* No zero pages, grab a free one */
PdeIndex = MiRemoveAnyPage(Color);
/* Zero it outside the PFN lock */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
MiZeroPhysicalPage(PdeIndex);
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
}
/* Get a zero page for hyperspace, if possible */
MI_SET_USAGE(MI_USAGE_PAGE_DIRECTORY);
Color = MI_GET_NEXT_PROCESS_COLOR(Process);
@ -1146,7 +1145,7 @@ MmCreateProcessAddressSpace(IN ULONG MinWs,
{
/* No zero pages, grab a free one */
HyperIndex = MiRemoveAnyPage(Color);
/* Zero it outside the PFN lock */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
MiZeroPhysicalPage(HyperIndex);
@ -1161,7 +1160,7 @@ MmCreateProcessAddressSpace(IN ULONG MinWs,
{
/* No zero pages, grab a free one */
WsListIndex = MiRemoveAnyPage(Color);
/* Zero it outside the PFN lock */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
MiZeroPhysicalPage(WsListIndex);
@ -1183,7 +1182,7 @@ MmCreateProcessAddressSpace(IN ULONG MinWs,
/* Make sure we don't already have a page directory setup */
ASSERT(Process->Pcb.DirectoryTableBase[0] == 0);
/* Get a PTE to map hyperspace */
PointerPte = MiReserveSystemPtes(1, SystemPteSpace);
ASSERT(PointerPte != NULL);
@ -1268,16 +1267,16 @@ MmCleanProcessAddressSpace(IN PEPROCESS Process)
PMMVAD Vad;
PMM_AVL_TABLE VadTree;
PETHREAD Thread = PsGetCurrentThread();
/* Only support this */
ASSERT(Process->AddressSpaceInitialized == 2);
/* Lock the process address space from changes */
MmLockAddressSpace(&Process->Vm);
/* VM is deleted now */
Process->VmDeleted = TRUE;
/* Enumerate the VADs */
VadTree = &Process->VadRoot;
while (VadTree->NumberGenericTableElements)
@ -1294,7 +1293,7 @@ MmCleanProcessAddressSpace(IN PEPROCESS Process)
/* Only regular VADs supported for now */
ASSERT(Vad->u.VadFlags.VadType == VadNone);
/* Check if this is a section VAD */
if (!(Vad->u.VadFlags.PrivateMemory) && (Vad->ControlArea))
{
@ -1307,11 +1306,11 @@ MmCleanProcessAddressSpace(IN PEPROCESS Process)
MiDeleteVirtualAddresses(Vad->StartingVpn << PAGE_SHIFT,
(Vad->EndingVpn << PAGE_SHIFT) | (PAGE_SIZE - 1),
Vad);
/* Release the working set */
MiUnlockProcessWorkingSet(Process, Thread);
}
/* Skip ARM3 fake VADs, they'll be freed by MmDeleteProcessAddressSpace */
if (Vad->u.VadFlags.Spare == 1)
{
@ -1319,11 +1318,11 @@ MmCleanProcessAddressSpace(IN PEPROCESS Process)
Vad->u.VadFlags.Spare = 2;
continue;
}
/* Free the VAD memory */
ExFreePool(Vad);
}
/* Release the address space */
MmUnlockAddressSpace(&Process->Vm);
}
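The teardown loop above is a drain-until-empty pattern: while the VAD table still reports elements, take a node, remove it, and release it (ARM3 placeholder VADs are only marked and skipped). A minimal sketch of the pattern, with a plain linked list standing in for the AVL table:

#include <stdlib.h>

typedef struct _FAKE_VAD { struct _FAKE_VAD *Next; } FAKE_VAD;

static void CleanAddressSpace(FAKE_VAD **Root, unsigned *Count)
{
    while (*Count) /* mirrors VadTree->NumberGenericTableElements */
    {
        FAKE_VAD *Vad = *Root;
        *Root = Vad->Next; /* MiRemoveNode in the original */
        (*Count)--;
        free(Vad);         /* ExFreePool in the original */
    }
}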

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::SECTION"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -82,14 +81,14 @@ MiMakeProtectionMask(IN ULONG Protect)
/* PAGE_EXECUTE_WRITECOMBINE is theoretically the maximum */
if (Protect >= (PAGE_WRITECOMBINE * 2)) return MM_INVALID_PROTECTION;
/*
* Windows API protection mask can be understood as two bitfields, differing
* by whether or not execute rights are being requested
*/
Mask1 = Protect & 0xF;
Mask2 = (Protect >> 4) & 0xF;
/* Check which field is there */
if (!Mask1)
{
@ -103,10 +102,10 @@ MiMakeProtectionMask(IN ULONG Protect)
if (Mask2) return MM_INVALID_PROTECTION;
ProtectMask = MmUserProtectionToMask1[Mask1];
}
/* Make sure the final mask is a valid one */
if (ProtectMask == MM_INVALID_PROTECTION) return MM_INVALID_PROTECTION;
/* Check for PAGE_GUARD option */
if (Protect & PAGE_GUARD)
{
@ -117,41 +116,41 @@ MiMakeProtectionMask(IN ULONG Protect)
/* Fail such requests */
return MM_INVALID_PROTECTION;
}
/* This actually turns on guard page in this scenario! */
ProtectMask |= MM_DECOMMIT;
}
/* Check for nocache option */
if (Protect & PAGE_NOCACHE)
{
/* The earlier check should've eliminated this possibility */
ASSERT((Protect & PAGE_GUARD) == 0);
/* Check for no-access page or write combine page */
if ((ProtectMask == MM_NOACCESS) || (Protect & PAGE_WRITECOMBINE))
{
/* Such a request is invalid */
return MM_INVALID_PROTECTION;
}
/* Add the PTE flag */
ProtectMask |= MM_NOCACHE;
}
/* Check for write combine option */
if (Protect & PAGE_WRITECOMBINE)
{
/* The two earlier scenarios should've caught this */
ASSERT((Protect & (PAGE_GUARD | PAGE_NOACCESS)) == 0);
/* Don't allow on no-access pages */
if (ProtectMask == MM_NOACCESS) return MM_INVALID_PROTECTION;
/* This actually turns on write-combine in this scenario! */
ProtectMask |= MM_NOACCESS;
}
/* Return the final MM PTE protection mask */
return ProtectMask;
}
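The two-bitfield reading of the Win32 protection constants puts the non-execute rights in the low nibble and the execute variants in the high nibble; a valid request uses exactly one of the two fields. A short worked example with the standard PAGE_READWRITE (0x04) and PAGE_EXECUTE_READWRITE (0x40) values:

#include <stdio.h>

int main(void)
{
    unsigned Protect = 0x40;      /* PAGE_EXECUTE_READWRITE */
    printf("Mask1=%u Mask2=%u\n", /* prints Mask1=0 Mask2=4 */
           Protect & 0xF, (Protect >> 4) & 0xF);

    Protect = 0x04;               /* PAGE_READWRITE */
    printf("Mask1=%u Mask2=%u\n", /* prints Mask1=4 Mask2=0 */
           Protect & 0xF, (Protect >> 4) & 0xF);
    return 0;
}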
@ -170,7 +169,7 @@ MiInitializeSystemSpaceMap(IN PVOID InputSession OPTIONAL)
/* Initialize the system space lock */
Session->SystemSpaceViewLockPointer = &Session->SystemSpaceViewLock;
KeInitializeGuardedMutex(Session->SystemSpaceViewLockPointer);
/* Set the start address */
Session->SystemSpaceViewStart = MiSystemViewStart;
@ -202,7 +201,7 @@ MiInitializeSystemSpaceMap(IN PVOID InputSession OPTIONAL)
' mM');
ASSERT(Session->SystemSpaceViewTable != NULL);
RtlZeroMemory(Session->SystemSpaceViewTable, AllocSize);
/* Success */
return TRUE;
}
@ -263,7 +262,7 @@ MiAddMappedPtes(IN PMMPTE FirstPte,
ASSERT(ControlArea->u.Flags.GlobalOnlyPerSession == 0);
ASSERT(ControlArea->u.Flags.Rom == 0);
ASSERT(ControlArea->FilePointer == NULL);
/* Sanity checks */
ASSERT(PteCount != 0);
ASSERT(ControlArea->NumberOfMappedViews >= 1);
@ -292,10 +291,10 @@ MiAddMappedPtes(IN PMMPTE FirstPte,
UNIMPLEMENTED;
while (TRUE);
}
/* The PTE should be completely clear */
ASSERT(PointerPte->u.Long == 0);
/* Build the prototype PTE and write it */
MI_MAKE_PROTOTYPE_PTE(&TempPte, ProtoPte);
MI_WRITE_INVALID_PTE(PointerPte, TempPte);
@ -327,7 +326,7 @@ MiFillSystemPageDirectory(IN PVOID Base,
/* Find the system double-mapped PDE that describes this mapping */
SystemMapPde = &MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)];
/* Use the PDE template and loop the PDEs */
TempPde = ValidKernelPde;
while (PointerPde <= LastPde)
@ -352,7 +351,7 @@ MiFillSystemPageDirectory(IN PVOID Base,
/* Make the system PDE entry valid */
MI_WRITE_VALID_PDE(SystemMapPde, TempPde);
/* The system PDE entry might be the PDE itself, so check for this */
if (PointerPde->u.Hard.Valid == 0)
{
@ -375,10 +374,10 @@ MiCheckPurgeAndUpMapCount(IN PCONTROL_AREA ControlArea,
IN BOOLEAN FailIfSystemViews)
{
KIRQL OldIrql;
/* Flag not yet supported */
ASSERT(FailIfSystemViews == FALSE);
/* Lock the PFN database */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
@ -412,12 +411,12 @@ MiLocateSubsection(IN PMMVAD Vad,
/* Get the subsection */
Subsection = (PSUBSECTION)(ControlArea + 1);
/* We only support single-subsection segments */
ASSERT(Subsection->SubsectionBase != NULL);
ASSERT(Vad->FirstPrototypePte >= Subsection->SubsectionBase);
ASSERT(Vad->FirstPrototypePte < &Subsection->SubsectionBase[Subsection->PtesInSubsection]);
/* Compute the PTE offset */
PteOffset = (ULONG_PTR)Vpn - Vad->StartingVpn;
PteOffset += Vad->FirstPrototypePte - Subsection->SubsectionBase;
@ -425,7 +424,7 @@ MiLocateSubsection(IN PMMVAD Vad,
/* Again, we only support single-subsection segments */
ASSERT(PteOffset < 0xF0000000);
ASSERT(PteOffset < Subsection->PtesInSubsection);
/* Return the subsection */
return Subsection;
}
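The PteOffset computation above reduces to: the VPN's offset inside the VAD, plus the index of the VAD's first prototype PTE within the subsection's PTE array. A one-function sketch with made-up numbers:

static unsigned long PteOffsetFor(unsigned long Vpn,
                                  unsigned long StartingVpn,
                                  unsigned long FirstProtoIndex)
{
    /* e.g. Vpn 0x1005, StartingVpn 0x1000, FirstProtoIndex 2 -> 7 */
    return (Vpn - StartingVpn) + FirstProtoIndex;
}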
@ -448,7 +447,7 @@ MiSegmentDelete(IN PSEGMENT Segment)
/* Make sure control area is on the right delete path */
ASSERT(ControlArea->u.Flags.BeingDeleted == 1);
ASSERT(ControlArea->WritableUserReferences == 0);
/* These things are not supported yet */
ASSERT(ControlArea->DereferenceList.Flink == NULL);
ASSERT(!(ControlArea->u.Flags.Image) & !(ControlArea->u.Flags.File));
@ -462,7 +461,7 @@ MiSegmentDelete(IN PSEGMENT Segment)
/* Lock the PFN database */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
/* Check if the master PTE is invalid */
PteForProto = MiAddressToPte(PointerPte);
if (!PteForProto->u.Hard.Valid)
@ -497,7 +496,7 @@ MiSegmentDelete(IN PSEGMENT Segment)
PointerPte->u.Long = 0;
PointerPte++;
}
/* Release the PFN lock */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
@ -513,7 +512,7 @@ MiCheckControlArea(IN PCONTROL_AREA ControlArea,
{
BOOLEAN DeleteSegment = FALSE;
ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
/* Check if this is the last reference or view */
if (!(ControlArea->NumberOfMappedViews) &&
!(ControlArea->NumberOfSectionReferences))
@ -531,7 +530,7 @@ MiCheckControlArea(IN PCONTROL_AREA ControlArea,
/* Release the PFN lock */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
/* Delete the segment if needed */
if (DeleteSegment)
{
@ -557,7 +556,7 @@ MiRemoveMappedView(IN PEPROCESS CurrentProcess,
ASSERT(Vad->u2.VadFlags2.ExtendableFile == FALSE);
ASSERT(ControlArea);
ASSERT(ControlArea->FilePointer == NULL);
/* Delete the actual virtual memory pages */
MiDeleteVirtualAddresses(Vad->StartingVpn << PAGE_SHIFT,
(Vad->EndingVpn << PAGE_SHIFT) | (PAGE_SIZE - 1),
@ -600,17 +599,17 @@ MiMapViewInSystemSpace(IN PVOID Section,
ASSERT(ControlArea->u.Flags.GlobalOnlyPerSession == 0);
ASSERT(ControlArea->u.Flags.Rom == 0);
ASSERT(ControlArea->u.Flags.WasPurged == 0);
/* Increase the reference and map count on the control area, no purges yet */
Status = MiCheckPurgeAndUpMapCount(ControlArea, FALSE);
ASSERT(NT_SUCCESS(Status));
/* Get the section size at creation time */
SectionSize = ((PSECTION)Section)->SizeOfSection.LowPart;
/* If the caller didn't specify a view size, assume the whole section */
if (!(*ViewSize)) *ViewSize = SectionSize;
/* Check if the caller wanted a larger view than the section */
if (*ViewSize > SectionSize)
{
@ -622,7 +621,7 @@ MiMapViewInSystemSpace(IN PVOID Section,
/* Get the number of 64K buckets required for this mapping */
Buckets = *ViewSize / MI_SYSTEM_VIEW_BUCKET_SIZE;
if (*ViewSize & (MI_SYSTEM_VIEW_BUCKET_SIZE - 1)) Buckets++;
/* Check if the view is more than 4GB large */
if (Buckets >= MI_SYSTEM_VIEW_BUCKET_SIZE)
{
@ -645,7 +644,7 @@ MiMapViewInSystemSpace(IN PVOID Section,
BYTES_TO_PAGES(*ViewSize),
ControlArea);
ASSERT(NT_SUCCESS(Status));
/* Return the base address of the mapping and success */
*MappedBase = Base;
return STATUS_SUCCESS;
@ -682,10 +681,10 @@ MiMapViewOfDataSection(IN PCONTROL_AREA ControlArea,
ASSERT(ControlArea->u.Flags.Rom == 0);
ASSERT(ControlArea->FilePointer == NULL);
ASSERT(Segment->SegmentFlags.TotalNumberOfPtes4132 == 0);
/* Based sections not supported */
ASSERT(Section->Address.StartingVpn == 0);
/* These flags/parameters are not supported */
ASSERT((AllocationType & MEM_DOS_LIM) == 0);
ASSERT((AllocationType & MEM_RESERVE) == 0);
@ -708,7 +707,7 @@ MiMapViewOfDataSection(IN PCONTROL_AREA ControlArea,
{
/* A size was specified, align it to a 64K boundary */
*ViewSize += SectionOffset->LowPart & (_64K - 1);
/* Align the offset as well to make this an aligned map */
SectionOffset->LowPart &= ~((ULONG)_64K - 1);
}
@ -725,11 +724,11 @@ MiMapViewOfDataSection(IN PCONTROL_AREA ControlArea,
/* The offset must be in this segment's PTE chunk and it must be valid */
ASSERT(PteOffset < Segment->TotalNumberOfPtes);
ASSERT(((SectionOffset->QuadPart + *ViewSize + PAGE_SIZE - 1) >> PAGE_SHIFT) >= PteOffset);
/* In ARM3, only one subsection is used for now. It must contain these PTEs */
ASSERT(PteOffset < Subsection->PtesInSubsection);
ASSERT(Subsection->SubsectionBase != NULL);
/* In ARM3, only MEM_COMMIT is supported for now. The PTEs must've been committed */
ASSERT(Segment->NumberOfCommittedPages >= Segment->TotalNumberOfPtes);
@ -768,7 +767,7 @@ MiMapViewOfDataSection(IN PCONTROL_AREA ControlArea,
/* Get the ending address, which is the last piece we need for the VAD */
EndingAddress = (StartAddress + *ViewSize - 1) | (PAGE_SIZE - 1);
/* A VAD can now be allocated. Do so and zero it out */
Vad = ExAllocatePoolWithTag(NonPagedPool, sizeof(MMVAD), 'ldaV');
ASSERT(Vad);
@ -796,13 +795,13 @@ MiMapViewOfDataSection(IN PCONTROL_AREA ControlArea,
/* Make sure the last PTE is valid and still within the subsection */
ASSERT(PteOffset < Subsection->PtesInSubsection);
ASSERT(Vad->FirstPrototypePte <= Vad->LastContiguousPte);
/* FIXME: Should setup VAD bitmap */
Status = STATUS_SUCCESS;
/* Pretend as if we own the working set */
MiLockProcessWorkingSet(Process, Thread);
/* Insert the VAD */
MiInsertVad(Vad, Process);
@ -836,7 +835,7 @@ MiCreatePagingFileMap(OUT PSEGMENT *Segment,
/* No large pages in ARM3 yet */
ASSERT((AllocationAttributes & SEC_LARGE_PAGES) == 0);
/* Pagefile-backed sections need a known size */
if (!(*MaximumSize)) return STATUS_INVALID_PARAMETER_4;
@ -861,7 +860,7 @@ MiCreatePagingFileMap(OUT PSEGMENT *Segment,
'tSmM');
ASSERT(NewSegment);
*Segment = NewSegment;
/* Now allocate the control area, which has the subsection structure */
ControlArea = ExAllocatePoolWithTag(NonPagedPool,
sizeof(CONTROL_AREA) + sizeof(SUBSECTION),
@ -899,14 +898,14 @@ MiCreatePagingFileMap(OUT PSEGMENT *Segment,
/* The subsection's base address is the first Prototype PTE in the segment */
Subsection->SubsectionBase = PointerPte;
/* Start with an empty PTE, unless this is a commit operation */
TempPte.u.Long = 0;
if (AllocationAttributes & SEC_COMMIT)
{
/* In which case, write down the protection mask in the Prototype PTEs */
TempPte.u.Soft.Protection = ProtectionMask;
/* For accounting, also mark these pages as being committed */
NewSegment->NumberOfCommittedPages = PteCount;
}
@ -926,7 +925,7 @@ MmGetFileObjectForSection(IN PVOID SectionObject)
PSECTION_OBJECT Section;
ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
ASSERT(SectionObject != NULL);
/* Check if it's an ARM3, or ReactOS section */
if ((ULONG_PTR)SectionObject & 1)
{
@ -947,7 +946,7 @@ MmGetFileNameForFileObject(IN PFILE_OBJECT FileObject,
POBJECT_NAME_INFORMATION ObjectNameInfo;
NTSTATUS Status;
ULONG ReturnLength;
/* Allocate memory for our structure */
ObjectNameInfo = ExAllocatePoolWithTag(PagedPool, 1024, ' mM');
if (!ObjectNameInfo) return STATUS_NO_MEMORY;
@ -1038,7 +1037,7 @@ InvalidAddress:
/* Unlock address space */
MmUnlockAddressSpace(AddressSpace);
/* Get the filename of the section */
Status = MmGetFileNameForSection(Section, &ModuleNameInformation);
}
@ -1047,7 +1046,7 @@ InvalidAddress:
/* Get the VAD */
Vad = MiLocateAddress(Address);
if (!Vad) goto InvalidAddress;
/* Make sure it's not a VM VAD */
if (Vad->u.VadFlags.PrivateMemory == 1)
{
@ -1056,22 +1055,22 @@ NotSection:
MmUnlockAddressSpace(AddressSpace);
return STATUS_SECTION_NOT_IMAGE;
}
/* Get the control area */
ControlArea = Vad->ControlArea;
if (!(ControlArea) || !(ControlArea->u.Flags.Image)) goto NotSection;
/* Get the file object */
FileObject = ControlArea->FilePointer;
ASSERT(FileObject != NULL);
ObReferenceObject(FileObject);
/* Unlock address space */
MmUnlockAddressSpace(AddressSpace);
/* Get the filename of the file object */
Status = MmGetFileNameForFileObject(FileObject, &ModuleNameInformation);
/* Dereference it */
ObDereferenceObject(FileObject);
}
@ -1092,7 +1091,7 @@ NotSection:
ExFreePoolWithTag(ModuleNameInformation, ' mM');
DPRINT("Found ModuleName %S by address %p\n", ModuleName->Buffer, Address);
}
/* Return status */
return Status;
}
@ -1174,7 +1173,7 @@ MmCreateArm3Section(OUT PVOID *SectionObject,
ASSERT(ControlArea->u.Flags.GlobalOnlyPerSession == 0);
ASSERT(ControlArea->u.Flags.Rom == 0);
ASSERT(ControlArea->u.Flags.WasPurged == 0);
/* A pagefile-backed mapping only has one subsection, and this is all ARM3 supports */
Subsection = (PSUBSECTION)(ControlArea + 1);
ASSERT(Subsection->NextSubsection == NULL);
@ -1228,15 +1227,15 @@ MmMapViewOfArm3Section(IN PVOID SectionObject,
/* Get the segment and control area */
Section = (PSECTION)SectionObject;
ControlArea = Section->Segment->ControlArea;
/* These flags/states are not yet supported by ARM3 */
ASSERT(Section->u.Flags.Image == 0);
ASSERT(Section->u.Flags.NoCache == 0);
ASSERT(Section->u.Flags.WriteCombined == 0);
ASSERT((AllocationType & MEM_RESERVE) == 0);
ASSERT(ControlArea->u.Flags.PhysicalMemory == 0);
#if 0
/* FIXME: Check if the mapping protection is compatible with the create */
if (!MiIsProtectionCompatible(Section->InitialPageProtection, Protect))
@ -1296,7 +1295,7 @@ MmMapViewOfArm3Section(IN PVOID SectionObject,
DPRINT1("The protection is invalid\n");
return STATUS_INVALID_PAGE_PROTECTION;
}
/* We only handle pagefile-backed sections, which cannot be writecombined */
if (Protect & PAGE_WRITECOMBINE)
{
@ -1310,7 +1309,7 @@ MmMapViewOfArm3Section(IN PVOID SectionObject,
KeStackAttachProcess(&Process->Pcb, &ApcState);
Attached = TRUE;
}
/* Lock the address space and make sure the process is alive */
MmLockAddressSpace(&Process->Vm);
if (!Process->VmDeleted)
@ -1334,7 +1333,7 @@ MmMapViewOfArm3Section(IN PVOID SectionObject,
DPRINT1("The process is dying\n");
Status = STATUS_PROCESS_IS_TERMINATING;
}
/* Unlock the address space and detach if needed, then return status */
MmUnlockAddressSpace(&Process->Vm);
if (Attached) KeUnstackDetachProcess(&ApcState);
@ -1454,9 +1453,9 @@ NtCreateSection(OUT PHANDLE SectionHandle,
FileHandle,
NULL);
if (!NT_SUCCESS(Status)) return Status;
/* FIXME: Should zero last page for a file mapping */
/* Now insert the object */
Status = ObInsertObject(SectionObject,
NULL,
@ -1558,7 +1557,7 @@ NtMapViewOfSection(IN HANDLE SectionHandle,
ACCESS_MASK DesiredAccess;
ULONG ProtectionMask;
KPROCESSOR_MODE PreviousMode = ExGetPreviousMode();
/* Check for invalid zero bits */
if (ZeroBits > 21) // per-arch?
{
@ -1572,7 +1571,7 @@ NtMapViewOfSection(IN HANDLE SectionHandle,
DPRINT1("Invalid inherit disposition\n");
return STATUS_INVALID_PARAMETER_8;
}
/* Allow only valid allocation types */
if ((AllocationType & ~(MEM_TOP_DOWN | MEM_LARGE_PAGES | MEM_DOS_LIM |
SEC_NO_CHANGE | MEM_RESERVE)))
@ -1605,7 +1604,7 @@ NtMapViewOfSection(IN HANDLE SectionHandle,
ProbeForWritePointer(BaseAddress);
ProbeForWriteSize_t(ViewSize);
}
/* Check if a section offset was given */
if (SectionOffset)
{
@ -1613,7 +1612,7 @@ NtMapViewOfSection(IN HANDLE SectionHandle,
if (PreviousMode != KernelMode) ProbeForWriteLargeInteger(SectionOffset);
SafeSectionOffset = *SectionOffset;
}
/* Capture the other parameters */
SafeBaseAddress = *BaseAddress;
SafeViewSize = *ViewSize;
@ -1645,7 +1644,7 @@ NtMapViewOfSection(IN HANDLE SectionHandle,
DPRINT1("Invalid zero bits\n");
return STATUS_INVALID_PARAMETER_4;
}
/* Reference the process */
Status = ObReferenceObjectByHandle(ProcessHandle,
PROCESS_VM_OPERATION,
@ -1667,7 +1666,7 @@ NtMapViewOfSection(IN HANDLE SectionHandle,
ObDereferenceObject(Process);
return Status;
}
/* Now do the actual mapping */
Status = MmMapViewOfSection(Section,
Process,
@ -1692,7 +1691,7 @@ NtMapViewOfSection(IN HANDLE SectionHandle,
SafeSectionOffset.LowPart,
SafeViewSize);
}
/* Return data only on success */
if (NT_SUCCESS(Status))
{
@ -1744,7 +1743,7 @@ NtUnmapViewOfSection(IN HANDLE ProcessHandle,
/* Unmap the view */
Status = MmUnmapViewOfSection(Process, BaseAddress);
/* Dereference the process and return status */
ObDereferenceObject(Process);
return Status;
@ -1782,7 +1781,7 @@ NtExtendSection(IN HANDLE SectionHandle,
/* Just read the size directly */
SafeNewMaximumSize = *NewMaximumSize;
}
/* Reference the section */
Status = ObReferenceObjectByHandle(SectionHandle,
SECTION_EXTEND_SIZE,
@ -1799,12 +1798,12 @@ NtExtendSection(IN HANDLE SectionHandle,
ObDereferenceObject(Section);
return STATUS_SECTION_NOT_EXTENDED;
}
/* FIXME: Do the work */
/* Dereference the section */
ObDereferenceObject(Section);
/* Enter SEH */
_SEH2_TRY
{
@ -1816,7 +1815,7 @@ NtExtendSection(IN HANDLE SectionHandle,
/* Nothing to do */
}
_SEH2_END;
/* Return the status */
return STATUS_NOT_IMPLEMENTED;
}

View file

@ -13,7 +13,6 @@
#define NDEBUG
#include <debug.h>
#line 16 "ARM³::LOADER"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -112,7 +111,7 @@ MiLoadImageSection(IN OUT PVOID *SectionPtr,
/* Not session load, shouldn't have an entry */
ASSERT(LdrEntry == NULL);
/* Attach to the system process */
KeStackAttachProcess(&PsInitialSystemProcess->Pcb, &ApcState);
@ -154,12 +153,12 @@ MiLoadImageSection(IN OUT PVOID *SectionPtr,
KeUnstackDetachProcess(&ApcState);
return Status;
}
/* Reserve system PTEs needed */
PteCount = ROUND_TO_PAGES(Section->ImageSection->ImageSize) >> PAGE_SHIFT;
PointerPte = MiReserveSystemPtes(PteCount, SystemPteSpace);
if (!PointerPte) return STATUS_INSUFFICIENT_RESOURCES;
/* New driver base */
LastPte = PointerPte + PteCount;
DriverBase = MiPteToAddress(PointerPte);
@ -182,7 +181,7 @@ MiLoadImageSection(IN OUT PVOID *SectionPtr,
pos = wcsrchr(FileName->Buffer, '\\');
len = wcslen(pos) * sizeof(WCHAR);
if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
}
}
#endif
TempPte.u.Hard.PageFrameNumber = MiAllocatePfn(PointerPte, MM_EXECUTE);
@ -192,7 +191,7 @@ MiLoadImageSection(IN OUT PVOID *SectionPtr,
/* Move on */
PointerPte++;
}
/* Copy the image */
RtlCopyMemory(DriverBase, Base, PteCount << PAGE_SHIFT);
@ -386,14 +385,14 @@ MiDereferenceImports(IN PLOAD_IMPORTS ImportList)
/* Then there's nothing to do */
return STATUS_SUCCESS;
}
/* Check for single-entry */
if ((ULONG_PTR)ImportList & MM_SYSLDR_SINGLE_ENTRY)
{
/* Set it up */
SingleEntry.Count = 1;
SingleEntry.Entry[0] = (PVOID)((ULONG_PTR)ImportList &~ MM_SYSLDR_SINGLE_ENTRY);
/* Use this as the import list */
ImportList = &SingleEntry;
}
@ -404,24 +403,24 @@ MiDereferenceImports(IN PLOAD_IMPORTS ImportList)
/* Get the entry */
LdrEntry = ImportList->Entry[i];
DPRINT1("%wZ <%wZ>\n", &LdrEntry->FullDllName, &LdrEntry->BaseDllName);
/* Skip boot loaded images */
if (LdrEntry->LoadedImports == MM_SYSLDR_BOOT_LOADED) continue;
/* Dereference the entry */
ASSERT(LdrEntry->LoadCount >= 1);
if (!--LdrEntry->LoadCount)
{
/* Save the import data in case unload fails */
CurrentImports = LdrEntry->LoadedImports;
/* This is the last entry */
LdrEntry->LoadedImports = MM_SYSLDR_NO_IMPORTS;
if (MiCallDllUnloadAndUnloadDll(LdrEntry))
{
/* Unloading worked, parse this DLL's imports too */
MiDereferenceImports(CurrentImports);
/* Check if we had valid imports */
if ((CurrentImports != MM_SYSLDR_BOOT_LOADED) ||
(CurrentImports != MM_SYSLDR_NO_IMPORTS) ||
@ -438,7 +437,7 @@ MiDereferenceImports(IN PLOAD_IMPORTS ImportList)
}
}
}
/* Done */
return STATUS_SUCCESS;
}
@ -1454,7 +1453,7 @@ MiReloadBootLoadedDrivers(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
/* Remember the original address */
DllBase = LdrEntry->DllBase;
/* Loop the PTEs */
PointerPte = StartPte;
while (PointerPte < LastPte)
@ -1464,11 +1463,11 @@ MiReloadBootLoadedDrivers(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
ASSERT(Pfn1->u3.e1.Rom == 0);
Pfn1->u3.e1.Modified = TRUE;
/* Next */
PointerPte++;
}
/* Now reserve system PTEs for the image */
PointerPte = MiReserveSystemPtes(PteCount, SystemPteSpace);
if (!PointerPte)
@ -1477,7 +1476,7 @@ MiReloadBootLoadedDrivers(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
DPRINT1("[Mm0]: Couldn't allocate driver section!\n");
while (TRUE);
}
/* This is the new virtual address for the module */
LastPte = PointerPte + PteCount;
NewImageAddress = MiPteToAddress(PointerPte);
@ -1485,7 +1484,7 @@ MiReloadBootLoadedDrivers(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
/* Sanity check */
DPRINT("[Mm0]: Copying from: %p to: %p\n", DllBase, NewImageAddress);
ASSERT(ExpInitializationPhase == 0);
/* Loop the new driver PTEs */
TempPte = ValidKernelPte;
while (PointerPte < LastPte)
@ -1504,7 +1503,7 @@ MiReloadBootLoadedDrivers(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
PointerPte++;
StartPte++;
}
/* Update position */
PointerPte -= PteCount;
@ -1547,7 +1546,7 @@ MiReloadBootLoadedDrivers(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
LdrEntry->EntryPoint = (PVOID)((ULONG_PTR)NewImageAddress +
NtHeader->OptionalHeader.AddressOfEntryPoint);
LdrEntry->SizeOfImage = PteCount << PAGE_SHIFT;
/* FIXME: We'll need to fixup the PFN linkage when switching to ARM3 */
}
}
@ -1568,7 +1567,7 @@ MiBuildImportsForBootDrivers(VOID)
ULONG_PTR DllBase, DllEnd;
ULONG Modules = 0, i, j = 0;
PIMAGE_IMPORT_DESCRIPTOR ImportDescriptor;
/* Initialize variables */
KernelEntry = HalEntry = LastEntry = NULL;
@ -1592,7 +1591,7 @@ MiBuildImportsForBootDrivers(VOID)
/* Found it */
HalEntry = LdrEntry;
}
/* Check if this is a driver DLL */
if (LdrEntry->Flags & LDRP_DRIVER_DEPENDENT_DLL)
{
@ -1611,9 +1610,9 @@ MiBuildImportsForBootDrivers(VOID)
else
{
/* No referencing needed */
LdrEntry->LoadCount = 0;
}
/* Remember this came from the loader */
LdrEntry->LoadedImports = MM_SYSLDR_BOOT_LOADED;
@ -1621,10 +1620,10 @@ MiBuildImportsForBootDrivers(VOID)
NextEntry = NextEntry->Flink;
Modules++;
}
/* We must have at least found the kernel and HAL */
if (!(HalEntry) || (!KernelEntry)) return STATUS_NOT_FOUND;
/* Allocate the list */
EntryArray = ExAllocatePoolWithTag(PagedPool, Modules * sizeof(PVOID), 'TDmM');
if (!EntryArray) return STATUS_INSUFFICIENT_RESOURCES;
@ -1643,7 +1642,7 @@ MiBuildImportsForBootDrivers(VOID)
TRUE,
IMAGE_DIRECTORY_ENTRY_IAT,
&ImportSize);
if (!ImageThunk)
#else
/* Get its imports */
ImportDescriptor = RtlImageDirectoryEntryToData(LdrEntry->DllBase,
@ -1658,12 +1657,12 @@ MiBuildImportsForBootDrivers(VOID)
NextEntry = NextEntry->Flink;
continue;
}
/* Clear the list and count the number of IAT thunks */
RtlZeroMemory(EntryArray, Modules * sizeof(PVOID));
#ifdef _WORKING_LOADER_
ImportSize /= sizeof(ULONG_PTR);
/* Scan the thunks */
for (i = 0, DllBase = 0, DllEnd = 0; i < ImportSize; i++, ImageThunk++)
#else
@ -1689,7 +1688,7 @@ MiBuildImportsForBootDrivers(VOID)
continue;
}
}
/* Loop the loaded module list to locate this address owner */
j = 0;
NextEntry2 = PsLoadedModuleList.Flink;
@ -1699,11 +1698,11 @@ MiBuildImportsForBootDrivers(VOID)
LdrEntry2 = CONTAINING_RECORD(NextEntry2,
LDR_DATA_TABLE_ENTRY,
InLoadOrderLinks);
/* Get the address range for this module */
DllBase = (ULONG_PTR)LdrEntry2->DllBase;
DllEnd = DllBase + LdrEntry2->SizeOfImage;
/* Check if this IAT entry matches it */
if ((*ImageThunk >= DllBase) && (*ImageThunk < DllEnd))
{
@ -1712,12 +1711,12 @@ MiBuildImportsForBootDrivers(VOID)
EntryArray[j] = LdrEntry2;
break;
}
/* Keep searching */
NextEntry2 = NextEntry2->Flink;
j++;
}
/* Do we have a thunk outside the range? */
if ((*ImageThunk < DllBase) || (*ImageThunk >= DllEnd))
{
@ -1729,19 +1728,19 @@ MiBuildImportsForBootDrivers(VOID)
LdrEntry, ImageThunk, *ImageThunk);
ASSERT(FALSE);
}
/* Reset if we hit this */
DllBase = 0;
}
#ifndef _WORKING_LOADER_
ImageThunk++;
}
i++;
ImportDescriptor++;
#endif
}
/* Now scan how many imports we really have */
for (i = 0, ImportSize = 0; i < Modules; i++)
{
@ -1755,7 +1754,7 @@ MiBuildImportsForBootDrivers(VOID)
ImportSize++;
}
}
/* Do we have any imports after all? */
if (!ImportSize)
{
@ -1776,10 +1775,10 @@ MiBuildImportsForBootDrivers(VOID)
LoadedImportsSize,
'TDmM');
ASSERT(LoadedImports);
/* Save the count */
LoadedImports->Count = ImportSize;
/* Now copy all imports */
for (i = 0, j = 0; i < Modules; i++)
{
@ -1795,25 +1794,25 @@ MiBuildImportsForBootDrivers(VOID)
j++;
}
}
/* Should have as many entries as we expected */
ASSERT(j == ImportSize);
LdrEntry->LoadedImports = LoadedImports;
}
/* Next */
NextEntry = NextEntry->Flink;
}
/* Free the initial array */
ExFreePool(EntryArray);
/* FIXME: Might not need to keep the HAL/Kernel imports around */
/* Kernel and HAL are loaded at boot */
KernelEntry->LoadedImports = MM_SYSLDR_BOOT_LOADED;
HalEntry->LoadedImports = MM_SYSLDR_BOOT_LOADED;
/* All worked well */
return STATUS_SUCCESS;
}
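/*
 * Illustrative sketch (not from the ReactOS tree): the thunk walk above
 * boils down to a range test -- an IAT entry belongs to the module whose
 * [DllBase, DllBase + SizeOfImage) span contains it. Stand-alone model:
 */
#include <stddef.h>
#include <stdint.h>

typedef struct MODULE_RANGE { uintptr_t Base; size_t Size; } MODULE_RANGE;

/* Return the index of the module that owns Address, or -1 if none does */
static int FindOwningModule(const MODULE_RANGE *Modules, size_t Count,
                            uintptr_t Address)
{
    for (size_t i = 0; i < Count; i++)
    {
        if ((Address >= Modules[i].Base) &&
            (Address < Modules[i].Base + Modules[i].Size))
        {
            return (int)i;
        }
    }
    return -1; /* the thunk points outside every loaded image */
}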
@ -1827,7 +1826,7 @@ MiLocateKernelSections(IN PLDR_DATA_TABLE_ENTRY LdrEntry)
PIMAGE_NT_HEADERS NtHeaders;
PIMAGE_SECTION_HEADER SectionHeader;
ULONG Sections, Size;
/* Get the kernel section header */
DllBase = (ULONG_PTR)LdrEntry->DllBase;
NtHeaders = RtlImageNtHeader((PVOID)DllBase);
@ -1839,7 +1838,7 @@ MiLocateKernelSections(IN PLDR_DATA_TABLE_ENTRY LdrEntry)
{
/* Grab the size of the section */
Size = max(SectionHeader->SizeOfRawData, SectionHeader->Misc.VirtualSize);
/* Check for .RSRC section */
if (*(PULONG)SectionHeader->Name == 'rsr.')
{
@ -1862,7 +1861,7 @@ MiLocateKernelSections(IN PLDR_DATA_TABLE_ENTRY LdrEntry)
{
/* Found Mm* Pool code */
MmPoolCodeStart = DllBase + SectionHeader->VirtualAddress;
MmPoolCodeEnd = MmPoolCodeStart + Size;
}
}
else if ((*(PULONG)SectionHeader->Name == 'YSIM') &&
@ -1872,7 +1871,7 @@ MiLocateKernelSections(IN PLDR_DATA_TABLE_ENTRY LdrEntry)
MmPteCodeStart = DllBase + SectionHeader->VirtualAddress;
MmPteCodeEnd = MmPteCodeStart + Size;
}
/* Keep going */
Sections--;
SectionHeader++;
@ -1900,7 +1899,7 @@ MiInitializeLoadedModuleList(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
LDR_DATA_TABLE_ENTRY,
InLoadOrderLinks);
PsNtosImageBase = (ULONG_PTR)LdrEntry->DllBase;
/* Locate resource section, pool code, and system pte code */
MiLocateKernelSections(LdrEntry);
@ -2012,11 +2011,11 @@ MiUseLargeDriverPage(IN ULONG NumberOfPtes,
/* Keep trying */
NextEntry = NextEntry->Flink;
}
/* If we didn't find the driver, it doesn't need large pages */
if (DriverFound == FALSE) return FALSE;
}
/* Nothing to do yet */
DPRINT1("Large pages not supported!\n");
return FALSE;
@ -2028,13 +2027,13 @@ MiComputeDriverProtection(IN BOOLEAN SessionSpace,
IN ULONG SectionProtection)
{
ULONG Protection = MM_ZERO_ACCESS;
/* Check if the caller gave anything */
if (SectionProtection)
{
/* Always turn on execute access */
SectionProtection |= IMAGE_SCN_MEM_EXECUTE;
/* Check if the registry setting is on or not */
if (!MmEnforceWriteProtection)
{
@ -2042,11 +2041,11 @@ MiComputeDriverProtection(IN BOOLEAN SessionSpace,
SectionProtection |= (IMAGE_SCN_MEM_WRITE | IMAGE_SCN_MEM_EXECUTE);
}
}
/* Convert to internal PTE flags */
if (SectionProtection & IMAGE_SCN_MEM_EXECUTE) Protection |= MM_EXECUTE;
if (SectionProtection & IMAGE_SCN_MEM_READ) Protection |= MM_READONLY;
/* Check for write access */
if (SectionProtection & IMAGE_SCN_MEM_WRITE)
{
@ -2062,10 +2061,10 @@ MiComputeDriverProtection(IN BOOLEAN SessionSpace,
Protection = (Protection & MM_EXECUTE) ? MM_EXECUTE_READWRITE : MM_READWRITE;
}
}
/* If there's no access at all by now, convert to internal no access flag */
if (Protection == MM_ZERO_ACCESS) Protection = MM_NOACCESS;
/* Return the computed PTE protection */
return Protection;
}
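/*
 * Illustrative sketch (not from the ReactOS tree; MM_NOACCESS here is an
 * assumed stand-in value): the fold above works because the MM_* codes are
 * encoded so that OR-ing MM_EXECUTE (2) into MM_READONLY (1) yields
 * MM_EXECUTE_READ (3). A stand-alone model with write enforcement on:
 */
#include <stdio.h>

#define IMAGE_SCN_MEM_EXECUTE 0x20000000u
#define IMAGE_SCN_MEM_READ    0x40000000u
#define IMAGE_SCN_MEM_WRITE   0x80000000u

#define MM_ZERO_ACCESS       0u
#define MM_READONLY          1u
#define MM_EXECUTE           2u
#define MM_EXECUTE_READ      3u
#define MM_READWRITE         4u
#define MM_EXECUTE_READWRITE 6u
#define MM_NOACCESS          8u /* assumed value for the internal code */

static unsigned ComputeProtection(unsigned Scn)
{
    unsigned Prot = MM_ZERO_ACCESS;
    if (Scn) Scn |= IMAGE_SCN_MEM_EXECUTE;  /* execute is always granted */
    if (Scn & IMAGE_SCN_MEM_EXECUTE) Prot |= MM_EXECUTE;
    if (Scn & IMAGE_SCN_MEM_READ)    Prot |= MM_READONLY;
    if (Scn & IMAGE_SCN_MEM_WRITE)   /* writable: pick the RW variant */
        Prot = (Prot & MM_EXECUTE) ? MM_EXECUTE_READWRITE : MM_READWRITE;
    return (Prot == MM_ZERO_ACCESS) ? MM_NOACCESS : Prot;
}

int main(void)
{
    /* A .text-style section (read + execute) computes to MM_EXECUTE_READ */
    printf("%u\n", ComputeProtection(IMAGE_SCN_MEM_READ |
                                     IMAGE_SCN_MEM_EXECUTE)); /* prints 3 */
    return 0;
}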
@ -2093,14 +2092,14 @@ MiWriteProtectSystemImage(IN PVOID ImageBase)
PMMPTE PointerPte, StartPte, LastPte, CurrentPte, ComboPte = NULL;
ULONG CurrentMask, CombinedMask = 0;
PAGED_CODE();
/* No need to write protect physical memory-backed drivers (large pages) */
if (MI_IS_PHYSICAL_ADDRESS(ImageBase)) return;
/* Get the image headers */
NtHeaders = RtlImageNtHeader(ImageBase);
if (!NtHeaders) return;
/* Check if this is a session driver or not */
if (!MI_IS_SESSION_ADDRESS(ImageBase))
{
@ -2114,13 +2113,13 @@ MiWriteProtectSystemImage(IN PVOID ImageBase)
DPRINT1("Session drivers not supported\n");
ASSERT(FALSE);
}
/* These are the only protection masks we care about */
ProtectionMask = IMAGE_SCN_MEM_WRITE | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_EXECUTE;
/* Calculate the number of pages this driver is occupying */
DriverPages = BYTES_TO_PAGES(NtHeaders->OptionalHeader.SizeOfImage);
/* Get the number of sections and the first section header */
Sections = NtHeaders->FileHeader.NumberOfSections;
ASSERT(Sections != 0);
@ -2132,7 +2131,7 @@ MiWriteProtectSystemImage(IN PVOID ImageBase)
{
/* Get the section size */
Size = max(Section->SizeOfRawData, Section->Misc.VirtualSize);
/* Get its virtual address */
BaseAddress = (ULONG_PTR)ImageBase + Section->VirtualAddress;
if (BaseAddress < CurrentAddress)
@ -2141,24 +2140,24 @@ MiWriteProtectSystemImage(IN PVOID ImageBase)
DPRINT1("Badly linked image!\n");
return;
}
/* Remember the current address */
CurrentAddress = BaseAddress + Size - 1;
/* Next */
Sections--;
Section++;
}
/* Get the number of sections and the first section header */
Sections = NtHeaders->FileHeader.NumberOfSections;
ASSERT(Sections != 0);
Section = IMAGE_FIRST_SECTION(NtHeaders);
/* Set the address at the end to initialize the loop */
CurrentAddress = (ULONG_PTR)Section + Sections - 1;
CurrentProtection = IMAGE_SCN_MEM_WRITE | IMAGE_SCN_MEM_READ;
/* Set the PTE points for the image, and loop its sections */
StartPte = MiAddressToPte(ImageBase);
LastPte = StartPte + DriverPages;
@ -2166,97 +2165,97 @@ MiWriteProtectSystemImage(IN PVOID ImageBase)
{
/* Get the section size */
Size = max(Section->SizeOfRawData, Section->Misc.VirtualSize);
/* Get its virtual address and PTE */
BaseAddress = (ULONG_PTR)ImageBase + Section->VirtualAddress;
PointerPte = MiAddressToPte(BaseAddress);
/* Check if we were already protecting a run, and found a new run */
if ((ComboPte) && (PointerPte > ComboPte))
{
/* Compute protection */
CombinedMask = MiComputeDriverProtection(FALSE, CombinedProtection);
/* Set it */
MiSetSystemCodeProtection(ComboPte, ComboPte, CombinedMask);
/* Check for overlap */
if (ComboPte == StartPte) StartPte++;
/* One done, reset variables */
ComboPte = NULL;
CombinedProtection = 0;
}
/* Break out when needed */
if (PointerPte >= LastPte) break;
/* Get the requested protection from the image header */
SectionProtection = Section->Characteristics & ProtectionMask;
if (SectionProtection == CurrentProtection)
{
/* Same protection, so merge the request */
CurrentAddress = BaseAddress + Size - 1;
/* Next */
Sections--;
Section++;
continue;
}
/* This is now a new section, so close up the old one */
CurrentPte = MiAddressToPte(CurrentAddress);
/* Check for overlap */
if (CurrentPte == PointerPte)
{
/* Skip the last PTE, since it overlaps with us */
CurrentPte--;
/* And set the PTE we will merge with */
ASSERT((ComboPte == NULL) || (ComboPte == PointerPte));
ComboPte = PointerPte;
/* Get the most flexible protection by merging both */
CombinedProtection |= (SectionProtection | CurrentProtection);
}
/* Loop any PTEs left */
if (CurrentPte >= StartPte)
{
/* Sanity check */
ASSERT(StartPte < LastPte);
/* Make sure we don't overflow past the last PTE in the driver */
if (CurrentPte >= LastPte) CurrentPte = LastPte - 1;
ASSERT(CurrentPte >= StartPte);
/* Compute the protection and set it */
CurrentMask = MiComputeDriverProtection(FALSE, CurrentProtection);
MiSetSystemCodeProtection(StartPte, CurrentPte, CurrentMask);
}
/* Set new state */
StartPte = PointerPte;
CurrentAddress = BaseAddress + Size - 1;
CurrentProtection = SectionProtection;
/* Next */
Sections--;
Section++;
}
/* Is there a leftover section to merge? */
if (ComboPte)
{
/* Compute and set the protection */
CombinedMask = MiComputeDriverProtection(FALSE, CombinedProtection);
MiSetSystemCodeProtection(ComboPte, ComboPte, CombinedMask);
/* Handle overlap */
if (ComboPte == StartPte) StartPte++;
}
/* Finally, handle the last section */
CurrentPte = MiAddressToPte(CurrentAddress);
if ((StartPte < LastPte) && (CurrentPte >= StartPte))
@ -2264,7 +2263,7 @@ MiWriteProtectSystemImage(IN PVOID ImageBase)
/* Handle overlap */
if (CurrentPte >= LastPte) CurrentPte = LastPte - 1;
ASSERT(CurrentPte >= StartPte);
/* Compute and set the protection */
CurrentMask = MiComputeDriverProtection(FALSE, CurrentProtection);
MiSetSystemCodeProtection(StartPte, CurrentPte, CurrentMask);
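/*
 * Illustrative sketch (not from the ReactOS tree; it deliberately ignores
 * the shared boundary-PTE case that ComboPte handles above): the section
 * walk is at heart a run-length merge -- contiguous sections with equal
 * protection collapse into one span before the protection is applied:
 */
#include <stddef.h>
#include <stdint.h>

typedef struct SECTION_RUN { uint32_t StartPage, PageCount, Protection; } SECTION_RUN;

/* Call Apply() once per maximal run of equal protection; returns run count */
static size_t ProtectRuns(const SECTION_RUN *s, size_t n,
                          void (*Apply)(uint32_t Start, uint32_t Count,
                                        uint32_t Protection))
{
    size_t Runs = 0;
    size_t i = 0;
    while (i < n)
    {
        uint32_t Start = s[i].StartPage;
        uint32_t Count = s[i].PageCount;
        uint32_t Prot  = s[i].Protection;

        /* Absorb following sections that are adjacent and identical */
        while ((++i < n) && (s[i].Protection == Prot) &&
               (s[i].StartPage == Start + Count))
        {
            Count += s[i].PageCount;
        }
        Apply(Start, Count, Prot);
        Runs++;
    }
    return Runs;
}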
@ -2281,17 +2280,17 @@ MiSetPagingOfDriver(IN PMMPTE PointerPte,
PFN_NUMBER PageCount = 0, PageFrameIndex;
PMMPFN Pfn1;
PAGED_CODE();
/* Get the driver's base address */
ImageBase = MiPteToAddress(PointerPte);
ASSERT(MI_IS_SESSION_IMAGE_ADDRESS(ImageBase) == FALSE);
/* If this is a large page, it's stuck in physical memory */
if (MI_IS_PHYSICAL_ADDRESS(ImageBase)) return;
/* Lock the working set */
MiLockWorkingSet(CurrentThread, &MmSystemCacheWs);
/* Loop the PTEs */
while (PointerPte <= LastPte)
{
@ -2301,18 +2300,18 @@ MiSetPagingOfDriver(IN PMMPTE PointerPte,
PageFrameIndex = PFN_FROM_PTE(PointerPte);
Pfn1 = MiGetPfnEntry(PageFrameIndex);
ASSERT(Pfn1->u2.ShareCount == 1);
/* No working sets in ReactOS yet */
PageCount++;
}
ImageBase = (PVOID)((ULONG_PTR)ImageBase + PAGE_SIZE);
PointerPte++;
}
/* Release the working set */
MiUnlockWorkingSet(CurrentThread, &MmSystemCacheWs);
/* Do we have any driver pages? */
if (PageCount)
{
@ -2331,16 +2330,16 @@ MiEnablePagingOfDriver(IN PLDR_DATA_TABLE_ENTRY LdrEntry)
PIMAGE_SECTION_HEADER Section;
PMMPTE PointerPte = NULL, LastPte = NULL;
if (MmDisablePagingExecutive) return;
/* Get the driver base address and its NT header */
ImageBase = (ULONG_PTR)LdrEntry->DllBase;
NtHeaders = RtlImageNtHeader((PVOID)ImageBase);
if (!NtHeaders) return;
/* Get the sections and their alignment */
Sections = NtHeaders->FileHeader.NumberOfSections;
Alignment = NtHeaders->OptionalHeader.SectionAlignment - 1;
/* Loop each section */
Section = IMAGE_FIRST_SECTION(NtHeaders);
while (Sections)
@ -2357,10 +2356,10 @@ MiEnablePagingOfDriver(IN PLDR_DATA_TABLE_ENTRY LdrEntry)
Section->
VirtualAddress));
}
/* Compute the size */
Size = max(Section->SizeOfRawData, Section->Misc.VirtualSize);
/* Find the last PTE that maps this section */
LastPte = MiAddressToPte(ImageBase +
Section->VirtualAddress +
@ -2378,12 +2377,12 @@ MiEnablePagingOfDriver(IN PLDR_DATA_TABLE_ENTRY LdrEntry)
PointerPte = NULL;
}
}
/* Keep searching */
Sections--;
Section++;
}
/* Handle the straggler */
if (PointerPte) MiSetPagingOfDriver(PointerPte, LastPte);
}
@ -2427,7 +2426,7 @@ MmCheckSystemImage(IN HANDLE ImageHandle,
PIMAGE_NT_HEADERS NtHeaders;
OBJECT_ATTRIBUTES ObjectAttributes;
PAGED_CODE();
/* Setup the object attributes */
InitializeObjectAttributes(&ObjectAttributes,
NULL,
@ -2485,7 +2484,7 @@ MmCheckSystemImage(IN HANDLE ImageHandle,
Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
goto Fail;
}
/* Make sure it's a real image */
NtHeaders = RtlImageNtHeader(ViewBase);
if (!NtHeaders)
@ -2494,7 +2493,7 @@ MmCheckSystemImage(IN HANDLE ImageHandle,
Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
goto Fail;
}
/* Make sure it's for the correct architecture */
if ((NtHeaders->FileHeader.Machine != IMAGE_FILE_MACHINE_NATIVE) ||
(NtHeaders->OptionalHeader.Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC))
@ -2606,7 +2605,7 @@ MmLoadSystemImage(IN PUNICODE_STRING FileName,
/* Check if we already have a name, use it instead */
if (LoadedName) BaseName = *LoadedName;
/* Check for loader snap debugging */
if (NtGlobalFlag & FLG_SHOW_LDR_SNAPS)
{
@ -2799,7 +2798,7 @@ LoaderScan:
ObDereferenceObject(Section);
Section = NULL;
}
/* Check for failure of the load earlier */
if (!NT_SUCCESS(Status)) goto Quickie;
@ -2812,7 +2811,7 @@ LoaderScan:
STATUS_INVALID_IMAGE_FORMAT);
if (!NT_SUCCESS(Status)) goto Quickie;
/* Get the NT Header */
NtHeader = RtlImageNtHeader(ModuleLoadBase);
@ -3016,7 +3015,7 @@ MiLookupDataTableEntry(IN PVOID Address)
PLDR_DATA_TABLE_ENTRY LdrEntry, FoundEntry = NULL;
PLIST_ENTRY NextEntry;
PAGED_CODE();
/* Loop entries */
NextEntry = PsLoadedModuleList.Flink;
do
@ -3025,7 +3024,7 @@ MiLookupDataTableEntry(IN PVOID Address)
LdrEntry = CONTAINING_RECORD(NextEntry,
LDR_DATA_TABLE_ENTRY,
InLoadOrderLinks);
/* Check if the address matches */
if ((Address >= LdrEntry->DllBase) &&
(Address < (PVOID)((ULONG_PTR)LdrEntry->DllBase +
@ -3035,11 +3034,11 @@ MiLookupDataTableEntry(IN PVOID Address)
FoundEntry = LdrEntry;
break;
}
/* Move on */
NextEntry = NextEntry->Flink;
} while(NextEntry != &PsLoadedModuleList);
/* Return the entry */
return FoundEntry;
}
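/*
 * Illustrative sketch (not from the ReactOS tree): the walk above is the
 * classic LIST_ENTRY pattern -- links are embedded in the record and
 * CONTAINING_RECORD recovers the enclosing structure. Stand-alone model:
 */
#include <stddef.h>
#include <stdint.h>

typedef struct _LIST_ENTRY { struct _LIST_ENTRY *Flink, *Blink; } LIST_ENTRY;

#define CONTAINING_RECORD(addr, type, field) \
    ((type *)((char *)(addr) - offsetof(type, field)))

typedef struct MODULE {
    LIST_ENTRY Links;     /* plays the role of InLoadOrderLinks */
    uintptr_t  DllBase;
    size_t     SizeOfImage;
} MODULE;

/* Find the module whose image covers Address, as the routine above does */
static MODULE *LookupModule(LIST_ENTRY *ListHead, uintptr_t Address)
{
    for (LIST_ENTRY *e = ListHead->Flink; e != ListHead; e = e->Flink)
    {
        MODULE *m = CONTAINING_RECORD(e, MODULE, Links);
        if ((Address >= m->DllBase) &&
            (Address < m->DllBase + m->SizeOfImage))
        {
            return m;
        }
    }
    return NULL;
}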

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::SYSPTE"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -36,22 +35,22 @@ MiReserveAlignedSystemPtes(IN ULONG NumberOfPtes,
KIRQL OldIrql;
PMMPTE PointerPte, NextPte, PreviousPte;
ULONG_PTR ClusterSize;
//
// Sanity check
//
ASSERT(Alignment <= PAGE_SIZE);
//
// Lock the system PTE space
//
OldIrql = KeAcquireQueuedSpinLock(LockQueueSystemSpaceLock);
//
// Get the first free cluster and make sure we have PTEs available
//
PointerPte = &MmFirstFreeSystemPte[SystemPtePoolType];
if (PointerPte->u.List.NextEntry == ((ULONG)0xFFFFF))
{
//
// Fail
@ -59,13 +58,13 @@ MiReserveAlignedSystemPtes(IN ULONG NumberOfPtes,
KeReleaseQueuedSpinLock(LockQueueSystemSpaceLock, OldIrql);
return NULL;
}
//
// Now move to the first free system PTE cluster
//
PreviousPte = PointerPte;
PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
//
// Loop each cluster
//
@ -80,7 +79,7 @@ MiReserveAlignedSystemPtes(IN ULONG NumberOfPtes,
// Keep track of the next cluster in case we have to relink
//
NextPte = PointerPte + 1;
//
// Can this cluster satisfy the request?
//
@ -104,7 +103,7 @@ MiReserveAlignedSystemPtes(IN ULONG NumberOfPtes,
//
NextPte->u.List.NextEntry = ClusterSize - NumberOfPtes;
}
//
// Decrement the free count and move to the next starting PTE
//
@ -112,7 +111,7 @@ MiReserveAlignedSystemPtes(IN ULONG NumberOfPtes,
PointerPte += (ClusterSize - NumberOfPtes);
break;
}
//
// Did we find exactly what you wanted?
//
@ -124,7 +123,7 @@ MiReserveAlignedSystemPtes(IN ULONG NumberOfPtes,
PreviousPte->u.List.NextEntry = PointerPte->u.List.NextEntry;
MmTotalFreeSystemPtes[SystemPtePoolType] -= NumberOfPtes;
break;
}
}
}
else if (NumberOfPtes == 1)
{
@ -135,7 +134,7 @@ MiReserveAlignedSystemPtes(IN ULONG NumberOfPtes,
MmTotalFreeSystemPtes[SystemPtePoolType]--;
break;
}
//
// We couldn't find what you wanted -- is this the last cluster?
//
@ -154,8 +153,8 @@ MiReserveAlignedSystemPtes(IN ULONG NumberOfPtes,
PreviousPte = PointerPte;
PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
ASSERT(PointerPte > PreviousPte);
}
}
//
// Release the lock, flush the TLB and return the first PTE
//
@ -202,30 +201,30 @@ MiReleaseSystemPtes(IN PMMPTE StartingPte,
KIRQL OldIrql;
ULONG_PTR ClusterSize, CurrentSize;
PMMPTE CurrentPte, NextPte, PointerPte;
//
// Check to make sure the PTE address is within bounds
//
ASSERT(NumberOfPtes != 0);
ASSERT(StartingPte >= MmSystemPtesStart[SystemPtePoolType]);
ASSERT(StartingPte <= MmSystemPtesEnd[SystemPtePoolType]);
//
// Zero PTEs
//
RtlZeroMemory(StartingPte, NumberOfPtes * sizeof(MMPTE));
CurrentSize = (ULONG_PTR)(StartingPte - MmSystemPteBase);
//
// Acquire the system PTE lock
//
OldIrql = KeAcquireQueuedSpinLock(LockQueueSystemSpaceLock);
//
// Increase availability
//
MmTotalFreeSystemPtes[SystemPtePoolType] += NumberOfPtes;
//
// Get the free cluster and start going through them
//
@ -243,12 +242,12 @@ MiReleaseSystemPtes(IN PMMPTE StartingPte,
//
ASSERT(((StartingPte + NumberOfPtes) <= PointerPte) ||
(CurrentPte->u.List.NextEntry == ((ULONG)0xFFFFF)));
//
// Get the next cluster in case it's the one
//
NextPte = CurrentPte + 1;
//
// Check if this was actually a single-PTE entry
//
@ -266,7 +265,7 @@ MiReleaseSystemPtes(IN PMMPTE StartingPte,
//
ClusterSize = (ULONG_PTR)NextPte->u.List.NextEntry;
}
//
// So check if this cluster actually describes the entire mapping
//
@ -278,7 +277,7 @@ MiReleaseSystemPtes(IN PMMPTE StartingPte,
NumberOfPtes += ClusterSize;
NextPte->u.List.NextEntry = NumberOfPtes;
CurrentPte->u.List.OneEntry = 0;
//
// Make another pass
//
@ -291,7 +290,7 @@ MiReleaseSystemPtes(IN PMMPTE StartingPte,
//
StartingPte->u.List.NextEntry = CurrentPte->u.List.NextEntry;
CurrentPte->u.List.NextEntry = CurrentSize;
//
// Is there just one page left?
//
@ -312,7 +311,7 @@ MiReleaseSystemPtes(IN PMMPTE StartingPte,
NextPte->u.List.NextEntry = NumberOfPtes;
}
}
//
// Now check if we've arrived at yet another cluster
//
@ -324,7 +323,7 @@ MiReleaseSystemPtes(IN PMMPTE StartingPte,
StartingPte->u.List.NextEntry = PointerPte->u.List.NextEntry;
StartingPte->u.List.OneEntry = 0;
NextPte = StartingPte + 1;
//
// Check if the cluster only had one page
//
@ -343,20 +342,20 @@ MiReleaseSystemPtes(IN PMMPTE StartingPte,
PointerPte++;
ClusterSize = (ULONG_PTR)PointerPte->u.List.NextEntry;
}
//
// And create the final combined cluster
//
NextPte->u.List.NextEntry = NumberOfPtes + ClusterSize;
}
//
// We released the PTEs into their cluster (and optimized the list)
//
KeReleaseQueuedSpinLock(LockQueueSystemSpaceLock, OldIrql);
break;
}
//
// Try the next cluster of PTEs...
//
@ -375,7 +374,7 @@ MiInitializeSystemPtes(IN PMMPTE StartingPte,
// Sanity checks
//
ASSERT(NumberOfPtes >= 1);
//
// Set the starting and ending PTE addresses for this space
//
@ -384,12 +383,12 @@ MiInitializeSystemPtes(IN PMMPTE StartingPte,
MmSystemPtesEnd[PoolType] = StartingPte + NumberOfPtes - 1;
DPRINT("System PTE space for %d starting at: %p and ending at: %p\n",
PoolType, MmSystemPtesStart[PoolType], MmSystemPtesEnd[PoolType]);
//
// Clear all the PTEs to start with
//
RtlZeroMemory(StartingPte, NumberOfPtes * sizeof(MMPTE));
//
// Make the first entry free and link it
//
@ -397,19 +396,19 @@ MiInitializeSystemPtes(IN PMMPTE StartingPte,
MmFirstFreeSystemPte[PoolType].u.Long = 0;
MmFirstFreeSystemPte[PoolType].u.List.NextEntry = StartingPte -
MmSystemPteBase;
//
// The second entry stores the size of this PTE space
//
StartingPte++;
StartingPte->u.Long = 0;
StartingPte->u.List.NextEntry = NumberOfPtes;
//
// We also keep a global for it
//
MmTotalFreeSystemPtes[PoolType] = NumberOfPtes;
//
// Check if this is the system PTE space
//
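/*
 * Illustrative sketch (not from the ReactOS tree; the field layout is a
 * simplification): the free list above lives inside the PTEs themselves --
 * the first PTE of a cluster links to the next cluster, the OneEntry bit
 * marks single-PTE clusters, and a multi-PTE cluster keeps its size in the
 * PTE that follows. The value 0xFFFFF terminates the chain:
 */
#include <stdint.h>

#define PTE_LIST_END 0xFFFFFu /* NextEntry value that ends the chain */

typedef struct FREE_PTE {
    uint32_t NextEntry; /* index of the next free cluster, relative to base */
    uint32_t OneEntry;  /* nonzero when the cluster is a single PTE */
} FREE_PTE;

/* Walk the in-PTE cluster chain and total the free PTEs it describes */
static uint32_t CountFreePtes(const FREE_PTE *Base, uint32_t FirstIndex)
{
    uint32_t Total = 0;
    for (uint32_t i = FirstIndex; i != PTE_LIST_END; i = Base[i].NextEntry)
    {
        /* A single-PTE cluster counts as one; otherwise the size lives in
         * the following PTE's NextEntry field */
        Total += Base[i].OneEntry ? 1 : Base[i + 1].NextEntry;
    }
    return Total;
}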

View file

@ -13,7 +13,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::VADNODE"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -86,7 +85,7 @@ MiCheckForConflictingNode(IN ULONG_PTR StartVpn,
break;
}
}
/* Return either the conflicting node, or no node at all */
return CurrentNode;
}
@ -147,16 +146,16 @@ MiInsertVad(IN PMMVAD Vad,
{
TABLE_SEARCH_RESULT Result;
PMMADDRESS_NODE Parent = NULL;
/* Validate the VAD and set it as the current hint */
ASSERT(Vad->EndingVpn >= Vad->StartingVpn);
Process->VadRoot.NodeHint = Vad;
/* Find the parent VAD and where this child should be inserted */
Result = RtlpFindAvlTableNodeOrParent(&Process->VadRoot, (PVOID)Vad->StartingVpn, &Parent);
ASSERT(Result != TableFoundNode);
ASSERT((Parent != NULL) || (Result == TableEmptyTree));
/* Do the actual insert operation */
MiInsertNode(&Process->VadRoot, (PVOID)Vad, Parent, Result);
}
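/*
 * Illustrative sketch (not from the ReactOS tree; no AVL rebalancing):
 * MiInsertVad first locates the parent with a key search and then attaches
 * the node -- the same find-parent-then-link shape as a plain BST insert:
 */
#include <stddef.h>
#include <stdint.h>

typedef struct BSTNODE { struct BSTNODE *Left, *Right; uintptr_t Key; } BSTNODE;

/* Attach Node at the leaf position located by a key search */
static void BstInsert(BSTNODE **Root, BSTNODE *Node)
{
    BSTNODE **Link = Root;
    while (*Link != NULL)
    {
        Link = (Node->Key < (*Link)->Key) ? &(*Link)->Left
                                          : &(*Link)->Right;
    }
    Node->Left = Node->Right = NULL;
    *Link = Node;
}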
@ -168,7 +167,7 @@ MiRemoveNode(IN PMMADDRESS_NODE Node,
{
/* Call the AVL code */
RtlpDeleteAvlTreeNode(Table, Node);
/* Decrease element count */
Table->NumberGenericTableElements--;
@ -186,7 +185,7 @@ MiRemoveNode(IN PMMADDRESS_NODE Node,
{
PMEMORY_AREA MemoryArea;
PEPROCESS Process;
/* Check if this is VM VAD */
if (Vad->ControlArea == NULL)
{
@ -198,17 +197,17 @@ MiRemoveNode(IN PMMADDRESS_NODE Node,
/* This is a section VAD. We store the ReactOS MEMORY_AREA here */
MemoryArea = (PMEMORY_AREA)Vad->ControlArea->WaitingForDeletion;
}
/* Make sure one actually still exists */
if (MemoryArea)
{
/* Get the process */
Process = CONTAINING_RECORD(Table, EPROCESS, VadRoot);
/* We only create fake memory-areas for ARM3 VADs */
ASSERT(MemoryArea->Type == MEMORY_AREA_OWNED_BY_ARM3);
ASSERT(MemoryArea->Vad == NULL);
/* Free it */
MmFreeMemoryArea(&Process->Vm, MemoryArea, NULL, NULL);
}
@ -241,12 +240,12 @@ MiGetPreviousNode(IN PMMADDRESS_NODE Node)
if (Parent == RtlParentAvl(Parent)) Parent = NULL;
return Parent;
}
/* Keep looping until we find our parent */
Node = Parent;
Parent = RtlParentAvl(Node);
}
/* Nothing found */
return NULL;
}
@ -276,12 +275,12 @@ MiGetNextNode(IN PMMADDRESS_NODE Node)
/* Return it */
return Parent;
}
/* Keep looping until we find our parent */
Node = Parent;
Parent = RtlParentAvl(Node);
}
/* Nothing found */
return NULL;
}
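/*
 * Illustrative sketch (not from the ReactOS tree; RtlParentAvl also strips
 * balance bits, which plain pointers don't need): MiGetNextNode is the
 * textbook in-order successor -- the leftmost node of the right subtree,
 * or else the first ancestor reached from a left child:
 */
#include <stddef.h>

typedef struct NODE { struct NODE *Left, *Right, *Parent; } NODE;

static NODE *NextInOrder(NODE *n)
{
    if (n->Right != NULL)
    {
        /* Go right once, then as far left as possible */
        n = n->Right;
        while (n->Left != NULL) n = n->Left;
        return n;
    }

    /* Climb until we arrive at a parent from its left child */
    while (n->Parent != NULL)
    {
        if (n->Parent->Left == n) return n->Parent;
        n = n->Parent;
    }
    return NULL; /* n was already the maximum */
}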
@ -328,7 +327,7 @@ FoundAtBottom:
{
/* The last aligned page number in this entry */
LowVpn = ROUND_UP(Node->EndingVpn + 1, AlignmentVpn);
/* Keep going as long as there's still a next node */
NextNode = MiGetNextNode(Node);
if (!NextNode) break;
@ -340,7 +339,7 @@ FoundAtBottom:
Found:
/* Yes! Use this VAD to store the allocation */
*PreviousVad = Node;
*Base = ROUND_UP((Node->EndingVpn << PAGE_SHIFT) | (PAGE_SIZE - 1),
Alignment);
return STATUS_SUCCESS;
}
@ -352,7 +351,7 @@ Found:
/* We're down to the last (top) VAD, will this allocation fit inside it? */
HighestVpn = ((ULONG_PTR)MM_HIGHEST_VAD_ADDRESS + 1) >> PAGE_SHIFT;
if ((HighestVpn > LowVpn) && (LengthVpn <= HighestVpn - LowVpn)) goto Found;
/* Nyet, there's no free address space for this allocation, so we'll fail */
return STATUS_NO_MEMORY;
}
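/*
 * Illustrative sketch (not from the ReactOS tree): the final fit test above
 * in isolation -- after aligning the candidate VPN upward, the gap must
 * still hold LengthVpn pages below the highest usable VPN:
 */
#include <stdint.h>

#define ROUND_UP_VPN(x, a) ((((x) + (a) - 1) / (a)) * (a))

static int GapFits(uintptr_t LowVpn, uintptr_t HighestVpn,
                   uintptr_t LengthVpn, uintptr_t AlignmentVpn)
{
    uintptr_t Aligned = ROUND_UP_VPN(LowVpn, AlignmentVpn);
    return (HighestVpn > Aligned) && (LengthVpn <= HighestVpn - Aligned);
}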
@ -373,7 +372,7 @@ MiFindEmptyAddressRangeDownTree(IN SIZE_T Length,
/* Sanity checks */
ASSERT(BoundaryAddress);
ASSERT(BoundaryAddress <= ((ULONG_PTR)MM_HIGHEST_VAD_ADDRESS + 1));
/* Compute page length, make sure the boundary address is valid */
Length = ROUND_TO_PAGES(Length);
PageCount = Length >> PAGE_SHIFT;
@ -390,10 +389,10 @@ MiFindEmptyAddressRangeDownTree(IN SIZE_T Length,
/* Calculate the initial upper margin */
HighVpn = BoundaryAddress >> PAGE_SHIFT;
/* Starting from the root, go down until the right-most child,
trying to stay below the boundary. */
LowestNode = Node = RtlRightChildAvl(&Table->BalancedRoot);
while ( (Child = RtlRightChildAvl(Node)) &&
Child->EndingVpn < HighVpn ) Node = Child;
/* Now loop the Vad nodes */

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::VIRTUAL"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -41,7 +40,7 @@ MiMakeSystemAddressValid(IN PVOID PageTableVirtualAddress,
ASSERT(PageTableVirtualAddress > MM_HIGHEST_USER_ADDRESS);
ASSERT((PageTableVirtualAddress < MmPagedPoolStart) ||
(PageTableVirtualAddress > MmPagedPoolEnd));
/* Working set lock or PFN lock should be held */
ASSERT(KeAreAllApcsDisabled() == TRUE);
@ -84,7 +83,7 @@ MiMakeSystemAddressValidPfn(IN PVOID VirtualAddress,
{
/* Release the PFN database */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
/* Fault it in */
Status = MmAccessFault(FALSE, VirtualAddress, KernelMode, NULL);
if (!NT_SUCCESS(Status))
@ -99,7 +98,7 @@ MiMakeSystemAddressValidPfn(IN PVOID VirtualAddress,
/* This flag will be useful later when we do better locking */
LockChange = TRUE;
/* Lock the PFN database */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
}
@ -114,17 +113,17 @@ MiDeleteSystemPageableVm(IN PMMPTE PointerPte,
IN PFN_NUMBER PageCount,
IN ULONG Flags,
OUT PPFN_NUMBER ValidPages)
{
PFN_NUMBER ActualPages = 0;
PETHREAD CurrentThread = PsGetCurrentThread();
PMMPFN Pfn1, Pfn2;
PFN_NUMBER PageFrameIndex, PageTableIndex;
KIRQL OldIrql;
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
/* Lock the system working set */
MiLockWorkingSet(CurrentThread, &MmSystemCacheWs);
/* Loop all pages */
while (PageCount)
{
@ -134,50 +133,50 @@ MiDeleteSystemPageableVm(IN PMMPTE PointerPte,
/* As always, only handle current ARM3 scenarios */
ASSERT(PointerPte->u.Soft.Prototype == 0);
ASSERT(PointerPte->u.Soft.Transition == 0);
/* Normally this is one possibility -- freeing a valid page */
if (PointerPte->u.Hard.Valid)
{
/* Get the page PFN */
PageFrameIndex = PFN_FROM_PTE(PointerPte);
Pfn1 = MiGetPfnEntry(PageFrameIndex);
/* Should not have any working set data yet */
ASSERT(Pfn1->u1.WsIndex == 0);
/* Actual valid, legitimate, pages */
if (ValidPages) (*ValidPages)++;
/* Get the page table entry */
PageTableIndex = Pfn1->u4.PteFrame;
Pfn2 = MiGetPfnEntry(PageTableIndex);
/* Lock the PFN database */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
/* Delete the page */
MI_SET_PFN_DELETED(Pfn1);
MiDecrementShareCount(Pfn1, PageFrameIndex);
/* Decrement the page table too */
DPRINT("FIXME: ARM3 should decrement the pool PDE refcount for: %p\n", PageTableIndex);
#if 0 // ARM3: Don't trust this yet
MiDecrementShareCount(Pfn2, PageTableIndex);
#endif
/* Release the PFN database */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
/* Destroy the PTE */
PointerPte->u.Long = 0;
}
/* Actual legitimate pages */
ActualPages++;
}
else
{
/*
* The only other ARM3 possibility is a demand zero page, which would
* mean freeing some of the paged pool pages that haven't even been
* touched yet, as part of a larger allocation.
@ -185,22 +184,22 @@ MiDeleteSystemPageableVm(IN PMMPTE PointerPte,
* Right now, we shouldn't expect any page file information in the PTE
*/
ASSERT(PointerPte->u.Soft.PageFileHigh == 0);
/* Destroy the PTE */
PointerPte->u.Long = 0;
}
/* Keep going */
PointerPte++;
PageCount--;
}
/* Release the working set */
MiUnlockWorkingSet(CurrentThread, &MmSystemCacheWs);
/* Flush the entire TLB */
KeFlushEntireTb(TRUE, TRUE);
/* Done */
return ActualPages;
}
@ -243,7 +242,7 @@ MiDeletePte(IN PMMPTE PointerPte,
PointerPde = MiAddressToPde(PointerPte);
if (PointerPde->u.Hard.Valid == 0)
{
#if (_MI_PAGING_LEVELS == 2)
/* Could be paged pool access from a new process -- synchronize the page directories */
if (!NT_SUCCESS(MiCheckPdeForPagedPool(VirtualAddress)))
{
@ -255,7 +254,7 @@ MiDeletePte(IN PMMPTE PointerPte,
PointerPte->u.Long,
(ULONG_PTR)VirtualAddress);
}
#if (_MI_PAGING_LEVELS == 2)
}
#endif
/* FIXME: Drop the reference on the page table. For now, leak it until RosMM is gone */
@ -263,7 +262,7 @@ MiDeletePte(IN PMMPTE PointerPte,
/* Drop the share count */
MiDecrementShareCount(Pfn1, PageFrameIndex);
/* No fork yet */
if (PointerPte <= MiHighestUserPte) ASSERT(PrototypePte == Pfn1->PteAddress);
}
@ -274,7 +273,7 @@ MiDeletePte(IN PMMPTE PointerPte,
{
/* The PFN entry is illegal, or invalid */
KeBugCheckEx(MEMORY_MANAGEMENT,
0x401,
(ULONG_PTR)PointerPte,
PointerPte->u.Long,
(ULONG_PTR)Pfn1->PteAddress);
@ -282,14 +281,14 @@ MiDeletePte(IN PMMPTE PointerPte,
/* There should only be 1 shared reference count */
ASSERT(Pfn1->u2.ShareCount == 1);
/* FIXME: Drop the reference on the page table. For now, leak it until RosMM is gone */
//MiDecrementShareCount(MiGetPfnEntry(Pfn1->u4.PteFrame), Pfn1->u4.PteFrame);
/* Mark the PFN for deletion and dereference what should be the last ref */
MI_SET_PFN_DELETED(Pfn1);
MiDecrementShareCount(Pfn1, PageFrameIndex);
/* We should eventually do this */
//CurrentProcess->NumberOfPrivatePages--;
}
@ -348,15 +347,15 @@ MiDeleteVirtualAddresses(IN ULONG_PTR Va,
/* Still no valid PDE, try the next 4MB (or whatever) */
PointerPde++;
/* Update the PTE on this new boundary */
PointerPte = MiPteToAddress(PointerPde);
/* Check if all the PDEs are invalid, so there's nothing to free */
Va = (ULONG_PTR)MiPteToAddress(PointerPte);
if (Va > EndingAddress) return;
}
/* Now check if the PDE is mapped in */
if (!PointerPde->u.Hard.Valid)
{
@ -364,17 +363,17 @@ MiDeleteVirtualAddresses(IN ULONG_PTR Va,
PointerPte = MiPteToAddress(PointerPde);
MiMakeSystemAddressValid(PointerPte, CurrentProcess);
}
/* Now we should have a valid PDE, mapped in, and still have some VA */
ASSERT(PointerPde->u.Hard.Valid == 1);
ASSERT(Va <= EndingAddress);
/* Check if this is a section VAD with gaps in it */
if ((AddressGap) && (LastPrototypePte))
{
/* We need to skip to the next correct prototype PTE */
PrototypePte = MI_GET_PROTOTYPE_PTE_FOR_VPN(Vad, Va >> PAGE_SHIFT);
/* And we need the subsection to skip to the next last prototype PTE */
Subsection = MiLocateSubsection(Vad, Va >> PAGE_SHIFT);
if (Subsection)
@ -388,7 +387,7 @@ MiDeleteVirtualAddresses(IN ULONG_PTR Va,
PrototypePte = NULL;
}
}
/* Lock the PFN Database while we delete the PTEs */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
do
@ -405,7 +404,7 @@ MiDeleteVirtualAddresses(IN ULONG_PTR Va,
{
/* We need to skip to the next correct prototype PTE */
PrototypePte = MI_GET_PROTOTYPE_PTE_FOR_VPN(Vad, Va >> PAGE_SHIFT);
/* And we need the subsection to skip to the next last prototype PTE */
Subsection = MiLocateSubsection(Vad, Va >> PAGE_SHIFT);
if (Subsection)
@ -419,7 +418,7 @@ MiDeleteVirtualAddresses(IN ULONG_PTR Va,
PrototypePte = NULL;
}
}
/* Check for prototype PTE */
if ((TempPte.u.Hard.Valid == 0) &&
(TempPte.u.Soft.Prototype == 1))
@ -431,7 +430,7 @@ MiDeleteVirtualAddresses(IN ULONG_PTR Va,
{
/* Delete the PTE proper */
MiDeletePte(PointerPte,
(PVOID)Va,
CurrentProcess,
PrototypePte);
}
@ -447,15 +446,15 @@ MiDeleteVirtualAddresses(IN ULONG_PTR Va,
Va += PAGE_SIZE;
PointerPte++;
PrototypePte++;
/* Making sure the PDE is still valid */
ASSERT(PointerPde->u.Hard.Valid == 1);
}
while ((Va & (PDE_MAPPED_VA - 1)) && (Va <= EndingAddress));
/* The PDE should still be valid at this point */
ASSERT(PointerPde->u.Hard.Valid == 1);
/* Release the lock and get out if we're done */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
if (Va > EndingAddress) return;
@ -468,7 +467,7 @@ MiDeleteVirtualAddresses(IN ULONG_PTR Va,
LONG
MiGetExceptionInfo(IN PEXCEPTION_POINTERS ExceptionInfo,
OUT PBOOLEAN HaveBadAddress,
OUT PULONG_PTR BadAddress)
{
PEXCEPTION_RECORD ExceptionRecord;
@ -666,7 +665,7 @@ MiDoMappedCopy(IN PEPROCESS SourceProcess,
// Check if we had locked the pages
//
if (PagesLocked) MmUnlockPages(Mdl);
//
// Check if we hit working set quota
//
@ -1032,7 +1031,7 @@ MmCopyVirtualMemory(IN PEPROCESS SourceProcess,
ExReleaseRundownProtection(&Process->RundownProtect);
return Status;
}
NTSTATUS
NTAPI
MmFlushVirtualMemory(IN PEPROCESS Process,
@ -1042,7 +1041,7 @@ MmFlushVirtualMemory(IN PEPROCESS Process,
{
PAGED_CODE();
UNIMPLEMENTED;
//
// Fake success
//
@ -1110,7 +1109,7 @@ MiQueryAddressState(IN PVOID Va,
/* Only normal VADs supported */
ASSERT(Vad->u.VadFlags.VadType == VadNone);
/* Get the PDE and PTE for the address */
PointerPde = MiAddressToPde(Va);
PointerPte = MiAddressToPte(Va);
@ -1148,7 +1147,7 @@ MiQueryAddressState(IN PVOID Va,
if (ValidPte)
{
/* FIXME: watch out for large pages */
/* Capture the PTE */
TempPte = *PointerPte;
if (TempPte.u.Long)
@ -1184,7 +1183,7 @@ MiQueryAddressState(IN PVOID Va,
{
/* This is committed memory */
State = MEM_COMMIT;
/* Convert the protection */
Protect = MmProtectToValue[Vad->u.VadFlags.Protection];
}
@ -1285,7 +1284,7 @@ NtReadVirtualMemory(IN HANDLE ProcessHandle,
}
_SEH2_END;
}
//
// Don't do zero-byte transfers
//
@ -1312,7 +1311,7 @@ NtReadVirtualMemory(IN HANDLE ProcessHandle,
NumberOfBytesToRead,
PreviousMode,
&BytesRead);
//
// Dereference the process
//
@ -1426,7 +1425,7 @@ NtWriteVirtualMemory(IN HANDLE ProcessHandle,
NumberOfBytesToWrite,
PreviousMode,
&BytesWritten);
//
// Dereference the process
//
@ -1572,7 +1571,7 @@ NtProtectVirtualMemory(IN HANDLE ProcessHandle,
(PVOID*)(&Process),
NULL);
if (!NT_SUCCESS(Status)) return Status;
//
// Check if we should attach
//
@ -1593,7 +1592,7 @@ NtProtectVirtualMemory(IN HANDLE ProcessHandle,
&NumberOfBytesToProtect,
NewAccessProtection,
&OldAccessProtection);
//
// Detach if needed
//
@ -1643,7 +1642,7 @@ NtLockVirtualMemory(IN HANDLE ProcessHandle,
PVOID CapturedBaseAddress;
SIZE_T CapturedBytesToLock;
PAGED_CODE();
//
// Validate flags
//
@ -1654,7 +1653,7 @@ NtLockVirtualMemory(IN HANDLE ProcessHandle,
//
return STATUS_INVALID_PARAMETER;
}
//
// At least one flag must be specified
//
@ -1665,7 +1664,7 @@ NtLockVirtualMemory(IN HANDLE ProcessHandle,
//
return STATUS_INVALID_PARAMETER;
}
//
// Enter SEH for probing
//
@ -1676,7 +1675,7 @@ NtLockVirtualMemory(IN HANDLE ProcessHandle,
//
ProbeForWritePointer(BaseAddress);
ProbeForWriteSize_t(NumberOfBytesToLock);
//
// Capture it
//
@ -1691,12 +1690,12 @@ NtLockVirtualMemory(IN HANDLE ProcessHandle,
_SEH2_YIELD(return _SEH2_GetExceptionCode());
}
_SEH2_END;
//
// Catch illegal base address
//
if (CapturedBaseAddress > MM_HIGHEST_USER_ADDRESS) return STATUS_INVALID_PARAMETER;
//
// Catch illegal region size
//
@ -1707,12 +1706,12 @@ NtLockVirtualMemory(IN HANDLE ProcessHandle,
//
return STATUS_INVALID_PARAMETER;
}
//
// 0 is also illegal
//
if (!CapturedBytesToLock) return STATUS_INVALID_PARAMETER;
//
// Get a reference to the process
//
@ -1723,7 +1722,7 @@ NtLockVirtualMemory(IN HANDLE ProcessHandle,
(PVOID*)(&Process),
NULL);
if (!NT_SUCCESS(Status)) return Status;
//
// Check if this is system-mapped
//
@ -1741,7 +1740,7 @@ NtLockVirtualMemory(IN HANDLE ProcessHandle,
return STATUS_PRIVILEGE_NOT_HELD;
}
}
//
// Check if we should attach
//
@ -1753,22 +1752,22 @@ NtLockVirtualMemory(IN HANDLE ProcessHandle,
KeStackAttachProcess(&Process->Pcb, &ApcState);
Attached = TRUE;
}
//
// Oops :(
//
UNIMPLEMENTED;
//
// Detach if needed
//
if (Attached) KeUnstackDetachProcess(&ApcState);
//
// Release reference
//
ObDereferenceObject(Process);
//
// Enter SEH to return data
//
@ -1788,7 +1787,7 @@ NtLockVirtualMemory(IN HANDLE ProcessHandle,
_SEH2_YIELD(return _SEH2_GetExceptionCode());
}
_SEH2_END;
//
// Return status
//
@ -1811,7 +1810,7 @@ NtUnlockVirtualMemory(IN HANDLE ProcessHandle,
PVOID CapturedBaseAddress;
SIZE_T CapturedBytesToUnlock;
PAGED_CODE();
//
// Validate flags
//
@ -1822,7 +1821,7 @@ NtUnlockVirtualMemory(IN HANDLE ProcessHandle,
//
return STATUS_INVALID_PARAMETER;
}
//
// At least one flag must be specified
//
@ -1833,7 +1832,7 @@ NtUnlockVirtualMemory(IN HANDLE ProcessHandle,
//
return STATUS_INVALID_PARAMETER;
}
//
// Enter SEH for probing
//
@ -1844,7 +1843,7 @@ NtUnlockVirtualMemory(IN HANDLE ProcessHandle,
//
ProbeForWritePointer(BaseAddress);
ProbeForWriteSize_t(NumberOfBytesToUnlock);
//
// Capture it
//
@ -1859,12 +1858,12 @@ NtUnlockVirtualMemory(IN HANDLE ProcessHandle,
_SEH2_YIELD(return _SEH2_GetExceptionCode());
}
_SEH2_END;
//
// Catch illegal base address
//
if (CapturedBaseAddress > MM_HIGHEST_USER_ADDRESS) return STATUS_INVALID_PARAMETER;
//
// Catch illegal region size
//
@ -1875,12 +1874,12 @@ NtUnlockVirtualMemory(IN HANDLE ProcessHandle,
//
return STATUS_INVALID_PARAMETER;
}
//
// 0 is also illegal
//
if (!CapturedBytesToUnlock) return STATUS_INVALID_PARAMETER;
//
// Get a reference to the process
//
@ -1891,7 +1890,7 @@ NtUnlockVirtualMemory(IN HANDLE ProcessHandle,
(PVOID*)(&Process),
NULL);
if (!NT_SUCCESS(Status)) return Status;
//
// Check if this is system-mapped
//
@ -1909,7 +1908,7 @@ NtUnlockVirtualMemory(IN HANDLE ProcessHandle,
return STATUS_PRIVILEGE_NOT_HELD;
}
}
//
// Check if we should attach
//
@ -1921,22 +1920,22 @@ NtUnlockVirtualMemory(IN HANDLE ProcessHandle,
KeStackAttachProcess(&Process->Pcb, &ApcState);
Attached = TRUE;
}
//
// Oops :(
//
UNIMPLEMENTED;
//
// Detach if needed
//
if (Attached) KeUnstackDetachProcess(&ApcState);
//
// Release reference
//
ObDereferenceObject(Process);
//
// Enter SEH to return data
//
@ -1956,7 +1955,7 @@ NtUnlockVirtualMemory(IN HANDLE ProcessHandle,
_SEH2_YIELD(return _SEH2_GetExceptionCode());
}
_SEH2_END;
//
// Return status
//
@ -1977,7 +1976,7 @@ NtFlushVirtualMemory(IN HANDLE ProcessHandle,
SIZE_T CapturedBytesToFlush;
IO_STATUS_BLOCK LocalStatusBlock;
PAGED_CODE();
//
// Check if we came from user mode
//
@ -1994,7 +1993,7 @@ NtFlushVirtualMemory(IN HANDLE ProcessHandle,
ProbeForWritePointer(BaseAddress);
ProbeForWriteSize_t(NumberOfBytesToFlush);
ProbeForWriteIoStatusBlock(IoStatusBlock);
//
// Capture them
//
@ -2018,12 +2017,12 @@ NtFlushVirtualMemory(IN HANDLE ProcessHandle,
CapturedBaseAddress = *BaseAddress;
CapturedBytesToFlush = *NumberOfBytesToFlush;
}
//
// Catch illegal base address
//
if (CapturedBaseAddress > MM_HIGHEST_USER_ADDRESS) return STATUS_INVALID_PARAMETER;
//
// Catch illegal region size
//
@ -2034,7 +2033,7 @@ NtFlushVirtualMemory(IN HANDLE ProcessHandle,
//
return STATUS_INVALID_PARAMETER;
}
//
// Get a reference to the process
//
@ -2045,7 +2044,7 @@ NtFlushVirtualMemory(IN HANDLE ProcessHandle,
(PVOID*)(&Process),
NULL);
if (!NT_SUCCESS(Status)) return Status;
//
// Do it
//
@ -2053,12 +2052,12 @@ NtFlushVirtualMemory(IN HANDLE ProcessHandle,
&CapturedBaseAddress,
&CapturedBytesToFlush,
&LocalStatusBlock);
//
// Release reference
//
ObDereferenceObject(Process);
//
// Enter SEH to return data
//
@ -2075,7 +2074,7 @@ NtFlushVirtualMemory(IN HANDLE ProcessHandle,
{
}
_SEH2_END;
//
// Return status
//
@ -2101,7 +2100,7 @@ NtGetWriteWatch(IN HANDLE ProcessHandle,
KPROCESSOR_MODE PreviousMode = ExGetPreviousMode();
ULONG_PTR CapturedEntryCount;
PAGED_CODE();
//
// Check if we came from user mode
//
@ -2116,7 +2115,7 @@ NtGetWriteWatch(IN HANDLE ProcessHandle,
// Catch illegal base address
//
if (BaseAddress > MM_HIGHEST_USER_ADDRESS) return STATUS_INVALID_PARAMETER_2;
//
// Catch illegal region size
//
@ -2127,23 +2126,23 @@ NtGetWriteWatch(IN HANDLE ProcessHandle,
//
return STATUS_INVALID_PARAMETER_3;
}
//
// Validate all data
//
ProbeForWriteSize_t(EntriesInUserAddressArray);
ProbeForWriteUlong(Granularity);
//
// Capture them
//
CapturedEntryCount = *EntriesInUserAddressArray;
//
// Must have a count
//
if (CapturedEntryCount == 0) return STATUS_INVALID_PARAMETER_5;
//
// Can't be larger than the maximum
//
@ -2154,7 +2153,7 @@ NtGetWriteWatch(IN HANDLE ProcessHandle,
//
return STATUS_INVALID_PARAMETER_5;
}
//
// Probe the actual array
//
@ -2179,7 +2178,7 @@ NtGetWriteWatch(IN HANDLE ProcessHandle,
CapturedEntryCount = *EntriesInUserAddressArray;
ASSERT(CapturedEntryCount != 0);
}
//
// Check if this is a local request
//
@ -2190,7 +2189,7 @@ NtGetWriteWatch(IN HANDLE ProcessHandle,
//
Process = PsGetCurrentProcess();
}
else
{
//
// Reference the target
@ -2203,7 +2202,7 @@ NtGetWriteWatch(IN HANDLE ProcessHandle,
NULL);
if (!NT_SUCCESS(Status)) return Status;
}
//
// Compute the last address and validate it
//
@ -2216,17 +2215,17 @@ NtGetWriteWatch(IN HANDLE ProcessHandle,
if (ProcessHandle != NtCurrentProcess()) ObDereferenceObject(Process);
return STATUS_INVALID_PARAMETER_4;
}
//
// Oops :(
//
UNIMPLEMENTED;
//
// Dereference if needed
//
if (ProcessHandle != NtCurrentProcess()) ObDereferenceObject(Process);
//
// Enter SEH to return data
//
@ -2246,7 +2245,7 @@ NtGetWriteWatch(IN HANDLE ProcessHandle,
Status = _SEH2_GetExceptionCode();
}
_SEH2_END;
//
// Return success
//
@ -2267,12 +2266,12 @@ NtResetWriteWatch(IN HANDLE ProcessHandle,
NTSTATUS Status;
KPROCESSOR_MODE PreviousMode = ExGetPreviousMode();
ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);
//
// Catch illegal base address
//
if (BaseAddress > MM_HIGHEST_USER_ADDRESS) return STATUS_INVALID_PARAMETER_2;
//
// Catch illegal region size
//
@ -2283,7 +2282,7 @@ NtResetWriteWatch(IN HANDLE ProcessHandle,
//
return STATUS_INVALID_PARAMETER_3;
}
//
// Check if this is a local request
//
@ -2294,7 +2293,7 @@ NtResetWriteWatch(IN HANDLE ProcessHandle,
//
Process = PsGetCurrentProcess();
}
else
{
//
// Reference the target
@ -2307,7 +2306,7 @@ NtResetWriteWatch(IN HANDLE ProcessHandle,
NULL);
if (!NT_SUCCESS(Status)) return Status;
}
//
// Compute the last address and validate it
//
@ -2320,17 +2319,17 @@ NtResetWriteWatch(IN HANDLE ProcessHandle,
if (ProcessHandle != NtCurrentProcess()) ObDereferenceObject(Process);
return STATUS_INVALID_PARAMETER_3;
}
//
// Oops :(
//
UNIMPLEMENTED;
//
// Dereference if needed
//
if (ProcessHandle != NtCurrentProcess()) ObDereferenceObject(Process);
//
// Return success
//
@ -2420,7 +2419,7 @@ MiQueryMemoryBasicInformation(IN HANDLE ProcessHandle,
(PVOID*)&TargetProcess,
NULL);
if (!NT_SUCCESS(Status)) return Status;
/* Attach to it now */
KeStackAttachProcess(&TargetProcess->Pcb, &ApcState);
}
@ -2542,14 +2541,14 @@ MiQueryMemoryBasicInformation(IN HANDLE ProcessHandle,
/* This must be a VM VAD */
ASSERT(Vad->u.VadFlags.PrivateMemory);
/* Build the initial information block */
Address = PAGE_ALIGN(BaseAddress);
MemoryInfo.BaseAddress = Address;
MemoryInfo.AllocationBase = (PVOID)(Vad->StartingVpn << PAGE_SHIFT);
MemoryInfo.AllocationProtect = MmProtectToValue[Vad->u.VadFlags.Protection];
MemoryInfo.Type = MEM_PRIVATE;
/* Find the largest chunk of memory which has the same state and protection mask */
MemoryInfo.State = MiQueryAddressState(Address,
Vad,
@ -2722,7 +2721,7 @@ NtQueryVirtualMemory(IN HANDLE ProcessHandle,
return STATUS_INFO_LENGTH_MISMATCH;
}
Status = MiQueryMemoryBasicInformation(ProcessHandle,
BaseAddress,
MemoryInformation,
MemoryInformationLength,
ReturnLength);
@ -2736,7 +2735,7 @@ NtQueryVirtualMemory(IN HANDLE ProcessHandle,
return STATUS_INFO_LENGTH_MISMATCH;
}
Status = MiQueryMemorySectionName(ProcessHandle,
BaseAddress,
MemoryInformation,
MemoryInformationLength,
ReturnLength);

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::ZEROPAGE"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -35,7 +34,7 @@ MmZeroPageThread(VOID)
PVOID ZeroAddress;
PFN_NUMBER PageIndex, FreePage;
PMMPFN Pfn1;
/* FIXME: Get the discardable sections to free them */
// MiFindInitializationCode(&StartAddress, &EndAddress);
// if (StartAddress) MiFreeInitializationCode(StartAddress, EndAddress);
@ -44,7 +43,7 @@ MmZeroPageThread(VOID)
/* Set our priority to 0 */
Thread->BasePriority = 0;
KeSetPriorityThread(Thread, 0);
/* Setup the wait objects */
WaitObjects[0] = &MmZeroingPageEvent;
// WaitObjects[1] = &PoSystemIdleTimer; FIXME: Implement idle timer
@ -75,7 +74,7 @@ MmZeroPageThread(VOID)
MI_SET_USAGE(MI_USAGE_ZERO_LOOP);
MI_SET_PROCESS2("Kernel 0 Loop");
FreePage = MiRemoveAnyPage(MI_GET_PAGE_COLOR(PageIndex));
/* The first global free page should also be the first on its own list */
if (FreePage != PageIndex)
{
@ -85,15 +84,15 @@ MmZeroPageThread(VOID)
PageIndex,
0);
}
Pfn1->u1.Flink = LIST_HEAD;
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
ZeroAddress = MiMapPagesToZeroInHyperSpace(Pfn1, 1);
ASSERT(ZeroAddress);
RtlZeroMemory(ZeroAddress, PAGE_SIZE);
MiUnmapPagesInZeroSpace(ZeroAddress, 1);
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MiInsertPageInList(&MmZeroedPageListHead, PageIndex);
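/*
 * Illustrative sketch (not from the ReactOS tree; the real loop holds the
 * PFN lock around the list edits and maps the page through hyperspace):
 * the steady state above is pop-from-free, zero while unlocked, push-to-
 * zeroed. Stand-alone model using indices into a fake page array:
 */
#include <stdint.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096
#define SKETCH_LIST_END  ((uint32_t)-1) /* stand-in for LIST_HEAD */

typedef struct PAGE_LISTS {
    uint32_t FreeHead, ZeroedHead;
    uint32_t Flink[64];                    /* next-page links */
    uint8_t  Memory[64][SKETCH_PAGE_SIZE]; /* fake physical pages */
} PAGE_LISTS;

static void ZeroOnePage(PAGE_LISTS *p)
{
    uint32_t Page = p->FreeHead;
    if (Page == SKETCH_LIST_END) return;   /* nothing left to zero */

    /* Pop the first free page (done under the PFN lock in the kernel) */
    p->FreeHead = p->Flink[Page];
    p->Flink[Page] = SKETCH_LIST_END;

    /* Zero it -- the kernel drops the lock for this part */
    memset(p->Memory[Page], 0, SKETCH_PAGE_SIZE);

    /* Push it onto the zeroed list (lock reacquired in the kernel) */
    p->Flink[Page] = p->ZeroedHead;
    p->ZeroedHead = Page;
}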

View file

@ -12,7 +12,6 @@
#define NDEBUG
#include <debug.h>
#line 15 "ARM³::ARMPAGE"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
@ -329,7 +328,7 @@ MmInitGlobalKernelPageDirectory(VOID)
ULONG i;
PULONG CurrentPageDirectory = (PULONG)PDE_BASE;
/* Loop the 2GB of address space which belongs to the kernel */
for (i = MiGetPdeOffset(MmSystemRangeStart); i < 2048; i++)
{