reactos/ntoskrnl/mm/ARM3/mmsup.c


/*
* PROJECT: ReactOS Kernel
* LICENSE: BSD - See COPYING.ARM in the top level directory
* FILE: ntoskrnl/mm/ARM3/mmsup.c
* PURPOSE: ARM Memory Manager Support Routines
* PROGRAMMERS: ReactOS Portable Systems Group
*/
/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::MMSUP"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmMapUserAddressesToPage(IN PVOID BaseAddress,
                         IN SIZE_T NumberOfBytes,
                         IN PVOID PageAddress)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmAdjustWorkingSetSize(IN SIZE_T WorkingSetMinimumInBytes,
                       IN SIZE_T WorkingSetMaximumInBytes,
                       IN ULONG SystemCache,
                       IN BOOLEAN IncreaseOkay)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
BOOLEAN
NTAPI
MmSetAddressRangeModified(IN PVOID Address,
                          IN SIZE_T Length)
{
    UNIMPLEMENTED;
    return FALSE;
}

/*
 * @implemented
 */
BOOLEAN
NTAPI
MmIsAddressValid(IN PVOID VirtualAddress)
{
#if _MI_PAGING_LEVELS >= 4
    /* Check if the PXE is valid */
    if (MiAddressToPxe(VirtualAddress)->u.Hard.Valid == 0) return FALSE;
#endif

#if _MI_PAGING_LEVELS >= 3
    /* Check if the PPE is valid */
    if (MiAddressToPpe(VirtualAddress)->u.Hard.Valid == 0) return FALSE;
#endif

#if _MI_PAGING_LEVELS >= 2
    /* Check if the PDE is valid */
    if (MiAddressToPde(VirtualAddress)->u.Hard.Valid == 0) return FALSE;
#endif

    /* Check if the PTE is valid */
    if (MiAddressToPte(VirtualAddress)->u.Hard.Valid == 0) return FALSE;

    /* This address is valid now, but it will only stay so if the caller
     * holds the PFN lock */
    return TRUE;
}
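
/* Illustrative sketch (not part of the original file): a debugger-style
 * helper might call MmIsAddressValid before touching a kernel address so
 * that an unmapped page does not raise a fault. The helper below is a
 * hypothetical example; note that without the PFN lock the result is only
 * a snapshot and can become stale immediately after the check. */
#if 0
static BOOLEAN
ExampleTryReadUlong(IN PVOID Address,
                    OUT PULONG Value)
{
    /* Reject the access if any paging level for this address is invalid */
    if (!MmIsAddressValid(Address)) return FALSE;

    /* The mapping was valid at the time of the check; copy the value out */
    *Value = *(volatile ULONG *)Address;
    return TRUE;
}
#endif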

/*
 * @unimplemented
 */
BOOLEAN
NTAPI
MmIsNonPagedSystemAddressValid(IN PVOID VirtualAddress)
{
    DPRINT1("WARNING: %s returns bogus result\n", __FUNCTION__);
    return MmIsAddressValid(VirtualAddress);
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmSetBankedSection(IN HANDLE ProcessHandle,
                   IN PVOID VirtualAddress,
                   IN ULONG BankLength,
                   IN BOOLEAN ReadWriteBank,
                   IN PVOID BankRoutine,
                   IN PVOID Context)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @implemented
 */
BOOLEAN
NTAPI
MmIsRecursiveIoFault(VOID)
{
    PETHREAD Thread = PsGetCurrentThread();

    //
    // If any of these is true, this is a recursive fault
    //
    return ((Thread->DisablePageFaultClustering) | (Thread->ForwardClusterOnly));
}

/*
 * @implemented
 */
BOOLEAN
NTAPI
MmIsThisAnNtAsSystem(VOID)
{
    /* Return whether this is a server system */
    return MmProductType;
}

/*
 * @implemented
 */
MM_SYSTEMSIZE
NTAPI
MmQuerySystemSize(VOID)
{
    /* Return the low, medium or high memory system type */
    return MmSystemSize;
}
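
/* Illustrative sketch (not part of the original file): a driver might scale
 * an internal cache or lookaside-list depth based on the reported system
 * size. The helper and depth values below are hypothetical examples, not
 * ReactOS code. */
#if 0
static ULONG
ExampleChooseCacheDepth(VOID)
{
    /* Pick a deeper cache on machines with more memory */
    switch (MmQuerySystemSize())
    {
        case MmSmallSystem:  return 16;
        case MmMediumSystem: return 64;
        case MmLargeSystem:  return 256;
        default:             return 16;
    }
}
#endif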
/* EOF */