[NTOS:MM][NTOS:CC][FASTFAT][FASTFAT_NEW] Massive overhaul

- Make Cc be a client of Mm
   - Cc now creates a section object for each Shared cache map it creates
   - Some functions were introduced into Mm in order to let Cc take notice of dirtied pages, file flushes & purges, etc.
 - Make Mm the real master of how pages are kept in memory for mapped files (including cached files)
   - Keep track of dirty pages
   - Do not dismiss pages as soon as section object are closed
 - Fine-tune the balancer to account for these changes (far from perfect yet)
 - Those changes are incompatible with fastfat -> enable fastfat_new
   - Make Cc routines compatible with it, esp. regarding file locking, write-behind, etc.
This commit is contained in:
Jérôme Gardou 2021-02-03 10:29:45 +01:00
commit b0c143c710
41 changed files with 3638 additions and 3867 deletions

View file

@ -555,7 +555,7 @@ InstallBootCodeToDisk(
IN PCWSTR RootPath,
IN PFS_INSTALL_BOOTCODE InstallBootCode)
{
NTSTATUS Status;
NTSTATUS Status, LockStatus;
UNICODE_STRING Name;
OBJECT_ATTRIBUTES ObjectAttributes;
IO_STATUS_BLOCK IoStatusBlock;
@ -584,9 +584,32 @@ InstallBootCodeToDisk(
if (!NT_SUCCESS(Status))
return Status;
/* Lock the volume */
LockStatus = NtFsControlFile(PartitionHandle, NULL, NULL, NULL, &IoStatusBlock, FSCTL_LOCK_VOLUME, NULL, 0, NULL, 0);
if (!NT_SUCCESS(LockStatus))
{
DPRINT1("Unable to lock the volume before installing boot code. Status 0x%08x. Expect problems.\n", LockStatus);
}
/* Install the bootcode (MBR, VBR) */
Status = InstallBootCode(SrcPath, PartitionHandle, PartitionHandle);
/* dismount & Unlock the volume */
if (NT_SUCCESS(LockStatus))
{
LockStatus = NtFsControlFile(PartitionHandle, NULL, NULL, NULL, &IoStatusBlock, FSCTL_DISMOUNT_VOLUME, NULL, 0, NULL, 0);
if (!NT_SUCCESS(LockStatus))
{
DPRINT1("Unable to unlock the volume after installing boot code. Status 0x%08x. Expect problems.\n", LockStatus);
}
LockStatus = NtFsControlFile(PartitionHandle, NULL, NULL, NULL, &IoStatusBlock, FSCTL_UNLOCK_VOLUME, NULL, 0, NULL, 0);
if (!NT_SUCCESS(LockStatus))
{
DPRINT1("Unable to unlock the volume after installing boot code. Status 0x%08x. Expect problems.\n", LockStatus);
}
}
/* Close the partition */
NtClose(PartitionHandle);
@ -1076,6 +1099,7 @@ InstallFatBootcodeToPartition(
DPRINT1("Install FAT32 bootcode: %S ==> %S\n", SrcPath, SystemRootPath->Buffer);
Status = InstallBootCodeToDisk(SrcPath, SystemRootPath->Buffer, InstallFat32BootCode);
DPRINT1("Status: 0x%08X\n", Status);
if (!NT_SUCCESS(Status))
{
DPRINT1("InstallBootCodeToDisk(FAT32) failed (Status %lx)\n", Status);

View file

@ -2,7 +2,8 @@
add_subdirectory(btrfs)
add_subdirectory(cdfs)
add_subdirectory(ext2)
add_subdirectory(fastfat)
#add_subdirectory(fastfat)
add_subdirectory(fastfat_new)
add_subdirectory(ffs)
add_subdirectory(fs_rec)
add_subdirectory(msfs)

View file

@ -6,7 +6,7 @@ typedef struct _NOCC_BCB
PUBLIC_BCB Bcb;
struct _NOCC_CACHE_MAP *Map;
PROS_SECTION_OBJECT SectionObject;
PSECTION SectionObject;
LARGE_INTEGER FileOffset;
ULONG Length;
PVOID BaseAddress;
@ -62,10 +62,6 @@ NTAPI
CcpUnpinData(PNOCC_BCB Bcb,
BOOLEAN ActuallyRelease);
BOOLEAN
NTAPI
CcInitializeCacheManager(VOID);
VOID
NTAPI
CcShutdownSystem(VOID);

View file

@ -112,6 +112,7 @@ _MmUnlockSectionSegment(PMM_SECTION_SEGMENT Segment, const char *file, int line)
//DPRINT("MmUnlockSectionSegment(%p,%s:%d)\n", Segment, file, line);
}
#ifdef NEWCC
/*
MiFlushMappedSection
@ -265,7 +266,6 @@ This deletes a segment entirely including its page map.
It must have been unmapped in every address space.
*/
VOID
NTAPI
MmFinalizeSegment(PMM_SECTION_SEGMENT Segment)
@ -603,6 +603,7 @@ _MiMapViewOfSegment(PMMSUPPORT AddressSpace,
return STATUS_SUCCESS;
}
#endif
/*
@ -708,6 +709,7 @@ MmFreeCacheSectionPage(PVOID Context,
}
}
#ifdef NEWCC
NTSTATUS
NTAPI
MmUnmapViewOfCacheSegment(PMMSUPPORT AddressSpace,
@ -840,5 +842,6 @@ MmUnmapCacheViewInSystemSpace (IN PVOID MappedBase)
return Status;
}
#endif /* NEWCC */
/* EOF */

View file

@ -82,6 +82,8 @@ rmaps, so each mapping should be immediately followed by an rmap addition.
#define DPRINTC DPRINT
extern KEVENT MmWaitPageEvent;
#ifdef NEWCC
extern PMMWSL MmWorkingSetList;
/*
@ -150,6 +152,7 @@ MmNotPresentFaultCachePage (
if (Segment->FileObject)
{
__debugbreak();
DPRINT("FileName %wZ\n", &Segment->FileObject->FileName);
}
@ -453,9 +456,11 @@ MiCowCacheSectionPage (
DPRINT("Address 0x%p\n", Address);
return STATUS_SUCCESS;
}
#endif
KEVENT MmWaitPageEvent;
#ifdef NEWCC
typedef struct _WORK_QUEUE_WITH_CONTEXT
{
WORK_QUEUE_ITEM WorkItem;
@ -960,3 +965,4 @@ MmNotPresentFaultCacheSection(KPROCESSOR_MODE Mode,
return Status;
}
#endif

View file

@ -188,6 +188,7 @@ MiSimpleRead(PFILE_OBJECT FileObject,
return Status;
}
#ifdef NEWCC
/*
Convenience function for writing from kernel space. This issues a paging
@ -334,3 +335,4 @@ _MiWriteBackPage(PFILE_OBJECT FileObject,
return Status;
}
#endif

View file

@ -3,26 +3,8 @@
#include <internal/arch/mm.h>
/* TYPES *********************************************************************/
#define PFN_FROM_SSE(E) ((PFN_NUMBER)((E) >> PAGE_SHIFT))
#define IS_SWAP_FROM_SSE(E) ((E) & 0x00000001)
#define MM_IS_WAIT_PTE(E) \
(IS_SWAP_FROM_SSE(E) && SWAPENTRY_FROM_SSE(E) == MM_WAIT_ENTRY)
#define MAKE_PFN_SSE(P) ((ULONG_PTR)((P) << PAGE_SHIFT))
#define SWAPENTRY_FROM_SSE(E) ((E) >> 1)
#define MAKE_SWAP_SSE(S) (((ULONG_PTR)(S) << 1) | 0x1)
#define DIRTY_SSE(E) ((E) | 2)
#define CLEAN_SSE(E) ((E) & ~2)
#define IS_DIRTY_SSE(E) ((E) & 2)
#define PAGE_FROM_SSE(E) ((E) & 0xFFFFF000)
#define SHARE_COUNT_FROM_SSE(E) (((E) & 0x00000FFC) >> 2)
#define MAX_SHARE_COUNT 0x3FF
#define MAKE_SSE(P, C) ((ULONG_PTR)((P) | ((C) << 2)))
#define MM_SEGMENT_FINALIZE (0x40000000)
#define RMAP_SEGMENT_MASK ~((ULONG_PTR)0xff)
#define RMAP_IS_SEGMENT(x) (((ULONG_PTR)(x) & RMAP_SEGMENT_MASK) == RMAP_SEGMENT_MASK)
#define MIN(x,y) (((x)<(y))?(x):(y))
#define MAX(x,y) (((x)>(y))?(x):(y))
@ -96,7 +78,7 @@ typedef struct _MM_REQUIRED_RESOURCES
NTSTATUS
NTAPI
MmCreateCacheSection(PROS_SECTION_OBJECT *SectionObject,
MmCreateCacheSection(PSECTION *SectionObject,
ACCESS_MASK DesiredAccess,
POBJECT_ATTRIBUTES ObjectAttributes,
PLARGE_INTEGER UMaximumSize,
@ -123,25 +105,6 @@ VOID
NTAPI
MiInitializeSectionPageTable(PMM_SECTION_SEGMENT Segment);
NTSTATUS
NTAPI
_MmSetPageEntrySectionSegment(PMM_SECTION_SEGMENT Segment,
PLARGE_INTEGER Offset,
ULONG_PTR Entry,
const char *file,
int line);
ULONG_PTR
NTAPI
_MmGetPageEntrySectionSegment(PMM_SECTION_SEGMENT Segment,
PLARGE_INTEGER Offset,
const char *file,
int line);
#define MmSetPageEntrySectionSegment(S,O,E) _MmSetPageEntrySectionSegment(S,O,E,__FILE__,__LINE__)
#define MmGetPageEntrySectionSegment(S,O) _MmGetPageEntrySectionSegment(S,O,__FILE__,__LINE__)
typedef VOID (NTAPI *FREE_SECTION_PAGE_FUN)(
PMM_SECTION_SEGMENT Segment,
PLARGE_INTEGER Offset);
@ -151,12 +114,6 @@ NTAPI
MmFreePageTablesSectionSegment(PMM_SECTION_SEGMENT Segment,
FREE_SECTION_PAGE_FUN FreePage);
/* Yields a lock */
PMM_SECTION_SEGMENT
NTAPI
MmGetSectionAssociation(PFN_NUMBER Page,
PLARGE_INTEGER Offset);
NTSTATUS
NTAPI
MmSetSectionAssociation(PFN_NUMBER Page,
@ -267,22 +224,6 @@ MmPageOutDeleteMapping(PVOID Context,
PEPROCESS Process,
PVOID Address);
VOID
NTAPI
_MmLockSectionSegment(PMM_SECTION_SEGMENT Segment,
const char *file,
int line);
#define MmLockSectionSegment(x) _MmLockSectionSegment(x,__FILE__,__LINE__)
VOID
NTAPI
_MmUnlockSectionSegment(PMM_SECTION_SEGMENT Segment,
const char *file,
int line);
#define MmUnlockSectionSegment(x) _MmUnlockSectionSegment(x,__FILE__,__LINE__)
VOID
MmFreeCacheSectionPage(PVOID Context,
MEMORY_AREA* MemoryArea,
@ -386,7 +327,7 @@ MiSwapInSectionPage(PMMSUPPORT AddressSpace,
NTSTATUS
NTAPI
MmExtendCacheSection(PROS_SECTION_OBJECT Section,
MmExtendCacheSection(PSECTION Section,
PLARGE_INTEGER NewSize,
BOOLEAN ExtendFile);

View file

@ -187,9 +187,6 @@ _MmSetPageEntrySectionSegment(PMM_SECTION_SEGMENT Segment,
ASSERT(Segment->Locked);
ASSERT(!IS_SWAP_FROM_SSE(Entry) || !IS_DIRTY_SSE(Entry));
if (Entry && !IS_SWAP_FROM_SSE(Entry))
MmGetRmapListHeadPage(PFN_FROM_SSE(Entry));
PageTable = MiSectionPageTableGetOrAllocate(&Segment->PageTable, Offset);
if (!PageTable) return STATUS_NO_MEMORY;
@ -207,27 +204,51 @@ _MmSetPageEntrySectionSegment(PMM_SECTION_SEGMENT Segment,
OldEntry,
Entry);
if (PFN_FROM_SSE(Entry) == PFN_FROM_SSE(OldEntry)) {
/* Nothing */
} else if (Entry && !IS_SWAP_FROM_SSE(Entry)) {
ASSERT(!OldEntry || IS_SWAP_FROM_SSE(OldEntry));
MmSetSectionAssociation(PFN_FROM_SSE(Entry), Segment, Offset);
} else if (OldEntry && !IS_SWAP_FROM_SSE(OldEntry)) {
ASSERT(!Entry || IS_SWAP_FROM_SSE(Entry));
MmDeleteSectionAssociation(PFN_FROM_SSE(OldEntry));
} else if (IS_SWAP_FROM_SSE(Entry)) {
ASSERT(!IS_SWAP_FROM_SSE(OldEntry) ||
SWAPENTRY_FROM_SSE(OldEntry) == MM_WAIT_ENTRY);
if (OldEntry && SWAPENTRY_FROM_SSE(OldEntry) != MM_WAIT_ENTRY)
MmDeleteSectionAssociation(PFN_FROM_SSE(OldEntry));
} else if (IS_SWAP_FROM_SSE(OldEntry)) {
ASSERT(!IS_SWAP_FROM_SSE(Entry));
if (Entry)
MmSetSectionAssociation(PFN_FROM_SSE(OldEntry), Segment, Offset);
} else {
/* We should not be replacing a page like this */
ASSERT(FALSE);
if (Entry && !IS_SWAP_FROM_SSE(Entry))
{
/* We have a valid entry. See if we must do something */
if (OldEntry && !IS_SWAP_FROM_SSE(OldEntry))
{
/* The previous entry was valid. Shall we swap the Rmaps ? */
if (PFN_FROM_SSE(Entry) != PFN_FROM_SSE(OldEntry))
{
MmDeleteSectionAssociation(PFN_FROM_SSE(OldEntry));
MmSetSectionAssociation(PFN_FROM_SSE(Entry), Segment, Offset);
}
}
else
{
/*
* We're switching to a valid entry from an invalid one.
* Add the Rmap and take a ref on the segment.
*/
MmSetSectionAssociation(PFN_FROM_SSE(Entry), Segment, Offset);
InterlockedIncrement64(Segment->ReferenceCount);
if (Offset->QuadPart >= (Segment->LastPage << PAGE_SHIFT))
Segment->LastPage = (Offset->QuadPart >> PAGE_SHIFT) + 1;
}
}
else if (OldEntry && !IS_SWAP_FROM_SSE(OldEntry))
{
/* We're switching to an invalid entry from a valid one */
MmDeleteSectionAssociation(PFN_FROM_SSE(OldEntry));
MmDereferenceSegment(Segment);
if (Offset->QuadPart == ((Segment->LastPage - 1ULL) << PAGE_SHIFT))
{
/* We are unsetting the last page */
while (--Segment->LastPage)
{
LARGE_INTEGER CheckOffset;
CheckOffset.QuadPart = (Segment->LastPage - 1) << PAGE_SHIFT;
ULONG_PTR Entry = MmGetPageEntrySectionSegment(Segment, &CheckOffset);
if ((Entry != 0) && !IS_SWAP_FROM_SSE(Entry))
break;
}
}
}
PageTable->PageEntries[PageIndex] = Entry;
return STATUS_SUCCESS;
}
@ -335,13 +356,13 @@ MmGetSectionAssociation(PFN_NUMBER Page,
PMM_SECTION_SEGMENT Segment = NULL;
PCACHE_SECTION_PAGE_TABLE PageTable;
PageTable = (PCACHE_SECTION_PAGE_TABLE)MmGetSegmentRmap(Page,
&RawOffset);
PageTable = MmGetSegmentRmap(Page, &RawOffset);
if (PageTable)
{
Segment = PageTable->Segment;
Offset->QuadPart = PageTable->FileOffset.QuadPart +
((ULONG64)RawOffset << PAGE_SHIFT);
ASSERT(PFN_FROM_SSE(PageTable->PageEntries[RawOffset]) == Page);
}
return Segment;

View file

@ -42,7 +42,6 @@ CcPfInitializePrefetcher(VOID)
CODE_SEG("INIT")
BOOLEAN
NTAPI
CcInitializeCacheManager(VOID)
{
ULONG Thread;
@ -308,7 +307,7 @@ CcSetBcbOwnerPointer (
IN PVOID Owner
)
{
PINTERNAL_BCB iBcb = Bcb;
PINTERNAL_BCB iBcb = CONTAINING_RECORD(Bcb, INTERNAL_BCB, PFCB);
CCTRACE(CC_API_DEBUG, "Bcb=%p Owner=%p\n",
Bcb, Owner);

View file

@ -20,13 +20,6 @@ static PFN_NUMBER CcZeroPage = 0;
#define MAX_ZERO_LENGTH (256 * 1024)
typedef enum _CC_COPY_OPERATION
{
CcOperationRead,
CcOperationWrite,
CcOperationZero
} CC_COPY_OPERATION;
typedef enum _CC_CAN_WRITE_RETRY
{
FirstTry = 0,
@ -35,7 +28,7 @@ typedef enum _CC_CAN_WRITE_RETRY
RetryMasterLocked = 255,
} CC_CAN_WRITE_RETRY;
ULONG CcRosTraceLevel = 0;
ULONG CcRosTraceLevel = CC_API_DEBUG;
ULONG CcFastMdlReadWait;
ULONG CcFastMdlReadNotPossible;
ULONG CcFastReadNotPossible;
@ -76,338 +69,6 @@ CcInitCacheZeroPage (
MiZeroPhysicalPage(CcZeroPage);
}
/*
 * CcReadVirtualAddress
 *
 * Fill the mapped view of the given VACB with data read from its backing
 * file via a non-cached paging read (IoPageRead).
 *
 * Vacb - the VACB whose view (Vacb->BaseAddress, up to
 *        VACB_MAPPING_GRANULARITY bytes) is to be populated from
 *        Vacb->SharedCacheMap->FileObject at Vacb->FileOffset.
 *
 * Returns STATUS_SUCCESS on success (STATUS_END_OF_FILE from the read is
 * treated as success), STATUS_INSUFFICIENT_RESOURCES if the MDL cannot be
 * allocated, or the failing I/O status otherwise.
 */
NTSTATUS
NTAPI
CcReadVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;
    ULARGE_INTEGER LargeSize;

    /* Read at most one mapping granularity, clipped to the distance from
     * this VACB's offset to the end of the section */
    LargeSize.QuadPart = Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart;
    if (LargeSize.QuadPart > VACB_MAPPING_GRANULARITY)
    {
        LargeSize.QuadPart = VACB_MAPPING_GRANULARITY;
    }
    Size = LargeSize.LowPart;

    /* Paging I/O is done in whole pages */
    Size = ROUND_TO_PAGES(Size);
    ASSERT(Size <= VACB_MAPPING_GRANULARITY);
    ASSERT(Size > 0);

    /* Describe the view with an MDL so the storage stack can DMA into it */
    Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        /* IoWriteAccess: the pages are the *destination* of the read */
        MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        /* A kernel-mode probe of a cache view must not fault; treat it as fatal */
        Status = _SEH2_GetExceptionCode();
        DPRINT1("MmProbeAndLockPages failed with: %lx for %p (%p, %p)\n", Status, Mdl, Vacb, Vacb->BaseAddress);
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        Mdl->MdlFlags |= MDL_IO_PAGE_READ;
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        /* Issue the paging read and wait for it if it completes asynchronously */
        Status = IoPageRead(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }

    IoFreeMdl(Mdl);

    /* Reading past EOF is not an error for the cache: the tail is zeroed below */
    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageRead failed, Status %x\n", Status);
        return Status;
    }

    /* Zero the part of the view that lies beyond what was read, so callers
     * never see stale mapping contents */
    if (Size < VACB_MAPPING_GRANULARITY)
    {
        RtlZeroMemory((char*)Vacb->BaseAddress + Size,
                      VACB_MAPPING_GRANULARITY - Size);
    }

    return STATUS_SUCCESS;
}
/*
 * CcWriteVirtualAddress
 *
 * Flush the mapped view of the given VACB back to its backing file via a
 * synchronous non-cached paging write (IoSynchronousPageWrite).
 *
 * Vacb - the VACB whose view contents are written to
 *        Vacb->SharedCacheMap->FileObject at Vacb->FileOffset.
 *
 * Returns STATUS_SUCCESS on success (STATUS_END_OF_FILE is tolerated),
 * STATUS_INSUFFICIENT_RESOURCES if the MDL cannot be allocated, or the
 * failing I/O status otherwise.
 */
NTSTATUS
NTAPI
CcWriteVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;
    ULARGE_INTEGER LargeSize;

    /* Write at most one mapping granularity, clipped to the distance from
     * this VACB's offset to the end of the section */
    LargeSize.QuadPart = Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart;
    if (LargeSize.QuadPart > VACB_MAPPING_GRANULARITY)
    {
        LargeSize.QuadPart = VACB_MAPPING_GRANULARITY;
    }
    Size = LargeSize.LowPart;
    //
    // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
    // MmGlobalPageDirectory and the real system PDE directory. What a mess...
    //
    {
        /* Touch every page of the view through MmGetPfnForProcess to force
         * the PDE synchronization described above */
        ULONG i = 0;
        do
        {
            MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i << PAGE_SHIFT)));
        } while (++i < (Size >> PAGE_SHIFT));
    }

    ASSERT(Size <= VACB_MAPPING_GRANULARITY);
    ASSERT(Size > 0);

    /* Describe the view with an MDL for the storage stack */
    Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        /* IoReadAccess: the pages are the *source* of the write */
        MmProbeAndLockPages(Mdl, KernelMode, IoReadAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        /* A kernel-mode probe of a cache view must not fault; treat it as fatal */
        Status = _SEH2_GetExceptionCode();
        DPRINT1("MmProbeAndLockPages failed with: %lx for %p (%p, %p)\n", Status, Mdl, Vacb, Vacb->BaseAddress);
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        /* Issue the paging write and wait for it if it completes asynchronously */
        Status = IoSynchronousPageWrite(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }
    IoFreeMdl(Mdl);
    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageWrite failed, Status %x\n", Status);
        return Status;
    }

    return STATUS_SUCCESS;
}
/*
 * ReadWriteOrZero
 *
 * Perform one copy step between a cache view and a caller buffer:
 * zero the view, copy caller data into it, or copy view data out of it,
 * depending on Operation. Faults touching the caller buffer are caught
 * and returned as a status; zeroing is done unguarded.
 */
NTSTATUS
ReadWriteOrZero(
    _Inout_ PVOID BaseAddress,
    _Inout_opt_ PVOID Buffer,
    _In_ ULONG Length,
    _In_ CC_COPY_OPERATION Operation)
{
    NTSTATUS Status = STATUS_SUCCESS;
    PVOID Source;
    PVOID Destination;

    /* Zeroing touches only the kernel view: no guard needed */
    if (Operation == CcOperationZero)
    {
        RtlZeroMemory(BaseAddress, Length);
        return Status;
    }

    /* Pick the copy direction: write fills the view, read drains it */
    if (Operation == CcOperationWrite)
    {
        Destination = BaseAddress;
        Source = Buffer;
    }
    else
    {
        Destination = Buffer;
        Source = BaseAddress;
    }

    /* The caller buffer may be invalid user memory: trap the fault */
    _SEH2_TRY
    {
        RtlCopyMemory(Destination, Source, Length);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    return Status;
}
/*
 * CcCopyData
 *
 * Common worker for CcCopyRead / CcCopyWrite / CcZeroData: walks the file
 * range [FileOffset, FileOffset + Length) one VACB at a time, faulting each
 * VACB in via CcReadVirtualAddress when needed, and performs Operation
 * (read out, write in, or zero) against each mapped view.
 *
 * FileObject - cached file; its SharedCacheMap and PrivateCacheMap are used.
 * FileOffset - starting byte offset of the transfer.
 * Buffer     - caller buffer (ignored for CcOperationZero).
 * Length     - number of bytes to transfer.
 * Operation  - CcOperationRead / CcOperationWrite / CcOperationZero.
 * Wait       - if FALSE, fail (return FALSE) rather than block on
 *              non-resident data.
 * IoStatus   - receives STATUS_SUCCESS and the byte count on success.
 *
 * Returns FALSE only for the non-blocking "data not available" case;
 * hard failures are raised via ExRaiseStatus.
 */
BOOLEAN
CcCopyData (
    _In_ PFILE_OBJECT FileObject,
    _In_ LONGLONG FileOffset,
    _Inout_ PVOID Buffer,
    _In_ LONGLONG Length,
    _In_ CC_COPY_OPERATION Operation,
    _In_ BOOLEAN Wait,
    _Out_ PIO_STATUS_BLOCK IoStatus)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    ULONG BytesCopied;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PLIST_ENTRY ListEntry;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;
    PPRIVATE_CACHE_MAP PrivateCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    PrivateCacheMap = FileObject->PrivateCacheMap;
    CurrentOffset = FileOffset;
    BytesCopied = 0;

    if (!Wait)
    {
        /* test if the requested data is available */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        /* FIXME: this loop doesn't take into account areas that don't have
         * a VACB in the list yet */
        ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
        {
            Vacb = CONTAINING_RECORD(ListEntry,
                                     ROS_VACB,
                                     CacheMapVacbListEntry);
            ListEntry = ListEntry->Flink;
            /* An invalid VACB overlapping the request means a blocking read
             * would be needed: refuse since the caller asked not to wait */
            if (!Vacb->Valid &&
                DoRangesIntersect(Vacb->FileOffset.QuadPart,
                                  VACB_MAPPING_GRANULARITY,
                                  CurrentOffset, Length))
            {
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
                /* data not available */
                return FALSE;
            }
            /* NOTE(review): assumes the VACB list is ordered by file offset
             * — verify against the list insertion code */
            if (Vacb->FileOffset.QuadPart >= CurrentOffset + Length)
                break;
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
    }

    /* Leading partial VACB: handle the unaligned head of the range first */
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid)
        {
            /* Fault the view in from disk before touching it */
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero((PUCHAR)BaseAddress + CurrentOffset % VACB_MAPPING_GRANULARITY,
                                 Buffer,
                                 PartialLength,
                                 Operation);

        /* Mark the VACB dirty for anything except a pure read */
        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        /* Zeroing has no caller buffer to advance */
        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    /* Remaining VACB-aligned chunks (last one may be short) */
    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        /* Only fault in when the existing contents matter: reads always,
         * writes/zeroes only when they don't cover the whole view */
        if (!Valid &&
            (Operation == CcOperationRead ||
             PartialLength < VACB_MAPPING_GRANULARITY))
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero(BaseAddress, Buffer, PartialLength, Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    /* If that was a successful sync read operation, let's handle read ahead */
    if (Operation == CcOperationRead && Length == 0 && Wait)
    {
        /* If file isn't random access and next read may get us cross VACB boundary,
         * schedule next read
         */
        if (!BooleanFlagOn(FileObject->Flags, FO_RANDOM_ACCESS) &&
            (CurrentOffset - 1) / VACB_MAPPING_GRANULARITY != (CurrentOffset + BytesCopied - 1) / VACB_MAPPING_GRANULARITY)
        {
            CcScheduleReadAhead(FileObject, (PLARGE_INTEGER)&FileOffset, BytesCopied);
        }

        /* And update read history in private cache map */
        PrivateCacheMap->FileOffset1.QuadPart = PrivateCacheMap->FileOffset2.QuadPart;
        PrivateCacheMap->BeyondLastByte1.QuadPart = PrivateCacheMap->BeyondLastByte2.QuadPart;
        PrivateCacheMap->FileOffset2.QuadPart = FileOffset;
        PrivateCacheMap->BeyondLastByte2.QuadPart = FileOffset + BytesCopied;
    }

    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = BytesCopied;
    return TRUE;
}
VOID
CcPostDeferredWrites(VOID)
{
@ -492,8 +153,6 @@ CcPerformReadAhead(
PROS_SHARED_CACHE_MAP SharedCacheMap;
PROS_VACB Vacb;
ULONG PartialLength;
PVOID BaseAddress;
BOOLEAN Valid;
ULONG Length;
PPRIVATE_CACHE_MAP PrivateCacheMap;
BOOLEAN Locked;
@ -556,10 +215,7 @@ CcPerformReadAhead(
{
PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
Status = CcRosRequestVacb(SharedCacheMap,
ROUND_DOWN(CurrentOffset,
VACB_MAPPING_GRANULARITY),
&BaseAddress,
&Valid,
ROUND_DOWN(CurrentOffset, VACB_MAPPING_GRANULARITY),
&Vacb);
if (!NT_SUCCESS(Status))
{
@ -567,18 +223,16 @@ CcPerformReadAhead(
goto Clear;
}
if (!Valid)
Status = CcRosEnsureVacbResident(Vacb, TRUE, FALSE,
CurrentOffset % VACB_MAPPING_GRANULARITY, PartialLength);
if (!NT_SUCCESS(Status))
{
Status = CcReadVirtualAddress(Vacb);
if (!NT_SUCCESS(Status))
{
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
DPRINT1("Failed to read data: %lx!\n", Status);
goto Clear;
}
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
DPRINT1("Failed to read data: %lx!\n", Status);
goto Clear;
}
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
Length -= PartialLength;
CurrentOffset += PartialLength;
@ -590,8 +244,6 @@ CcPerformReadAhead(
PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
Status = CcRosRequestVacb(SharedCacheMap,
CurrentOffset,
&BaseAddress,
&Valid,
&Vacb);
if (!NT_SUCCESS(Status))
{
@ -599,18 +251,15 @@ CcPerformReadAhead(
goto Clear;
}
if (!Valid)
Status = CcRosEnsureVacbResident(Vacb, TRUE, FALSE, 0, PartialLength);
if (!NT_SUCCESS(Status))
{
Status = CcReadVirtualAddress(Vacb);
if (!NT_SUCCESS(Status))
{
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
DPRINT1("Failed to read data: %lx!\n", Status);
goto Clear;
}
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
DPRINT1("Failed to read data: %lx!\n", Status);
goto Clear;
}
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
Length -= PartialLength;
CurrentOffset += PartialLength;
@ -798,6 +447,26 @@ CcCanIWrite (
return TRUE;
}
/*
 * CcpCheckInvalidUserBuffer
 *
 * SEH exception filter used while copying through a caller-supplied buffer.
 * Claims the exception (EXCEPTION_EXECUTE_HANDLER) only for an access
 * violation whose faulting address lies inside [Buffer, Buffer + Length);
 * every other exception is passed on (EXCEPTION_CONTINUE_SEARCH).
 */
static
int
CcpCheckInvalidUserBuffer(PEXCEPTION_POINTERS Except, PVOID Buffer, ULONG Length)
{
    PEXCEPTION_RECORD Record = Except->ExceptionRecord;
    ULONG_PTR FaultAddress;

    /* Only access violations carrying a fault address are of interest */
    if ((Record->ExceptionCode == STATUS_ACCESS_VIOLATION) &&
        (Record->NumberParameters >= 2))
    {
        /* ExceptionInformation[1] is the address that faulted */
        FaultAddress = Record->ExceptionInformation[1];
        if ((FaultAddress >= (ULONG_PTR)Buffer) &&
            (FaultAddress < (ULONG_PTR)Buffer + Length))
        {
            return EXCEPTION_EXECUTE_HANDLER;
        }
    }

    return EXCEPTION_CONTINUE_SEARCH;
}
/*
* @implemented
*/
@ -811,6 +480,13 @@ CcCopyRead (
OUT PVOID Buffer,
OUT PIO_STATUS_BLOCK IoStatus)
{
PROS_VACB Vacb;
PROS_SHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
NTSTATUS Status;
LONGLONG CurrentOffset;
LONGLONG ReadEnd = FileOffset->QuadPart + Length;
ULONG ReadLength = 0;
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
FileObject, FileOffset->QuadPart, Length, Wait);
@ -819,13 +495,81 @@ CcCopyRead (
FileObject, FileOffset->QuadPart, Length, Wait,
Buffer, IoStatus);
return CcCopyData(FileObject,
FileOffset->QuadPart,
Buffer,
Length,
CcOperationRead,
Wait,
IoStatus);
if (!SharedCacheMap)
return FALSE;
/* Documented to ASSERT, but KMTests test this case... */
// ASSERT((FileOffset->QuadPart + Length) <= SharedCacheMap->FileSize.QuadPart);
CurrentOffset = FileOffset->QuadPart;
while(CurrentOffset < ReadEnd)
{
Status = CcRosGetVacb(SharedCacheMap, CurrentOffset, &Vacb);
if (!NT_SUCCESS(Status))
{
ExRaiseStatus(Status);
return FALSE;
}
_SEH2_TRY
{
ULONG VacbOffset = CurrentOffset % VACB_MAPPING_GRANULARITY;
ULONG VacbLength = min(Length, VACB_MAPPING_GRANULARITY - VacbOffset);
SIZE_T CopyLength = VacbLength;
if (!CcRosEnsureVacbResident(Vacb, Wait, FALSE, VacbOffset, VacbLength))
return FALSE;
_SEH2_TRY
{
RtlCopyMemory(Buffer, (PUCHAR)Vacb->BaseAddress + VacbOffset, CopyLength);
}
_SEH2_EXCEPT(CcpCheckInvalidUserBuffer(_SEH2_GetExceptionInformation(), Buffer, VacbLength))
{
ExRaiseStatus(STATUS_INVALID_USER_BUFFER);
}
_SEH2_END;
ReadLength += VacbLength;
Buffer = (PVOID)((ULONG_PTR)Buffer + VacbLength);
CurrentOffset += VacbLength;
Length -= VacbLength;
}
_SEH2_FINALLY
{
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
}
_SEH2_END;
}
IoStatus->Status = STATUS_SUCCESS;
IoStatus->Information = ReadLength;
#if 0
/* If that was a successful sync read operation, let's handle read ahead */
if (Length == 0 && Wait)
{
PPRIVATE_CACHE_MAP PrivateCacheMap = FileObject->PrivateCacheMap;
/* If file isn't random access and next read may get us cross VACB boundary,
* schedule next read
*/
if (!BooleanFlagOn(FileObject->Flags, FO_RANDOM_ACCESS) &&
(CurrentOffset - 1) / VACB_MAPPING_GRANULARITY != (CurrentOffset + ReadLength - 1) / VACB_MAPPING_GRANULARITY)
{
CcScheduleReadAhead(FileObject, FileOffset, ReadLength);
}
/* And update read history in private cache map */
PrivateCacheMap->FileOffset1.QuadPart = PrivateCacheMap->FileOffset2.QuadPart;
PrivateCacheMap->BeyondLastByte1.QuadPart = PrivateCacheMap->BeyondLastByte2.QuadPart;
PrivateCacheMap->FileOffset2.QuadPart = FileOffset->QuadPart;
PrivateCacheMap->BeyondLastByte2.QuadPart = FileOffset->QuadPart + ReadLength;
}
#endif
return TRUE;
}
/*
@ -840,7 +584,11 @@ CcCopyWrite (
IN BOOLEAN Wait,
IN PVOID Buffer)
{
IO_STATUS_BLOCK IoStatus;
PROS_VACB Vacb;
PROS_SHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
NTSTATUS Status;
LONGLONG CurrentOffset;
LONGLONG WriteEnd;
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
FileObject, FileOffset->QuadPart, Length, Wait, Buffer);
@ -849,13 +597,66 @@ CcCopyWrite (
"Length %lu, Wait %u, Buffer 0x%p)\n",
FileObject, FileOffset->QuadPart, Length, Wait, Buffer);
return CcCopyData(FileObject,
FileOffset->QuadPart,
Buffer,
Length,
CcOperationWrite,
Wait,
&IoStatus);
if (!SharedCacheMap)
return FALSE;
Status = RtlLongLongAdd(FileOffset->QuadPart, Length, &WriteEnd);
if (!NT_SUCCESS(Status))
ExRaiseStatus(Status);
ASSERT(WriteEnd <= SharedCacheMap->SectionSize.QuadPart);
CurrentOffset = FileOffset->QuadPart;
while(CurrentOffset < WriteEnd)
{
ULONG VacbOffset = CurrentOffset % VACB_MAPPING_GRANULARITY;
ULONG VacbLength = min(WriteEnd - CurrentOffset, VACB_MAPPING_GRANULARITY - VacbOffset);
Status = CcRosGetVacb(SharedCacheMap, CurrentOffset, &Vacb);
if (!NT_SUCCESS(Status))
{
ExRaiseStatus(Status);
return FALSE;
}
_SEH2_TRY
{
if (!CcRosEnsureVacbResident(Vacb, Wait, FALSE, VacbOffset, VacbLength))
{
return FALSE;
}
_SEH2_TRY
{
RtlCopyMemory((PVOID)((ULONG_PTR)Vacb->BaseAddress + VacbOffset), Buffer, VacbLength);
}
_SEH2_EXCEPT(CcpCheckInvalidUserBuffer(_SEH2_GetExceptionInformation(), Buffer, VacbLength))
{
ExRaiseStatus(STATUS_INVALID_USER_BUFFER);
}
_SEH2_END;
Buffer = (PVOID)((ULONG_PTR)Buffer + VacbLength);
CurrentOffset += VacbLength;
/* Tell Mm */
Status = MmMakePagesDirty(NULL, Add2Ptr(Vacb->BaseAddress, VacbOffset), VacbLength);
if (!NT_SUCCESS(Status))
ExRaiseStatus(Status);
}
_SEH2_FINALLY
{
/* Do not mark the VACB as dirty if an exception was raised */
CcRosReleaseVacb(SharedCacheMap, Vacb, !_SEH2_AbnormalTermination(), FALSE);
}
_SEH2_END;
}
/* Flush if needed */
if (FileObject->Flags & FO_WRITE_THROUGH)
CcFlushCache(FileObject->SectionObjectPointer, FileOffset, Length, NULL);
return TRUE;
}
/*
@ -999,11 +800,8 @@ CcZeroData (
NTSTATUS Status;
LARGE_INTEGER WriteOffset;
LONGLONG Length;
ULONG CurrentLength;
PMDL Mdl;
ULONG i;
IO_STATUS_BLOCK Iosb;
KEVENT Event;
PROS_VACB Vacb;
PROS_SHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);
@ -1015,28 +813,33 @@ CcZeroData (
Length = EndOffset->QuadPart - StartOffset->QuadPart;
WriteOffset.QuadPart = StartOffset->QuadPart;
if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
if (!SharedCacheMap)
{
/* File is not cached */
/* Make this a non-cached write */
IO_STATUS_BLOCK Iosb;
KEVENT Event;
PMDL Mdl;
ULONG i;
ULONG CurrentLength;
PPFN_NUMBER PfnArray;
Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));
/* Setup our Mdl */
Mdl = IoAllocateMdl(NULL, min(Length, MAX_ZERO_LENGTH), FALSE, FALSE, NULL);
if (!Mdl)
ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
PfnArray = MmGetMdlPfnArray(Mdl);
for (i = 0; i < BYTES_TO_PAGES(Mdl->ByteCount); i++)
PfnArray[i] = CcZeroPage;
Mdl->MdlFlags |= MDL_PAGES_LOCKED;
/* Perform the write sequencially */
while (Length > 0)
{
if (Length + WriteOffset.QuadPart % PAGE_SIZE > MAX_ZERO_LENGTH)
{
CurrentLength = MAX_ZERO_LENGTH - WriteOffset.QuadPart % PAGE_SIZE;
}
else
{
CurrentLength = Length;
}
MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
{
((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;
}
CurrentLength = min(Length, MAX_ZERO_LENGTH);
Mdl->ByteCount = CurrentLength;
KeInitializeEvent(&Event, NotificationEvent, FALSE);
Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
if (Status == STATUS_PENDING)
@ -1050,24 +853,68 @@ CcZeroData (
}
if (!NT_SUCCESS(Status))
{
return FALSE;
IoFreeMdl(Mdl);
ExRaiseStatus(Status);
}
WriteOffset.QuadPart += CurrentLength;
Length -= CurrentLength;
}
}
else
{
IO_STATUS_BLOCK IoStatus;
return CcCopyData(FileObject,
WriteOffset.QuadPart,
NULL,
Length,
CcOperationZero,
Wait,
&IoStatus);
IoFreeMdl(Mdl);
return TRUE;
}
/* See if we should simply truncate the valid data length */
if ((StartOffset->QuadPart < SharedCacheMap->ValidDataLength.QuadPart) && (EndOffset->QuadPart >= SharedCacheMap->ValidDataLength.QuadPart))
{
DPRINT1("Truncating VDL.\n");
SharedCacheMap->ValidDataLength = *StartOffset;
return TRUE;
}
ASSERT(EndOffset->QuadPart <= SharedCacheMap->SectionSize.QuadPart);
while(WriteOffset.QuadPart < EndOffset->QuadPart)
{
ULONG VacbOffset = WriteOffset.QuadPart % VACB_MAPPING_GRANULARITY;
ULONG VacbLength = min(Length, VACB_MAPPING_GRANULARITY - VacbOffset);
Status = CcRosGetVacb(SharedCacheMap, WriteOffset.QuadPart, &Vacb);
if (!NT_SUCCESS(Status))
{
ExRaiseStatus(Status);
return FALSE;
}
_SEH2_TRY
{
if (!CcRosEnsureVacbResident(Vacb, Wait, FALSE, VacbOffset, VacbLength))
{
return FALSE;
}
RtlZeroMemory((PVOID)((ULONG_PTR)Vacb->BaseAddress + VacbOffset), VacbLength);
WriteOffset.QuadPart += VacbLength;
Length -= VacbLength;
/* Tell Mm */
Status = MmMakePagesDirty(NULL, Add2Ptr(Vacb->BaseAddress, VacbOffset), VacbLength);
if (!NT_SUCCESS(Status))
ExRaiseStatus(Status);
}
_SEH2_FINALLY
{
/* Do not mark the VACB as dirty if an exception was raised */
CcRosReleaseVacb(SharedCacheMap, Vacb, !_SEH2_AbnormalTermination(), FALSE);
}
_SEH2_END;
}
/* Flush if needed */
if (FileObject->Flags & FO_WRITE_THROUGH)
CcFlushCache(FileObject->SectionObjectPointer, StartOffset, EndOffset->QuadPart - StartOffset->QuadPart, NULL);
return TRUE;
}

View file

@ -10,13 +10,10 @@
/* INCLUDES ******************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
/* GLOBALS *****************************************************************/
NTSTATUS CcRosInternalFreeVacb(PROS_VACB Vacb);
/* FUNCTIONS *****************************************************************/
/*
@ -48,7 +45,7 @@ NTAPI
CcGetFileObjectFromBcb (
IN PVOID Bcb)
{
PINTERNAL_BCB iBcb = (PINTERNAL_BCB)Bcb;
PINTERNAL_BCB iBcb = CONTAINING_RECORD(Bcb, INTERNAL_BCB, PFCB);
CCTRACE(CC_API_DEBUG, "Bcb=%p\n", Bcb);
@ -183,7 +180,10 @@ CcPurgeCacheSection (
SharedCacheMap = SectionObjectPointer->SharedCacheMap;
if (!SharedCacheMap)
return FALSE;
{
Success = TRUE;
goto purgeMm;
}
StartOffset = FileOffset != NULL ? FileOffset->QuadPart : 0;
if (Length == 0 || FileOffset == NULL)
@ -260,6 +260,11 @@ CcPurgeCacheSection (
ASSERT(Refs == 0);
}
/* Now make sure that Mm doesn't hold some pages here. */
purgeMm:
if (Success)
Success = MmPurgeSegment(SectionObjectPointer, FileOffset, Length);
return Success;
}
@ -272,8 +277,9 @@ CcSetFileSizes (
IN PFILE_OBJECT FileObject,
IN PCC_FILE_SIZES FileSizes)
{
KIRQL oldirql;
KIRQL OldIrql;
PROS_SHARED_CACHE_MAP SharedCacheMap;
LARGE_INTEGER OldSectionSize;
CCTRACE(CC_API_DEBUG, "FileObject=%p FileSizes=%p\n",
FileObject, FileSizes);
@ -294,7 +300,15 @@ CcSetFileSizes (
if (SharedCacheMap == NULL)
return;
if (FileSizes->AllocationSize.QuadPart < SharedCacheMap->SectionSize.QuadPart)
/* Update the relevant fields */
KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
OldSectionSize = SharedCacheMap->SectionSize;
SharedCacheMap->SectionSize = FileSizes->AllocationSize;
SharedCacheMap->FileSize = FileSizes->FileSize;
SharedCacheMap->ValidDataLength = FileSizes->ValidDataLength;
KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
if (FileSizes->AllocationSize.QuadPart < OldSectionSize.QuadPart)
{
CcPurgeCacheSection(FileObject->SectionObjectPointer,
&FileSizes->AllocationSize,
@ -303,46 +317,9 @@ CcSetFileSizes (
}
else
{
PROS_VACB LastVacb;
/*
* If file (allocation) size has increased, then we need to check whether
* it just grows in a single VACB (the last one).
* If so, we must mark the VACB as invalid to trigger a read to the
* FSD at the next VACB usage, and thus avoid returning garbage
*/
/* Check for allocation size and the last VACB */
if (SharedCacheMap->SectionSize.QuadPart < FileSizes->AllocationSize.QuadPart &&
SharedCacheMap->SectionSize.QuadPart % VACB_MAPPING_GRANULARITY)
{
LastVacb = CcRosLookupVacb(SharedCacheMap,
SharedCacheMap->SectionSize.QuadPart);
if (LastVacb != NULL)
{
/* Mark it as invalid */
CcRosReleaseVacb(SharedCacheMap, LastVacb, LastVacb->Dirty ? LastVacb->Valid : FALSE, FALSE, FALSE);
}
}
/* Check for file size and the last VACB */
if (SharedCacheMap->FileSize.QuadPart < FileSizes->FileSize.QuadPart &&
SharedCacheMap->FileSize.QuadPart % VACB_MAPPING_GRANULARITY)
{
LastVacb = CcRosLookupVacb(SharedCacheMap,
SharedCacheMap->FileSize.QuadPart);
if (LastVacb != NULL)
{
/* Mark it as invalid */
CcRosReleaseVacb(SharedCacheMap, LastVacb, LastVacb->Dirty ? LastVacb->Valid : FALSE, FALSE, FALSE);
}
}
/* Extend our section object */
MmExtendSection(SharedCacheMap->Section, &SharedCacheMap->SectionSize);
}
KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
SharedCacheMap->SectionSize = FileSizes->AllocationSize;
SharedCacheMap->FileSize = FileSizes->FileSize;
KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
}
/*

View file

@ -67,91 +67,6 @@ CcpFindBcb(
return (Found ? Bcb : NULL);
}
static
BOOLEAN
NTAPI
CcpMapData(
IN PROS_SHARED_CACHE_MAP SharedCacheMap,
IN PLARGE_INTEGER FileOffset,
IN ULONG Length,
IN ULONG Flags,
OUT PROS_VACB *pVacb,
OUT PVOID *pBuffer)
{
LONGLONG ReadOffset, BaseOffset;
BOOLEAN Valid;
PROS_VACB Vacb;
NTSTATUS Status;
LONGLONG ROffset;
ReadOffset = FileOffset->QuadPart;
DPRINT("SectionSize %I64x, FileSize %I64x\n",
SharedCacheMap->SectionSize.QuadPart,
SharedCacheMap->FileSize.QuadPart);
if (ReadOffset % VACB_MAPPING_GRANULARITY + Length > VACB_MAPPING_GRANULARITY)
{
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%p Length=%lu Flags=0x%lx -> FALSE\n",
SharedCacheMap->FileObject, FileOffset, Length, Flags);
return FALSE;
}
if (!BooleanFlagOn(Flags, MAP_NO_READ))
{
static int Warned = 0;
SetFlag(Flags, MAP_NO_READ);
if (!Warned)
{
DPRINT1("Mapping/pinning with no read not implemented. Forcing read, might fail if wait not allowed\n");
Warned++;
}
}
/* Properly round offset and call internal helper for getting a VACB */
ROffset = ROUND_DOWN(ReadOffset, VACB_MAPPING_GRANULARITY);
Status = CcRosGetVacb(SharedCacheMap,
ROffset,
&BaseOffset,
pBuffer,
&Valid,
&Vacb);
if (!NT_SUCCESS(Status))
{
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%p Length=%lu Flags=0x%lx -> FALSE\n",
SharedCacheMap->FileObject, FileOffset, Length, Flags);
ExRaiseStatus(Status);
return FALSE;
}
if (!Valid && BooleanFlagOn(Flags, MAP_NO_READ))
{
if (!BooleanFlagOn(Flags, MAP_WAIT))
{
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%p Length=%lu Flags=0x%lx -> FALSE\n",
SharedCacheMap->FileObject, FileOffset, Length, Flags);
return FALSE;
}
Status = CcReadVirtualAddress(Vacb);
if (!NT_SUCCESS(Status))
{
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%p Length=%lu Flags=0x%lx -> FALSE\n",
SharedCacheMap->FileObject, FileOffset, Length, Flags);
ExRaiseStatus(Status);
return FALSE;
}
}
*pBuffer = (PUCHAR)*pBuffer + ReadOffset % VACB_MAPPING_GRANULARITY;
*pVacb = Vacb;
return TRUE;
}
static
VOID
CcpDereferenceBcb(
@ -176,7 +91,6 @@ CcpDereferenceBcb(
*/
CcRosReleaseVacb(SharedCacheMap,
Bcb->Vacb,
TRUE,
FALSE,
FALSE);
@ -206,13 +120,13 @@ CcpGetAppropriateBcb(
iBcb = ExAllocateFromNPagedLookasideList(&iBcbLookasideList);
if (iBcb == NULL)
{
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
return NULL;
}
RtlZeroMemory(iBcb, sizeof(*iBcb));
iBcb->PFCB.NodeTypeCode = 0xDE45; /* Undocumented (CAPTIVE_PUBLIC_BCB_NODETYPECODE) */
iBcb->PFCB.NodeByteSize = sizeof(PUBLIC_BCB);
iBcb->PFCB.NodeTypeCode = 0x2FD; /* As per KMTests */
iBcb->PFCB.NodeByteSize = 0;
iBcb->PFCB.MappedLength = Length;
iBcb->PFCB.MappedFileOffset = *FileOffset;
iBcb->Vacb = Vacb;
@ -257,7 +171,7 @@ CcpGetAppropriateBcb(
if (DupBcb != NULL)
{
/* Delete the loser */
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
ExDeleteResourceLite(&iBcb->Lock);
ExFreeToNPagedLookasideList(&iBcbLookasideList, iBcb);
}
@ -304,44 +218,48 @@ CcpPinData(
OUT PVOID * Buffer)
{
PINTERNAL_BCB NewBcb;
BOOLEAN Result;
PROS_VACB Vacb;
KIRQL OldIrql;
ULONG MapFlags;
ULONG VacbOffset;
NTSTATUS Status;
BOOLEAN Result;
VacbOffset = (ULONG)(FileOffset->QuadPart % VACB_MAPPING_GRANULARITY);
if ((VacbOffset + Length) > VACB_MAPPING_GRANULARITY)
{
/* Complain loudly, we shoud pin the whole range */
DPRINT1("TRUNCATING DATA PIN FROM %lu to %lu!\n", Length, VACB_MAPPING_GRANULARITY - VacbOffset);
Length = VACB_MAPPING_GRANULARITY - VacbOffset;
}
KeAcquireSpinLock(&SharedCacheMap->BcbSpinLock, &OldIrql);
NewBcb = CcpFindBcb(SharedCacheMap, FileOffset, Length, TRUE);
if (NewBcb != NULL)
{
BOOLEAN Result;
++NewBcb->RefCount;
KeReleaseSpinLock(&SharedCacheMap->BcbSpinLock, OldIrql);
if (BooleanFlagOn(Flags, PIN_EXCLUSIVE))
{
Result = ExAcquireResourceExclusiveLite(&NewBcb->Lock, BooleanFlagOn(Flags, PIN_WAIT));
}
else
{
Result = ExAcquireSharedStarveExclusive(&NewBcb->Lock, BooleanFlagOn(Flags, PIN_WAIT));
}
if (!Result)
{
CcpDereferenceBcb(SharedCacheMap, NewBcb);
NewBcb = NULL;
}
else
{
NewBcb->PinCount++;
*Bcb = NewBcb;
*Buffer = (PUCHAR)NewBcb->Vacb->BaseAddress + FileOffset->QuadPart % VACB_MAPPING_GRANULARITY;
return FALSE;
}
return Result;
NewBcb->PinCount++;
}
else
{
LONGLONG ROffset;
PROS_VACB Vacb;
KeReleaseSpinLock(&SharedCacheMap->BcbSpinLock, OldIrql);
if (BooleanFlagOn(Flags, PIN_IF_BCB))
@ -349,29 +267,50 @@ CcpPinData(
return FALSE;
}
MapFlags = Flags & PIN_WAIT;
if (BooleanFlagOn(Flags, PIN_NO_READ))
/* Properly round offset and call internal helper for getting a VACB */
ROffset = ROUND_DOWN(FileOffset->QuadPart, VACB_MAPPING_GRANULARITY);
Status = CcRosGetVacb(SharedCacheMap, ROffset, &Vacb);
if (!NT_SUCCESS(Status))
{
SetFlag(MapFlags, MAP_NO_READ);
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%p Length=%lu Flags=0x%lx -> FALSE\n",
SharedCacheMap->FileObject, FileOffset, Length, Flags);
ExRaiseStatus(Status);
return FALSE;
}
Result = CcpMapData(SharedCacheMap, FileOffset, Length, MapFlags, &Vacb, Buffer);
if (Result)
NewBcb = CcpGetAppropriateBcb(SharedCacheMap, Vacb, FileOffset, Length, Flags, TRUE);
if (NewBcb == NULL)
{
NewBcb = CcpGetAppropriateBcb(SharedCacheMap, Vacb, FileOffset, Length, Flags, TRUE);
if (NewBcb == NULL)
{
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
Result = FALSE;
}
else
{
*Bcb = NewBcb;
}
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
return FALSE;
}
}
return Result;
Result = FALSE;
_SEH2_TRY
{
/* Ensure the pages are resident */
Result = CcRosEnsureVacbResident(NewBcb->Vacb,
BooleanFlagOn(Flags, PIN_WAIT),
BooleanFlagOn(Flags, PIN_NO_READ),
VacbOffset, Length);
}
_SEH2_FINALLY
{
if (!Result)
{
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%p Length=%lu Flags=0x%lx -> FALSE\n",
SharedCacheMap->FileObject, FileOffset, Length, Flags);
CcUnpinData(&NewBcb->PFCB);
return FALSE;
}
}
_SEH2_END;
*Bcb = &NewBcb->PFCB;
*Buffer = (PVOID)((ULONG_PTR)NewBcb->Vacb->BaseAddress + VacbOffset);
return TRUE;
}
/*
@ -387,13 +326,15 @@ CcMapData (
OUT PVOID *pBcb,
OUT PVOID *pBuffer)
{
BOOLEAN Ret;
KIRQL OldIrql;
PINTERNAL_BCB iBcb;
PROS_VACB Vacb;
PROS_SHARED_CACHE_MAP SharedCacheMap;
ULONG VacbOffset;
NTSTATUS Status;
BOOLEAN Result;
DPRINT("CcMapData(FileObject 0x%p, FileOffset %I64x, Length %lu, Flags 0x%lx,"
CCTRACE(CC_API_DEBUG, "CcMapData(FileObject 0x%p, FileOffset 0x%I64x, Length %lu, Flags 0x%lx,"
" pBcb 0x%p, pBuffer 0x%p)\n", FileObject, FileOffset->QuadPart,
Length, Flags, pBcb, pBuffer);
@ -413,6 +354,14 @@ CcMapData (
++CcMapDataNoWait;
}
VacbOffset = (ULONG)(FileOffset->QuadPart % VACB_MAPPING_GRANULARITY);
/* KMTests seem to show that it is allowed to call accross mapping granularity */
if ((VacbOffset + Length) > VACB_MAPPING_GRANULARITY)
{
DPRINT1("TRUNCATING DATA MAP FROM %lu to %lu!\n", Length, VACB_MAPPING_GRANULARITY - VacbOffset);
Length = VACB_MAPPING_GRANULARITY - VacbOffset;
}
KeAcquireSpinLock(&SharedCacheMap->BcbSpinLock, &OldIrql);
iBcb = CcpFindBcb(SharedCacheMap, FileOffset, Length, FALSE);
@ -420,34 +369,54 @@ CcMapData (
{
KeReleaseSpinLock(&SharedCacheMap->BcbSpinLock, OldIrql);
Ret = CcpMapData(SharedCacheMap, FileOffset, Length, Flags, &Vacb, pBuffer);
if (Ret)
/* Call internal helper for getting a VACB */
Status = CcRosGetVacb(SharedCacheMap, FileOffset->QuadPart, &Vacb);
if (!NT_SUCCESS(Status))
{
iBcb = CcpGetAppropriateBcb(SharedCacheMap, Vacb, FileOffset, Length, 0, FALSE);
if (iBcb == NULL)
{
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
Ret = FALSE;
}
else
{
*pBcb = iBcb;
}
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%p Length=%lu Flags=0x%lx -> FALSE\n",
SharedCacheMap->FileObject, FileOffset, Length, Flags);
ExRaiseStatus(Status);
return FALSE;
}
iBcb = CcpGetAppropriateBcb(SharedCacheMap, Vacb, FileOffset, Length, 0, FALSE);
if (iBcb == NULL)
{
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%p Length=%lu Flags=0x%lx -> FALSE\n",
SharedCacheMap->FileObject, FileOffset, Length, Flags);
return FALSE;
}
}
else
{
++iBcb->RefCount;
KeReleaseSpinLock(&SharedCacheMap->BcbSpinLock, OldIrql);
*pBcb = iBcb;
*pBuffer = (PUCHAR)iBcb->Vacb->BaseAddress + FileOffset->QuadPart % VACB_MAPPING_GRANULARITY;
Ret = TRUE;
}
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%p Length=%lu Flags=0x%lx -> %d Bcb=%p\n",
FileObject, FileOffset, Length, Flags, Ret, *pBcb);
return Ret;
_SEH2_TRY
{
Result = FALSE;
/* Ensure the pages are resident */
Result = CcRosEnsureVacbResident(iBcb->Vacb, BooleanFlagOn(Flags, MAP_WAIT),
BooleanFlagOn(Flags, MAP_NO_READ), VacbOffset, Length);
}
_SEH2_FINALLY
{
if (!Result)
{
CcpDereferenceBcb(SharedCacheMap, iBcb);
return FALSE;
}
}
_SEH2_END;
*pBcb = &iBcb->PFCB;
*pBuffer = (PVOID)((ULONG_PTR)iBcb->Vacb->BaseAddress + VacbOffset);
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%p Length=%lu Flags=0x%lx -> TRUE Bcb=%p, Buffer %p\n",
FileObject, FileOffset, Length, Flags, *pBcb, *pBuffer);
return Result;
}
/*
@ -482,14 +451,14 @@ CcPinMappedData (
return FALSE;
}
iBcb = *Bcb;
iBcb = *Bcb ? CONTAINING_RECORD(*Bcb, INTERNAL_BCB, PFCB) : NULL;
++CcPinMappedDataCount;
Result = CcpPinData(SharedCacheMap, FileOffset, Length, Flags, Bcb, &Buffer);
if (Result)
{
CcUnpinData(iBcb);
CcUnpinData(&iBcb->PFCB);
}
return Result;
@ -573,10 +542,14 @@ CcSetDirtyPinnedData (
IN PVOID Bcb,
IN PLARGE_INTEGER Lsn)
{
PINTERNAL_BCB iBcb = Bcb;
PINTERNAL_BCB iBcb = CONTAINING_RECORD(Bcb, INTERNAL_BCB, PFCB);
CCTRACE(CC_API_DEBUG, "Bcb=%p Lsn=%p\n",
Bcb, Lsn);
CCTRACE(CC_API_DEBUG, "Bcb=%p Lsn=%p\n", Bcb, Lsn);
/* Tell Mm */
MmMakePagesDirty(NULL,
Add2Ptr(iBcb->Vacb->BaseAddress, iBcb->PFCB.MappedFileOffset.QuadPart - iBcb->Vacb->FileOffset.QuadPart),
iBcb->PFCB.MappedLength);
if (!iBcb->Vacb->Dirty)
{
@ -606,8 +579,7 @@ CcUnpinDataForThread (
IN PVOID Bcb,
IN ERESOURCE_THREAD ResourceThreadId)
{
PINTERNAL_BCB iBcb = Bcb;
PROS_SHARED_CACHE_MAP SharedCacheMap;
PINTERNAL_BCB iBcb = CONTAINING_RECORD(Bcb, INTERNAL_BCB, PFCB);
CCTRACE(CC_API_DEBUG, "Bcb=%p ResourceThreadId=%lu\n", Bcb, ResourceThreadId);
@ -617,8 +589,7 @@ CcUnpinDataForThread (
iBcb->PinCount--;
}
SharedCacheMap = iBcb->Vacb->SharedCacheMap;
CcpDereferenceBcb(SharedCacheMap, iBcb);
CcpDereferenceBcb(iBcb->Vacb->SharedCacheMap, iBcb);
}
/*
@ -629,7 +600,7 @@ NTAPI
CcRepinBcb (
IN PVOID Bcb)
{
PINTERNAL_BCB iBcb = Bcb;
PINTERNAL_BCB iBcb = CONTAINING_RECORD(Bcb, INTERNAL_BCB, PFCB);
CCTRACE(CC_API_DEBUG, "Bcb=%p\n", Bcb);
@ -646,7 +617,7 @@ CcUnpinRepinnedBcb (
IN BOOLEAN WriteThrough,
IN PIO_STATUS_BLOCK IoStatus)
{
PINTERNAL_BCB iBcb = Bcb;
PINTERNAL_BCB iBcb = CONTAINING_RECORD(Bcb, INTERNAL_BCB, PFCB);
KIRQL OldIrql;
PROS_SHARED_CACHE_MAP SharedCacheMap;
@ -655,29 +626,25 @@ CcUnpinRepinnedBcb (
SharedCacheMap = iBcb->Vacb->SharedCacheMap;
IoStatus->Status = STATUS_SUCCESS;
if (WriteThrough)
{
CcFlushCache(iBcb->Vacb->SharedCacheMap->FileObject->SectionObjectPointer,
&iBcb->PFCB.MappedFileOffset,
iBcb->PFCB.MappedLength,
IoStatus);
}
else
{
IoStatus->Status = STATUS_SUCCESS;
IoStatus->Information = 0;
}
KeAcquireSpinLock(&SharedCacheMap->BcbSpinLock, &OldIrql);
if (--iBcb->RefCount == 0)
{
RemoveEntryList(&iBcb->BcbEntry);
KeReleaseSpinLock(&SharedCacheMap->BcbSpinLock, OldIrql);
IoStatus->Information = 0;
if (WriteThrough)
{
if (iBcb->Vacb->Dirty)
{
IoStatus->Status = CcRosFlushVacb(iBcb->Vacb);
}
else
{
IoStatus->Status = STATUS_SUCCESS;
}
}
else
{
IoStatus->Status = STATUS_SUCCESS;
}
if (iBcb->PinCount != 0)
{
ExReleaseResourceLite(&iBcb->Lock);
@ -692,7 +659,6 @@ CcUnpinRepinnedBcb (
*/
CcRosReleaseVacb(iBcb->Vacb->SharedCacheMap,
iBcb->Vacb,
TRUE,
FALSE,
FALSE);

File diff suppressed because it is too large Load diff

View file

@ -1970,7 +1970,6 @@ Phase1InitializationDiscard(IN PVOID Context)
InbvEnableDisplayString(TRUE);
/* Launch initial process */
DPRINT("Free non-cache pages: %lx\n", MmAvailablePages + MiMemoryConsumers[MC_CACHE].PagesUsed);
ProcessInfo = &InitBuffer->ProcessInfo;
ExpLoadInitialProcess(InitBuffer, &ProcessParameters, &Environment);
@ -2009,7 +2008,6 @@ Phase1InitializationDiscard(IN PVOID Context)
/* Free the boot buffer */
ExFreePoolWithTag(InitBuffer, TAG_INIT);
DPRINT("Free non-cache pages: %lx\n", MmAvailablePages + MiMemoryConsumers[MC_CACHE].PagesUsed);
}
VOID

View file

@ -278,7 +278,7 @@ ExpGetRawSMBiosTable(
DPRINT1("IoWMIOpenBlock failed: 0x%08lx\n", Status);
return Status;
}
AllData = ExAllocatePoolWithTag(PagedPool, WMIBufSize, 'itfS');
if (AllData == NULL)
{
@ -719,7 +719,6 @@ QSI_DEF(SystemPerformanceInformation)
* Not sure this is right. 8^\
*/
Spi->CommittedPages = MiMemoryConsumers[MC_SYSTEM].PagesUsed +
MiMemoryConsumers[MC_CACHE].PagesUsed +
MiMemoryConsumers[MC_USER].PagesUsed +
MiUsedSwapPages;
/*
@ -767,7 +766,7 @@ QSI_DEF(SystemPerformanceInformation)
Spi->TotalSystemDriverPages = 0; /* FIXME */
Spi->Spare3Count = 0; /* FIXME */
Spi->ResidentSystemCachePage = MiMemoryConsumers[MC_CACHE].PagesUsed;
Spi->ResidentSystemCachePage = MiMemoryConsumers[MC_USER].PagesUsed; /* FIXME */
Spi->ResidentPagedPoolPage = 0; /* FIXME */
Spi->ResidentSystemDriverPage = 0; /* FIXME */
@ -1477,13 +1476,10 @@ QSI_DEF(SystemFileCacheInformation)
RtlZeroMemory(Sci, sizeof(SYSTEM_FILECACHE_INFORMATION));
/* Return the Byte size not the page size. */
Sci->CurrentSize =
MiMemoryConsumers[MC_CACHE].PagesUsed * PAGE_SIZE;
Sci->PeakSize =
MiMemoryConsumers[MC_CACHE].PagesUsed * PAGE_SIZE; /* FIXME */
Sci->CurrentSize = MiMemoryConsumers[MC_USER].PagesUsed; /* FIXME */
Sci->PeakSize = MiMemoryConsumers[MC_USER].PagesUsed; /* FIXME */
/* Taskmgr multiplies this one by page size right away */
Sci->CurrentSizeIncludingTransitionInPages =
MiMemoryConsumers[MC_CACHE].PagesUsed; /* FIXME: Should be */
Sci->CurrentSizeIncludingTransitionInPages = MiMemoryConsumers[MC_USER].PagesUsed; /* FIXME: Should be */
/* system working set and standby pages. */
Sci->PageFaultCount = 0; /* FIXME */
Sci->MinimumWorkingSet = 0; /* FIXME */

View file

@ -1714,9 +1714,13 @@ FsRtlAcquireFileForCcFlushEx(IN PFILE_OBJECT FileObject)
/* Return either success or inability to wait.
In case of other failure - fall through */
if (Status == STATUS_SUCCESS ||
Status == STATUS_CANT_WAIT)
if (NT_SUCCESS(Status))
return Status;
if (Status == STATUS_CANT_WAIT)
{
DPRINT1("STATUS_CANT_WAIT\n");
FsRtlExitFileSystem();
return Status;
}
}

View file

@ -175,10 +175,13 @@ typedef struct _ROS_SHARED_CACHE_MAP
LARGE_INTEGER FileSize;
LIST_ENTRY BcbList;
LARGE_INTEGER SectionSize;
LARGE_INTEGER ValidDataLength;
PFILE_OBJECT FileObject;
ULONG DirtyPages;
LIST_ENTRY SharedCacheMapLinks;
ULONG Flags;
PVOID Section;
PKEVENT CreateEvent;
PCACHE_MANAGER_CALLBACKS Callbacks;
PVOID LazyWriteContext;
LIST_ENTRY PrivateList;
@ -197,15 +200,13 @@ typedef struct _ROS_SHARED_CACHE_MAP
#define READAHEAD_DISABLED 0x1
#define WRITEBEHIND_DISABLED 0x2
#define SHARED_CACHE_MAP_IN_CREATION 0x4
#define SHARED_CACHE_MAP_IN_LAZYWRITE 0x8
typedef struct _ROS_VACB
{
/* Base address of the region where the view's data is mapped. */
PVOID BaseAddress;
/* Memory area representing the region where the view's data is mapped. */
struct _MEMORY_AREA* MemoryArea;
/* Are the contents of the view valid. */
BOOLEAN Valid;
/* Are the contents of the view newer than those on disk. */
BOOLEAN Dirty;
/* Page out in progress */
@ -308,20 +309,24 @@ CcMdlWriteComplete2(
);
NTSTATUS
NTAPI
CcRosFlushVacb(PROS_VACB Vacb);
CcRosFlushVacb(PROS_VACB Vacb, PIO_STATUS_BLOCK Iosb);
NTSTATUS
NTAPI
CcRosGetVacb(
PROS_SHARED_CACHE_MAP SharedCacheMap,
LONGLONG FileOffset,
PLONGLONG BaseOffset,
PVOID *BaseAddress,
PBOOLEAN UptoDate,
PROS_VACB *Vacb
);
BOOLEAN
CcRosEnsureVacbResident(
_In_ PROS_VACB Vacb,
_In_ BOOLEAN Wait,
_In_ BOOLEAN NoRead,
_In_ ULONG Offset,
_In_ ULONG Length
);
VOID
NTAPI
CcInitView(VOID);
@ -330,28 +335,10 @@ VOID
NTAPI
CcShutdownLazyWriter(VOID);
NTSTATUS
NTAPI
CcReadVirtualAddress(PROS_VACB Vacb);
NTSTATUS
NTAPI
CcWriteVirtualAddress(PROS_VACB Vacb);
BOOLEAN
NTAPI
CcInitializeCacheManager(VOID);
NTSTATUS
NTAPI
CcRosUnmapVacb(
PROS_SHARED_CACHE_MAP SharedCacheMap,
LONGLONG FileOffset,
BOOLEAN NowDirty
);
PROS_VACB
NTAPI
CcRosLookupVacb(
PROS_SHARED_CACHE_MAP SharedCacheMap,
LONGLONG FileOffset
@ -361,26 +348,16 @@ VOID
NTAPI
CcInitCacheZeroPage(VOID);
NTSTATUS
NTAPI
CcRosMarkDirtyFile(
PROS_SHARED_CACHE_MAP SharedCacheMap,
LONGLONG FileOffset
);
VOID
NTAPI
CcRosMarkDirtyVacb(
PROS_VACB Vacb);
VOID
NTAPI
CcRosUnmarkDirtyVacb(
PROS_VACB Vacb,
BOOLEAN LockViews);
NTSTATUS
NTAPI
CcRosFlushDirtyPages(
ULONG Target,
PULONG Count,
@ -389,39 +366,27 @@ CcRosFlushDirtyPages(
);
VOID
NTAPI
CcRosDereferenceCache(PFILE_OBJECT FileObject);
VOID
NTAPI
CcRosReferenceCache(PFILE_OBJECT FileObject);
VOID
NTAPI
CcRosRemoveIfClosed(PSECTION_OBJECT_POINTERS SectionObjectPointer);
NTSTATUS
NTAPI
CcRosReleaseVacb(
PROS_SHARED_CACHE_MAP SharedCacheMap,
PROS_VACB Vacb,
BOOLEAN Valid,
BOOLEAN Dirty,
BOOLEAN Mapped
);
NTSTATUS
NTAPI
CcRosRequestVacb(
PROS_SHARED_CACHE_MAP SharedCacheMap,
LONGLONG FileOffset,
PVOID* BaseAddress,
PBOOLEAN UptoDate,
PROS_VACB *Vacb
);
NTSTATUS
NTAPI
CcRosInitializeFileCache(
PFILE_OBJECT FileObject,
PCC_FILE_SIZES FileSizes,
@ -431,7 +396,6 @@ CcRosInitializeFileCache(
);
NTSTATUS
NTAPI
CcRosReleaseFileCache(
PFILE_OBJECT FileObject
);

View file

@ -148,3 +148,11 @@ NTSTATUS
NTAPI
FsRtlAcquireToCreateMappedSection(_In_ PFILE_OBJECT FileObject,
_In_ ULONG SectionPageProtection);
VOID
NTAPI
FsRtlReleaseFileForCcFlush(IN PFILE_OBJECT FileObject);
NTSTATUS
NTAPI
FsRtlAcquireFileForCcFlushEx(IN PFILE_OBJECT FileObject);

View file

@ -69,7 +69,9 @@ typedef ULONG_PTR SWAPENTRY;
#endif
#define MEMORY_AREA_SECTION_VIEW (1)
#ifdef NEWCC
#define MEMORY_AREA_CACHE (2)
#endif
#define MEMORY_AREA_OWNED_BY_ARM3 (15)
#define MEMORY_AREA_STATIC (0x80000000)
@ -87,13 +89,9 @@ typedef ULONG_PTR SWAPENTRY;
#define SEC_PHYSICALMEMORY (0x80000000)
#define MM_PAGEFILE_SEGMENT (0x1)
#define MM_DATAFILE_SEGMENT (0x2)
#define MC_CACHE (0)
#define MC_USER (1)
#define MC_SYSTEM (2)
#define MC_MAXIMUM (3)
#define MC_USER (0)
#define MC_SYSTEM (1)
#define MC_MAXIMUM (2)
#define PAGED_POOL_MASK 1
#define MUST_SUCCEED_POOL_MASK 2
@ -165,14 +163,15 @@ typedef ULONG_PTR SWAPENTRY;
typedef struct _MM_SECTION_SEGMENT
{
PFILE_OBJECT FileObject;
FAST_MUTEX Lock; /* lock which protects the page directory */
PFILE_OBJECT FileObject;
LARGE_INTEGER RawLength; /* length of the segment which is part of the mapped file */
LARGE_INTEGER Length; /* absolute length of the segment */
ULONG ReferenceCount;
ULONG CacheCount;
PLONG64 ReferenceCount;
ULONG SectionCount;
ULONG Protection;
ULONG Flags;
PULONG Flags;
BOOLEAN WriteCopy;
BOOLEAN Locked;
@ -183,32 +182,32 @@ typedef struct _MM_SECTION_SEGMENT
ULONG Characteristics;
} Image;
LIST_ENTRY ListOfSegments;
LONG64 RefCount;
ULONG SegFlags;
ULONGLONG LastPage;
RTL_GENERIC_TABLE PageTable;
} MM_SECTION_SEGMENT, *PMM_SECTION_SEGMENT;
typedef struct _MM_IMAGE_SECTION_OBJECT
{
PFILE_OBJECT FileObject;
LONG64 RefCount;
ULONG SegFlags;
SECTION_IMAGE_INFORMATION ImageInformation;
PVOID BasedAddress;
ULONG NrSegments;
PMM_SECTION_SEGMENT Segments;
} MM_IMAGE_SECTION_OBJECT, *PMM_IMAGE_SECTION_OBJECT;
typedef struct _ROS_SECTION_OBJECT
{
CSHORT Type;
CSHORT Size;
LARGE_INTEGER MaximumSize;
ULONG SectionPageProtection;
ULONG AllocationAttributes;
PFILE_OBJECT FileObject;
union
{
PMM_IMAGE_SECTION_OBJECT ImageSection;
PMM_SECTION_SEGMENT Segment;
};
} ROS_SECTION_OBJECT, *PROS_SECTION_OBJECT;
#define MM_PHYSICALMEMORY_SEGMENT (0x1)
#define MM_DATAFILE_SEGMENT (0x2)
#define MM_SEGMENT_INDELETE (0x4)
#define MM_SEGMENT_INCREATE (0x8)
#define MA_GetStartingAddress(_MemoryArea) ((_MemoryArea)->VadNode.StartingVpn << PAGE_SHIFT)
#define MA_GetEndingAddress(_MemoryArea) (((_MemoryArea)->VadNode.EndingVpn + 1) << PAGE_SHIFT)
@ -218,25 +217,17 @@ typedef struct _MEMORY_AREA
MMVAD VadNode;
ULONG Type;
ULONG Protect;
ULONG Flags;
BOOLEAN DeleteInProgress;
ULONG Magic;
PVOID Vad;
union
struct
{
struct
{
ROS_SECTION_OBJECT* Section;
LARGE_INTEGER ViewOffset;
PMM_SECTION_SEGMENT Segment;
LIST_ENTRY RegionListHead;
} SectionData;
struct
{
LIST_ENTRY RegionListHead;
} VirtualMemoryData;
} Data;
LONGLONG ViewOffset;
PMM_SECTION_SEGMENT Segment;
LIST_ENTRY RegionListHead;
} SectionData;
} MEMORY_AREA, *PMEMORY_AREA;
typedef struct _MM_RMAP_ENTRY
@ -368,6 +359,8 @@ typedef struct _MMPFN
// HACK until WS lists are supported
MMWSLE Wsle;
struct _MMPFN* NextLRU;
struct _MMPFN* PreviousLRU;
} MMPFN, *PMMPFN;
extern PMMPFN MmPfnDatabase;
@ -777,15 +770,6 @@ MmAccessFault(
IN PVOID TrapInformation
);
/* kmap.c ********************************************************************/
NTSTATUS
NTAPI
MiCopyFromUserPage(
PFN_NUMBER DestPage,
const VOID *SrcAddress
);
/* process.c *****************************************************************/
PVOID
@ -837,6 +821,8 @@ NTAPI
MmRebalanceMemoryConsumers(VOID);
/* rmap.c **************************************************************/
#define RMAP_SEGMENT_MASK ~((ULONG_PTR)0xff)
#define RMAP_IS_SEGMENT(x) (((ULONG_PTR)(x) & RMAP_SEGMENT_MASK) == RMAP_SEGMENT_MASK)
VOID
NTAPI
@ -880,11 +866,6 @@ MmInitializeRmapList(VOID);
VOID
NTAPI
MmSetCleanAllRmaps(PFN_NUMBER Page);
VOID
NTAPI
MmSetDirtyAllRmaps(PFN_NUMBER Page);
BOOLEAN
NTAPI
MmIsDirtyPageRmap(PFN_NUMBER Page);
@ -893,6 +874,11 @@ NTSTATUS
NTAPI
MmPageOutPhysicalAddress(PFN_NUMBER Page);
PMM_SECTION_SEGMENT
NTAPI
MmGetSectionAssociation(PFN_NUMBER Page,
PLARGE_INTEGER Offset);
/* freelist.c **********************************************************/
FORCEINLINE
@ -966,20 +952,12 @@ MiGetPfnEntryIndex(IN PMMPFN Pfn1)
PFN_NUMBER
NTAPI
MmGetLRUNextUserPage(PFN_NUMBER PreviousPage);
MmGetLRUNextUserPage(PFN_NUMBER PreviousPage, BOOLEAN MoveToLast);
PFN_NUMBER
NTAPI
MmGetLRUFirstUserPage(VOID);
VOID
NTAPI
MmInsertLRULastUserPage(PFN_NUMBER Page);
VOID
NTAPI
MmRemoveLRUUserPage(PFN_NUMBER Page);
VOID
NTAPI
MmDumpArmPfnDatabase(
@ -1204,6 +1182,14 @@ MmIsDirtyPage(
PVOID Address
);
VOID
NTAPI
MmClearPageAccessedBit(PEPROCESS Process, PVOID Address);
BOOLEAN
NTAPI
MmIsPageAccessed(PEPROCESS Process, PVOID Address);
/* wset.c ********************************************************************/
NTSTATUS
@ -1248,6 +1234,41 @@ MmFindRegion(
/* section.c *****************************************************************/
#define PFN_FROM_SSE(E) ((PFN_NUMBER)((E) >> PAGE_SHIFT))
#define IS_SWAP_FROM_SSE(E) ((E) & 0x00000001)
#define MM_IS_WAIT_PTE(E) \
(IS_SWAP_FROM_SSE(E) && SWAPENTRY_FROM_SSE(E) == MM_WAIT_ENTRY)
#define MAKE_PFN_SSE(P) ((ULONG_PTR)((P) << PAGE_SHIFT))
#define SWAPENTRY_FROM_SSE(E) ((E) >> 1)
#define MAKE_SWAP_SSE(S) (((ULONG_PTR)(S) << 1) | 0x1)
#define DIRTY_SSE(E) ((E) | 2)
#define CLEAN_SSE(E) ((E) & ~2)
#define IS_DIRTY_SSE(E) ((E) & 2)
#define WRITE_SSE(E) ((E) | 4)
#define IS_WRITE_SSE(E) ((E) & 4)
#define PAGE_FROM_SSE(E) ((E) & 0xFFFFF000)
#define SHARE_COUNT_FROM_SSE(E) (((E) & 0x00000FFC) >> 3)
#define MAX_SHARE_COUNT 0x1FF
#define MAKE_SSE(P, C) ((ULONG_PTR)((P) | ((C) << 3)))
#define BUMPREF_SSE(E) (PAGE_FROM_SSE(E) | ((SHARE_COUNT_FROM_SSE(E) + 1) << 3) | ((E) & 0x7))
#define DECREF_SSE(E) (PAGE_FROM_SSE(E) | ((SHARE_COUNT_FROM_SSE(E) - 1) << 3) | ((E) & 0x7))
VOID
NTAPI
_MmLockSectionSegment(PMM_SECTION_SEGMENT Segment,
const char *file,
int line);
#define MmLockSectionSegment(x) _MmLockSectionSegment(x,__FILE__,__LINE__)
VOID
NTAPI
_MmUnlockSectionSegment(PMM_SECTION_SEGMENT Segment,
const char *file,
int line);
#define MmUnlockSectionSegment(x) _MmUnlockSectionSegment(x,__FILE__,__LINE__)
VOID
NTAPI
MmGetImageInformation(
@ -1306,15 +1327,6 @@ MmNotPresentFaultSectionView(
BOOLEAN Locked
);
NTSTATUS
NTAPI
MmPageOutSectionView(
PMMSUPPORT AddressSpace,
PMEMORY_AREA MemoryArea,
PVOID Address,
ULONG_PTR Entry
);
NTSTATUS
NTAPI
MmCreatePhysicalMemorySection(VOID);
@ -1331,6 +1343,110 @@ VOID
NTAPI
MmFreeSectionSegments(PFILE_OBJECT FileObject);
/* Exported from NT 6.2 Onward. We keep it internal. */
NTSTATUS
NTAPI
MmMapViewInSystemSpaceEx (
_In_ PVOID Section,
_Outptr_result_bytebuffer_ (*ViewSize) PVOID *MappedBase,
_Inout_ PSIZE_T ViewSize,
_Inout_ PLARGE_INTEGER SectionOffset,
_In_ ULONG_PTR Flags
);
BOOLEAN
NTAPI
MmArePagesResident(
_In_ PEPROCESS Process,
_In_ PVOID BaseAddress,
_In_ ULONG Length);
NTSTATUS
NTAPI
MmMakePagesDirty(
_In_ PEPROCESS Process,
_In_ PVOID Address,
_In_ ULONG Length);
NTSTATUS
NTAPI
MmRosFlushVirtualMemory(
_In_ PEPROCESS Process,
_Inout_ PVOID* Address,
_Inout_ PSIZE_T Length,
_Out_ PIO_STATUS_BLOCK Iosb);
NTSTATUS
NTAPI
MmFlushSegment(
_In_ PSECTION_OBJECT_POINTERS SectionObjectPointer,
_In_opt_ PLARGE_INTEGER Offset,
_In_ ULONG Length,
_In_opt_ PIO_STATUS_BLOCK Iosb);
NTSTATUS
NTAPI
MmMakeDataSectionResident(
_In_ PSECTION_OBJECT_POINTERS SectionObjectPointer,
_In_ LONGLONG Offset,
_In_ ULONG Length,
_In_ PLARGE_INTEGER ValidDataLength);
BOOLEAN
NTAPI
MmPurgeSegment(
_In_ PSECTION_OBJECT_POINTERS SectionObjectPointer,
_In_opt_ PLARGE_INTEGER Offset,
_In_ ULONG Length);
BOOLEAN
NTAPI
MmCheckDirtySegment(
PMM_SECTION_SEGMENT Segment,
PLARGE_INTEGER Offset,
BOOLEAN ForceDirty,
BOOLEAN PageOut);
BOOLEAN
NTAPI
MmUnsharePageEntrySectionSegment(PMEMORY_AREA MemoryArea,
PMM_SECTION_SEGMENT Segment,
PLARGE_INTEGER Offset,
BOOLEAN Dirty,
BOOLEAN PageOut,
ULONG_PTR *InEntry);
VOID
NTAPI
MmDereferenceSegment(PMM_SECTION_SEGMENT Segment);
NTSTATUS
NTAPI
MmExtendSection(
_In_ PVOID Section,
_Inout_ PLARGE_INTEGER NewSize);
/* sptab.c *******************************************************************/
NTSTATUS
NTAPI
_MmSetPageEntrySectionSegment(PMM_SECTION_SEGMENT Segment,
PLARGE_INTEGER Offset,
ULONG_PTR Entry,
const char *file,
int line);
ULONG_PTR
NTAPI
_MmGetPageEntrySectionSegment(PMM_SECTION_SEGMENT Segment,
PLARGE_INTEGER Offset,
const char *file,
int line);
#define MmSetPageEntrySectionSegment(S,O,E) _MmSetPageEntrySectionSegment(S,O,E,__FILE__,__LINE__)
#define MmGetPageEntrySectionSegment(S,O) _MmGetPageEntrySectionSegment(S,O,__FILE__,__LINE__)
/* sysldr.c ******************************************************************/
VOID

View file

@ -63,7 +63,9 @@
#include <regstr.h>
#include <ntstrsafe.h>
#include <ntpoapi.h>
#define ENABLE_INTSAFE_SIGNED_FUNCTIONS
#include <ntintsafe.h>
#undef ENABLE_INTSAFE_SIGNED_FUNCTIONS
/* C Headers */
#include <stdlib.h>

View file

@ -3014,10 +3014,8 @@ NtReadFile(IN HANDLE FileHandle,
/* Now set the deferred read flags */
Irp->Flags |= (IRP_READ_OPERATION | IRP_DEFER_IO_COMPLETION);
#if 0
/* FIXME: VFAT SUCKS */
if (FileObject->Flags & FO_NO_INTERMEDIATE_BUFFERING) Irp->Flags |= IRP_NOCACHE;
#endif
/* Perform the call */
return IopPerformSynchronousRequest(DeviceObject,
@ -4082,10 +4080,8 @@ NtWriteFile(IN HANDLE FileHandle,
/* Now set the deferred read flags */
Irp->Flags |= (IRP_WRITE_OPERATION | IRP_DEFER_IO_COMPLETION);
#if 0
/* FIXME: VFAT SUCKS */
if (FileObject->Flags & FO_NO_INTERMEDIATE_BUFFERING) Irp->Flags |= IRP_NOCACHE;
#endif
/* Perform the call */
return IopPerformSynchronousRequest(DeviceObject,

View file

@ -1071,9 +1071,8 @@ FORCEINLINE
BOOLEAN
MiIsRosSectionObject(IN PVOID Section)
{
PROS_SECTION_OBJECT RosSection = Section;
if ((RosSection->Type == 'SC') && (RosSection->Size == 'TN')) return TRUE;
return FALSE;
PSECTION RosSection = Section;
return RosSection->u.Flags.filler;
}
#define MI_IS_ROS_PFN(x) ((x)->u4.AweAllocation == TRUE)

View file

@ -941,7 +941,6 @@ MmInitializeProcessAddressSpace(IN PEPROCESS Process,
NTSTATUS Status = STATUS_SUCCESS;
SIZE_T ViewSize = 0;
PVOID ImageBase = 0;
PROS_SECTION_OBJECT SectionObject = Section;
PMMPTE PointerPte;
KIRQL OldIrql;
PMMPDE PointerPde;
@ -1048,10 +1047,11 @@ MmInitializeProcessAddressSpace(IN PEPROCESS Process,
#endif
/* Check if there's a Section Object */
if (SectionObject)
if (Section)
{
/* Determine the image file name and save it to EPROCESS */
FileName = SectionObject->FileObject->FileName;
PFILE_OBJECT FileObject = MmGetFileObjectForSection(Section);
FileName = FileObject->FileName;
Source = (PWCHAR)((PCHAR)FileName.Buffer + FileName.Length);
if (FileName.Buffer)
{
@ -1083,9 +1083,7 @@ MmInitializeProcessAddressSpace(IN PEPROCESS Process,
if (AuditName)
{
/* Setup the audit name */
Status = SeInitializeProcessAuditName(SectionObject->FileObject,
FALSE,
AuditName);
Status = SeInitializeProcessAuditName(FileObject, FALSE, AuditName);
if (!NT_SUCCESS(Status))
{
/* Fail */

View file

@ -412,16 +412,20 @@ MiInsertInSystemSpace(IN PMMSESSION Session,
return Base;
}
static
NTSTATUS
NTAPI
MiAddMappedPtes(IN PMMPTE FirstPte,
IN PFN_NUMBER PteCount,
IN PCONTROL_AREA ControlArea)
IN PCONTROL_AREA ControlArea,
IN LONGLONG SectionOffset)
{
MMPTE TempPte;
PMMPTE PointerPte, ProtoPte, LastProtoPte, LastPte;
PSUBSECTION Subsection;
/* Mapping at offset not supported yet */
ASSERT(SectionOffset == 0);
/* ARM3 doesn't support this yet */
ASSERT(ControlArea->u.Flags.GlobalOnlyPerSession == 0);
ASSERT(ControlArea->u.Flags.Rom == 0);
@ -829,12 +833,17 @@ MiUnmapViewOfSection(IN PEPROCESS Process,
PEPROCESS CurrentProcess = PsGetCurrentProcess();
PAGED_CODE();
/* Check if we need to lock the address space */
if (!Flags) MmLockAddressSpace(&Process->Vm);
/* Check for Mm Region */
MemoryArea = MmLocateMemoryAreaByAddress(&Process->Vm, BaseAddress);
if ((MemoryArea) && (MemoryArea->Type != MEMORY_AREA_OWNED_BY_ARM3))
{
/* Call Mm API */
return MiRosUnmapViewOfSection(Process, BaseAddress, Process->ProcessExiting);
NTSTATUS Status = MiRosUnmapViewOfSection(Process, BaseAddress, Process->ProcessExiting);
if (!Flags) MmUnlockAddressSpace(&Process->Vm);
return Status;
}
/* Check if we should attach to the process */
@ -845,10 +854,7 @@ MiUnmapViewOfSection(IN PEPROCESS Process,
Attached = TRUE;
}
/* Check if we need to lock the address space */
if (!Flags) MmLockAddressSpace(&Process->Vm);
/* Check if the process is already daed */
/* Check if the process is already dead */
if (Process->VmDeleted)
{
/* Fail the call */
@ -1048,15 +1054,17 @@ _WARN("MiSessionCommitPageTables halfplemented for amd64")
}
NTSTATUS
NTAPI
MiMapViewInSystemSpace(IN PVOID Section,
IN PMMSESSION Session,
OUT PVOID *MappedBase,
IN OUT PSIZE_T ViewSize)
MiMapViewInSystemSpace(
_In_ PVOID Section,
_In_ PMMSESSION Session,
_Outptr_result_bytebuffer_ (*ViewSize) PVOID *MappedBase,
_Inout_ PSIZE_T ViewSize,
_Inout_ PLARGE_INTEGER SectionOffset)
{
PVOID Base;
PCONTROL_AREA ControlArea;
ULONG Buckets, SectionSize;
ULONG Buckets;
LONGLONG SectionSize;
NTSTATUS Status;
PAGED_CODE();
@ -1073,13 +1081,31 @@ MiMapViewInSystemSpace(IN PVOID Section,
ASSERT(NT_SUCCESS(Status));
/* Get the section size at creation time */
SectionSize = ((PSECTION)Section)->SizeOfSection.LowPart;
SectionSize = ((PSECTION)Section)->SizeOfSection.QuadPart;
/* If the caller didn't specify a view size, assume the whole section */
if (!(*ViewSize)) *ViewSize = SectionSize;
/* If the caller didn't specify a view size, assume until the end of the section */
if (!(*ViewSize))
{
/* Check for overflow first */
if ((SectionSize - SectionOffset->QuadPart) > SIZE_T_MAX)
{
DPRINT1("Section end is too far away from the specified offset.\n");
MiDereferenceControlArea(ControlArea);
return STATUS_INVALID_VIEW_SIZE;
}
*ViewSize = SectionSize - SectionOffset->QuadPart;
}
/* Check overflow */
if ((SectionOffset->QuadPart + *ViewSize) < SectionOffset->QuadPart)
{
DPRINT1("Integer overflow between size & offset!\n");
MiDereferenceControlArea(ControlArea);
return STATUS_INVALID_VIEW_SIZE;
}
/* Check if the caller wanted a larger section than the view */
if (*ViewSize > SectionSize)
if (SectionOffset->QuadPart + *ViewSize > SectionSize)
{
/* Fail */
DPRINT1("View is too large\n");
@ -1129,7 +1155,8 @@ MiMapViewInSystemSpace(IN PVOID Section,
/* Create the actual prototype PTEs for this mapping */
Status = MiAddMappedPtes(MiAddressToPte(Base),
BYTES_TO_PAGES(*ViewSize),
ControlArea);
ControlArea,
SectionOffset->QuadPart);
ASSERT(NT_SUCCESS(Status));
/* Return the base adress of the mapping and success */
@ -1216,6 +1243,8 @@ MiLoadUserSymbols(IN PCONTROL_AREA ControlArea,
_SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
ExFreePoolWithTag(LdrEntry, 'bDmM');
ExReleaseResourceLite(&PsLoadedModuleResource);
KeLeaveCriticalRegion();
_SEH2_YIELD(return);
}
_SEH2_END;
@ -1655,20 +1684,23 @@ MiGetFileObjectForSectionAddress(
if (Vad->u.VadFlags.Spare != 0)
{
PMEMORY_AREA MemoryArea = (PMEMORY_AREA)Vad;
PROS_SECTION_OBJECT Section;
/* Check if it's a section view (RosMm section) */
if (MemoryArea->Type == MEMORY_AREA_SECTION_VIEW)
{
/* Get the section pointer to the SECTION_OBJECT */
Section = MemoryArea->Data.SectionData.Section;
*FileObject = Section->FileObject;
*FileObject = MemoryArea->SectionData.Segment->FileObject;
}
else
{
#ifdef NEWCC
ASSERT(MemoryArea->Type == MEMORY_AREA_CACHE);
DPRINT1("Address is a cache section!\n");
return STATUS_SECTION_NOT_IMAGE;
#else
ASSERT(FALSE);
return STATUS_SECTION_NOT_IMAGE;
#endif
}
}
else
@ -1700,7 +1732,7 @@ PFILE_OBJECT
NTAPI
MmGetFileObjectForSection(IN PVOID SectionObject)
{
PSECTION_OBJECT Section;
PSECTION Section = SectionObject;
ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
ASSERT(SectionObject != NULL);
@ -1708,12 +1740,11 @@ MmGetFileObjectForSection(IN PVOID SectionObject)
if (MiIsRosSectionObject(SectionObject) == FALSE)
{
/* Return the file pointer stored in the control area */
Section = SectionObject;
return Section->Segment->ControlArea->FilePointer;
}
/* Return the file object */
return ((PROS_SECTION_OBJECT)SectionObject)->FileObject;
return ((PMM_SECTION_SEGMENT)Section->Segment)->FileObject;
}
static
@ -1728,19 +1759,21 @@ MiGetFileObjectForVad(
if (Vad->u.VadFlags.Spare != 0)
{
PMEMORY_AREA MemoryArea = (PMEMORY_AREA)Vad;
PROS_SECTION_OBJECT Section;
/* Check if it's a section view (RosMm section) */
if (MemoryArea->Type == MEMORY_AREA_SECTION_VIEW)
{
/* Get the section pointer to the SECTION_OBJECT */
Section = MemoryArea->Data.SectionData.Section;
FileObject = Section->FileObject;
FileObject = MemoryArea->SectionData.Segment->FileObject;
}
else
{
#ifdef NEWCC
ASSERT(MemoryArea->Type == MEMORY_AREA_CACHE);
DPRINT1("VAD is a cache section!\n");
#else
ASSERT(FALSE);
#endif
return NULL;
}
}
@ -1773,15 +1806,21 @@ VOID
NTAPI
MmGetImageInformation (OUT PSECTION_IMAGE_INFORMATION ImageInformation)
{
PSECTION_OBJECT SectionObject;
PSECTION SectionObject;
/* Get the section object of this process*/
SectionObject = PsGetCurrentProcess()->SectionObject;
ASSERT(SectionObject != NULL);
ASSERT(MiIsRosSectionObject(SectionObject) == TRUE);
if (SectionObject->u.Flags.Image == 0)
{
RtlZeroMemory(ImageInformation, sizeof(*ImageInformation));
return;
}
/* Return the image information */
*ImageInformation = ((PROS_SECTION_OBJECT)SectionObject)->ImageSection->ImageInformation;
*ImageInformation = ((PMM_IMAGE_SECTION_OBJECT)SectionObject->Segment)->ImageInformation;
}
NTSTATUS
@ -1822,19 +1861,10 @@ MmGetFileNameForSection(IN PVOID Section,
OUT POBJECT_NAME_INFORMATION *ModuleName)
{
PFILE_OBJECT FileObject;
PSECTION SectionObject = Section;
/* Make sure it's an image section */
if (MiIsRosSectionObject(Section) == FALSE)
{
/* Check ARM3 Section flag */
if (((PSECTION)Section)->u.Flags.Image == 0)
{
/* It's not, fail */
DPRINT1("Not an image section\n");
return STATUS_SECTION_NOT_IMAGE;
}
}
else if (!(((PROS_SECTION_OBJECT)Section)->AllocationAttributes & SEC_IMAGE))
if (SectionObject->u.Flags.Image == 0)
{
/* It's not, fail */
DPRINT1("Not an image section\n");
@ -2916,7 +2946,7 @@ MmMapViewOfArm3Section(IN PVOID SectionObject,
if (!(*ViewSize))
{
/* Compute it for the caller */
CalculatedViewSize = Section->SizeOfSection.QuadPart -
CalculatedViewSize = Section->SizeOfSection.QuadPart -
SectionOffset->QuadPart;
/* Check if it's larger than 4GB or overflows into kernel-mode */
@ -3015,6 +3045,7 @@ MmMapViewInSessionSpace(IN PVOID Section,
IN OUT PSIZE_T ViewSize)
{
PAGED_CODE();
LARGE_INTEGER SectionOffset;
// HACK
if (MiIsRosSectionObject(Section))
@ -3031,10 +3062,12 @@ MmMapViewInSessionSpace(IN PVOID Section,
/* Use the system space API, but with the session view instead */
ASSERT(MmIsAddressValid(MmSessionSpace) == TRUE);
SectionOffset.QuadPart = 0;
return MiMapViewInSystemSpace(Section,
&MmSessionSpace->Session,
MappedBase,
ViewSize);
ViewSize,
&SectionOffset);
}
/*
@ -3087,11 +3120,15 @@ MmUnmapViewInSystemSpace(IN PVOID MappedBase)
PAGED_CODE();
/* Was this mapped by RosMm? */
MmLockAddressSpace(MmGetKernelAddressSpace());
MemoryArea = MmLocateMemoryAreaByAddress(MmGetKernelAddressSpace(), MappedBase);
if ((MemoryArea) && (MemoryArea->Type != MEMORY_AREA_OWNED_BY_ARM3))
{
return MiRosUnmapViewInSystemSpace(MappedBase);
NTSTATUS Status = MiRosUnmapViewInSystemSpace(MappedBase);
MmUnlockAddressSpace(MmGetKernelAddressSpace());
return Status;
}
MmUnlockAddressSpace(MmGetKernelAddressSpace());
/* It was not, call the ARM3 routine */
return MiUnmapViewInSystemSpace(&MmSession, MappedBase);
@ -3585,7 +3622,7 @@ NtMapViewOfSection(IN HANDLE SectionHandle,
PVOID SafeBaseAddress;
LARGE_INTEGER SafeSectionOffset;
SIZE_T SafeViewSize;
PROS_SECTION_OBJECT Section;
PSECTION Section;
PEPROCESS Process;
NTSTATUS Status;
ACCESS_MASK DesiredAccess;
@ -3715,8 +3752,7 @@ NtMapViewOfSection(IN HANDLE SectionHandle,
return Status;
}
if (MiIsRosSectionObject(Section) &&
(Section->AllocationAttributes & SEC_PHYSICALMEMORY))
if (Section->u.Flags.PhysicalMemory)
{
if (PreviousMode == UserMode &&
SafeSectionOffset.QuadPart + SafeViewSize > MmHighestPhysicalPage << PAGE_SHIFT)
@ -3764,8 +3800,7 @@ NtMapViewOfSection(IN HANDLE SectionHandle,
if (NT_SUCCESS(Status))
{
/* Check if this is an image for the current process */
if (MiIsRosSectionObject(Section) &&
(Section->AllocationAttributes & SEC_IMAGE) &&
if ((Section->u.Flags.Image) &&
(Process == PsGetCurrentProcess()) &&
(Status != STATUS_IMAGE_NOT_AT_BASE))
{
@ -3836,7 +3871,7 @@ NtExtendSection(IN HANDLE SectionHandle,
IN OUT PLARGE_INTEGER NewMaximumSize)
{
LARGE_INTEGER SafeNewMaximumSize;
PROS_SECTION_OBJECT Section;
PSECTION Section;
NTSTATUS Status;
KPROCESSOR_MODE PreviousMode = ExGetPreviousMode();
@ -3872,30 +3907,24 @@ NtExtendSection(IN HANDLE SectionHandle,
NULL);
if (!NT_SUCCESS(Status)) return Status;
/* Really this should go in MmExtendSection */
if (!(Section->AllocationAttributes & SEC_FILE))
{
DPRINT1("Not extending a file\n");
ObDereferenceObject(Section);
return STATUS_SECTION_NOT_EXTENDED;
}
/* FIXME: Do the work */
Status = MmExtendSection(Section, &SafeNewMaximumSize);
/* Dereference the section */
ObDereferenceObject(Section);
/* Enter SEH */
_SEH2_TRY
if (NT_SUCCESS(Status))
{
/* Write back the new size */
*NewMaximumSize = SafeNewMaximumSize;
_SEH2_TRY
{
/* Write back the new size */
*NewMaximumSize = SafeNewMaximumSize;
}
_SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
Status = _SEH2_GetExceptionCode();
}
_SEH2_END;
}
_SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
/* Nothing to do */
}
_SEH2_END;
/* Return the status */
return STATUS_NOT_IMPLEMENTED;

View file

@ -81,13 +81,13 @@ MiCacheImageSymbols(IN PVOID BaseAddress)
NTSTATUS
NTAPI
MiLoadImageSection(IN OUT PVOID *SectionPtr,
OUT PVOID *ImageBase,
IN PUNICODE_STRING FileName,
IN BOOLEAN SessionLoad,
IN PLDR_DATA_TABLE_ENTRY LdrEntry)
MiLoadImageSection(_Inout_ PSECTION *SectionPtr,
_Out_ PVOID *ImageBase,
_In_ PUNICODE_STRING FileName,
_In_ BOOLEAN SessionLoad,
_In_ PLDR_DATA_TABLE_ENTRY LdrEntry)
{
PROS_SECTION_OBJECT Section = *SectionPtr;
PSECTION Section = *SectionPtr;
NTSTATUS Status;
PEPROCESS Process;
PVOID Base = NULL;
@ -158,7 +158,7 @@ MiLoadImageSection(IN OUT PVOID *SectionPtr,
}
/* Reserve system PTEs needed */
PteCount = ROUND_TO_PAGES(Section->ImageSection->ImageInformation.ImageFileSize) >> PAGE_SHIFT;
PteCount = ROUND_TO_PAGES(((PMM_IMAGE_SECTION_OBJECT)Section->Segment)->ImageInformation.ImageFileSize) >> PAGE_SHIFT;
PointerPte = MiReserveSystemPtes(PteCount, SystemPteSpace);
if (!PointerPte)
{
@ -2837,7 +2837,7 @@ MmLoadSystemImage(IN PUNICODE_STRING FileName,
PWCHAR MissingDriverName;
HANDLE SectionHandle;
ACCESS_MASK DesiredAccess;
PVOID Section = NULL;
PSECTION Section = NULL;
BOOLEAN LockOwned = FALSE;
PLIST_ENTRY NextEntry;
IMAGE_INFO ImageInfo;
@ -3054,7 +3054,7 @@ LoaderScan:
SECTION_MAP_EXECUTE,
MmSectionObjectType,
KernelMode,
&Section,
(PVOID*)&Section,
NULL);
ZwClose(SectionHandle);
if (!NT_SUCCESS(Status)) goto Quickie;
@ -3085,7 +3085,7 @@ LoaderScan:
ASSERT(Status != STATUS_ALREADY_COMMITTED);
/* Get the size of the driver */
DriverSize = ((PROS_SECTION_OBJECT)Section)->ImageSection->ImageInformation.ImageFileSize;
DriverSize = ((PMM_IMAGE_SECTION_OBJECT)Section->Segment)->ImageInformation.ImageFileSize;
/* Make sure we're not being loaded into session space */
if (!Flags)

View file

@ -1356,12 +1356,8 @@ MmFlushVirtualMemory(IN PEPROCESS Process,
OUT PIO_STATUS_BLOCK IoStatusBlock)
{
PAGED_CODE();
UNIMPLEMENTED;
//
// Fake success
//
return STATUS_SUCCESS;
/* For now we call the old Mm */
return MmRosFlushVirtualMemory(Process, BaseAddress, RegionSize, IoStatusBlock);
}
ULONG

View file

@ -46,7 +46,7 @@ MmZeroPageThread(VOID)
/* Get the discardable sections to free them */
MiFindInitializationCode(&StartAddress, &EndAddress);
if (StartAddress) MiFreeInitializationCode(StartAddress, EndAddress);
DPRINT("Free non-cache pages: %lx\n", MmAvailablePages + MiMemoryConsumers[MC_CACHE].PagesUsed);
DPRINT("Free pages: %lx\n", MmAvailablePages);
/* Set our priority to 0 */
Thread->BasePriority = 0;

View file

@ -28,7 +28,6 @@ MM_ALLOCATION_REQUEST, *PMM_ALLOCATION_REQUEST;
MM_MEMORY_CONSUMER MiMemoryConsumers[MC_MAXIMUM];
static ULONG MiMinimumAvailablePages;
static ULONG MiNrTotalPages;
static LIST_ENTRY AllocationListHead;
static KSPIN_LOCK AllocationListLock;
static ULONG MiMinimumPagesPerRun;
@ -38,6 +37,8 @@ static HANDLE MiBalancerThreadHandle = NULL;
static KEVENT MiBalancerEvent;
static KTIMER MiBalancerTimer;
static LONG PageOutThreadActive;
/* FUNCTIONS ****************************************************************/
CODE_SEG("INIT")
@ -49,24 +50,10 @@ MmInitializeBalancer(ULONG NrAvailablePages, ULONG NrSystemPages)
InitializeListHead(&AllocationListHead);
KeInitializeSpinLock(&AllocationListLock);
MiNrTotalPages = NrAvailablePages;
/* Set up targets. */
MiMinimumAvailablePages = 256;
MiMinimumPagesPerRun = 256;
if ((NrAvailablePages + NrSystemPages) >= 8192)
{
MiMemoryConsumers[MC_CACHE].PagesTarget = NrAvailablePages / 4 * 3;
}
else if ((NrAvailablePages + NrSystemPages) >= 4096)
{
MiMemoryConsumers[MC_CACHE].PagesTarget = NrAvailablePages / 3 * 2;
}
else
{
MiMemoryConsumers[MC_CACHE].PagesTarget = NrAvailablePages / 8;
}
MiMemoryConsumers[MC_USER].PagesTarget = NrAvailablePages - MiMinimumAvailablePages;
MiMemoryConsumers[MC_USER].PagesTarget = NrAvailablePages / 2;
}
CODE_SEG("INIT")
@ -95,11 +82,7 @@ MmReleasePageMemoryConsumer(ULONG Consumer, PFN_NUMBER Page)
KeBugCheck(MEMORY_MANAGEMENT);
}
if (MmGetReferenceCountPage(Page) == 1)
{
if(Consumer == MC_USER) MmRemoveLRUUserPage(Page);
(void)InterlockedDecrementUL(&MiMemoryConsumers[Consumer].PagesUsed);
}
(void)InterlockedDecrementUL(&MiMemoryConsumers[Consumer].PagesUsed);
MmDereferencePage(Page);
@ -121,28 +104,21 @@ MiTrimMemoryConsumer(ULONG Consumer, ULONG InitialTarget)
return InitialTarget;
}
if (MiMemoryConsumers[Consumer].PagesUsed > MiMemoryConsumers[Consumer].PagesTarget)
{
/* Consumer page limit exceeded */
Target = max(Target, MiMemoryConsumers[Consumer].PagesUsed - MiMemoryConsumers[Consumer].PagesTarget);
}
if (MmAvailablePages < MiMinimumAvailablePages)
{
/* Global page limit exceeded */
Target = (ULONG)max(Target, MiMinimumAvailablePages - MmAvailablePages);
}
else if (MiMemoryConsumers[Consumer].PagesUsed > MiMemoryConsumers[Consumer].PagesTarget)
{
/* Consumer page limit exceeded */
Target = max(Target, MiMemoryConsumers[Consumer].PagesUsed - MiMemoryConsumers[Consumer].PagesTarget);
}
if (Target)
{
if (!InitialTarget)
{
/* If there was no initial target,
* swap at least MiMinimumPagesPerRun */
Target = max(Target, MiMinimumPagesPerRun);
}
/* Now swap the pages out */
Status = MiMemoryConsumers[Consumer].Trim(Target, 0, &NrFreedPages);
Status = MiMemoryConsumers[Consumer].Trim(Target, MmAvailablePages < MiMinimumAvailablePages, &NrFreedPages);
DPRINT("Trimming consumer %lu: Freed %lu pages with a target of %lu pages\n", Consumer, NrFreedPages, Target);
@ -150,50 +126,128 @@ MiTrimMemoryConsumer(ULONG Consumer, ULONG InitialTarget)
{
KeBugCheck(MEMORY_MANAGEMENT);
}
/* Update the target */
if (NrFreedPages < Target)
Target -= NrFreedPages;
else
Target = 0;
/* Return the remaining pages needed to meet the target */
return Target;
}
else
{
/* Initial target is zero and we don't have anything else to add */
return 0;
}
/* Return the page count needed to be freed to meet the initial target */
return (InitialTarget > NrFreedPages) ? (InitialTarget - NrFreedPages) : 0;
}
NTSTATUS
MmTrimUserMemory(ULONG Target, ULONG Priority, PULONG NrFreedPages)
{
PFN_NUMBER CurrentPage;
PFN_NUMBER NextPage;
NTSTATUS Status;
(*NrFreedPages) = 0;
DPRINT1("MM BALANCER: %s\n", Priority ? "Paging out!" : "Removing access bit!");
CurrentPage = MmGetLRUFirstUserPage();
while (CurrentPage != 0 && Target > 0)
{
Status = MmPageOutPhysicalAddress(CurrentPage);
if (NT_SUCCESS(Status))
if (Priority)
{
DPRINT("Succeeded\n");
Status = MmPageOutPhysicalAddress(CurrentPage);
if (NT_SUCCESS(Status))
{
DPRINT("Succeeded\n");
Target--;
(*NrFreedPages)++;
}
}
else
{
/* When not paging-out agressively, just reset the accessed bit */
PEPROCESS Process = NULL;
PVOID Address = NULL;
BOOLEAN Accessed = FALSE;
/*
* We have a lock-ordering problem here. We cant lock the PFN DB before the Process address space.
* So we must use circonvoluted loops.
* Well...
*/
while (TRUE)
{
KAPC_STATE ApcState;
KIRQL OldIrql = MiAcquirePfnLock();
PMM_RMAP_ENTRY Entry = MmGetRmapListHeadPage(CurrentPage);
while (Entry)
{
if (RMAP_IS_SEGMENT(Entry->Address))
{
Entry = Entry->Next;
continue;
}
/* Check that we didn't treat this entry before */
if (Entry->Address < Address)
{
Entry = Entry->Next;
continue;
}
if ((Entry->Address == Address) && (Entry->Process <= Process))
{
Entry = Entry->Next;
continue;
}
break;
}
if (!Entry)
{
MiReleasePfnLock(OldIrql);
break;
}
Process = Entry->Process;
Address = Entry->Address;
MiReleasePfnLock(OldIrql);
KeStackAttachProcess(&Process->Pcb, &ApcState);
MmLockAddressSpace(&Process->Vm);
/* Be sure this is still valid. */
PMMPTE Pte = MiAddressToPte(Address);
if (Pte->u.Hard.Valid)
{
Accessed = Accessed || Pte->u.Hard.Accessed;
Pte->u.Hard.Accessed = 0;
/* There is no need to invalidate, the balancer thread is never on a user process */
//KeInvalidateTlbEntry(Address);
}
MmUnlockAddressSpace(&Process->Vm);
KeUnstackDetachProcess(&ApcState);
}
if (!Accessed)
{
/* Nobody accessed this page since the last time we check. Time to clean up */
Status = MmPageOutPhysicalAddress(CurrentPage);
// DPRINT1("Paged-out one page: %s\n", NT_SUCCESS(Status) ? "Yes" : "No");
(void)Status;
}
/* Done for this page. */
Target--;
(*NrFreedPages)++;
}
NextPage = MmGetLRUNextUserPage(CurrentPage);
if (NextPage <= CurrentPage)
{
/* We wrapped around, so we're done */
break;
}
CurrentPage = NextPage;
CurrentPage = MmGetLRUNextUserPage(CurrentPage, TRUE);
}
if (CurrentPage)
{
KIRQL OldIrql = MiAcquirePfnLock();
MmDereferencePage(CurrentPage);
MiReleasePfnLock(OldIrql);
}
return STATUS_SUCCESS;
@ -210,8 +264,7 @@ VOID
NTAPI
MmRebalanceMemoryConsumers(VOID)
{
if (MiBalancerThreadHandle != NULL &&
!MiIsBalancerThread())
if (InterlockedCompareExchange(&PageOutThreadActive, 0, 1) == 0)
{
KeSetEvent(&MiBalancerEvent, IO_NO_INCREMENT, FALSE);
}
@ -222,80 +275,10 @@ NTAPI
MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
PPFN_NUMBER AllocatedPage)
{
ULONG PagesUsed;
PFN_NUMBER Page;
/*
* Make sure we don't exceed our individual target.
*/
PagesUsed = InterlockedIncrementUL(&MiMemoryConsumers[Consumer].PagesUsed);
if (PagesUsed > MiMemoryConsumers[Consumer].PagesTarget &&
!MiIsBalancerThread())
{
MmRebalanceMemoryConsumers();
}
/*
* Allocate always memory for the non paged pool and for the pager thread.
*/
if ((Consumer == MC_SYSTEM) /* || MiIsBalancerThread() */)
{
Page = MmAllocPage(Consumer);
if (Page == 0)
{
KeBugCheck(NO_PAGES_AVAILABLE);
}
if (Consumer == MC_USER) MmInsertLRULastUserPage(Page);
*AllocatedPage = Page;
if (MmAvailablePages < MiMinimumAvailablePages)
MmRebalanceMemoryConsumers();
return(STATUS_SUCCESS);
}
/*
* Make sure we don't exceed global targets.
*/
if (((MmAvailablePages < MiMinimumAvailablePages) && !MiIsBalancerThread())
|| (MmAvailablePages < (MiMinimumAvailablePages / 2)))
{
MM_ALLOCATION_REQUEST Request;
if (!CanWait)
{
(void)InterlockedDecrementUL(&MiMemoryConsumers[Consumer].PagesUsed);
MmRebalanceMemoryConsumers();
return(STATUS_NO_MEMORY);
}
/* Insert an allocation request. */
Request.Page = 0;
KeInitializeEvent(&Request.Event, NotificationEvent, FALSE);
ExInterlockedInsertTailList(&AllocationListHead, &Request.ListEntry, &AllocationListLock);
MmRebalanceMemoryConsumers();
KeWaitForSingleObject(&Request.Event,
0,
KernelMode,
FALSE,
NULL);
Page = Request.Page;
if (Page == 0)
{
KeBugCheck(NO_PAGES_AVAILABLE);
}
if(Consumer == MC_USER) MmInsertLRULastUserPage(Page);
*AllocatedPage = Page;
if (MmAvailablePages < MiMinimumAvailablePages)
{
MmRebalanceMemoryConsumers();
}
return(STATUS_SUCCESS);
}
/* Update the target */
InterlockedIncrementUL(&MiMemoryConsumers[Consumer].PagesUsed);
/*
* Actually allocate the page.
@ -305,14 +288,8 @@ MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
{
KeBugCheck(NO_PAGES_AVAILABLE);
}
if(Consumer == MC_USER) MmInsertLRULastUserPage(Page);
*AllocatedPage = Page;
if (MmAvailablePages < MiMinimumAvailablePages)
{
MmRebalanceMemoryConsumers();
}
return(STATUS_SUCCESS);
}
@ -387,6 +364,9 @@ MiBalancerThread(PVOID Unused)
}
}
while (InitialTarget != 0);
if (Status == STATUS_WAIT_0)
InterlockedDecrement(&PageOutThreadActive);
}
else
{
@ -443,22 +423,14 @@ MiInitBalancerThread(VOID)
{
KPRIORITY Priority;
NTSTATUS Status;
#if !defined(__GNUC__)
LARGE_INTEGER dummyJunkNeeded;
dummyJunkNeeded.QuadPart = -20000000; /* 2 sec */
;
#endif
LARGE_INTEGER Timeout;
KeInitializeEvent(&MiBalancerEvent, SynchronizationEvent, FALSE);
KeInitializeTimerEx(&MiBalancerTimer, SynchronizationTimer);
Timeout.QuadPart = -20000000; /* 2 sec */
KeSetTimerEx(&MiBalancerTimer,
#if defined(__GNUC__)
(LARGE_INTEGER)(LONGLONG)-20000000LL, /* 2 sec */
#else
dummyJunkNeeded,
#endif
Timeout,
2000, /* 2 sec */
NULL);

View file

@ -35,95 +35,127 @@ SIZE_T MmPagedPoolCommit;
SIZE_T MmPeakCommitment;
SIZE_T MmtotalCommitLimitMaximum;
static RTL_BITMAP MiUserPfnBitMap;
PMMPFN FirstUserLRUPfn;
PMMPFN LastUserLRUPfn;
/* FUNCTIONS *************************************************************/
VOID
NTAPI
MiInitializeUserPfnBitmap(VOID)
{
PVOID Bitmap;
/* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
Bitmap = ExAllocatePoolWithTag(NonPagedPool,
(((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
TAG_MM);
ASSERT(Bitmap);
/* Initialize it and clear all the bits to begin with */
RtlInitializeBitMap(&MiUserPfnBitMap,
Bitmap,
(ULONG)MmHighestPhysicalPage + 1);
RtlClearAllBits(&MiUserPfnBitMap);
}
PFN_NUMBER
NTAPI
MmGetLRUFirstUserPage(VOID)
{
ULONG Position;
PFN_NUMBER Page;
KIRQL OldIrql;
/* Find the first user page */
OldIrql = MiAcquirePfnLock();
Position = RtlFindSetBits(&MiUserPfnBitMap, 1, 0);
MiReleasePfnLock(OldIrql);
if (Position == 0xFFFFFFFF) return 0;
/* Return it */
ASSERT(Position != 0);
ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
return Position;
if (FirstUserLRUPfn == NULL)
{
MiReleasePfnLock(OldIrql);
return 0;
}
Page = MiGetPfnEntryIndex(FirstUserLRUPfn);
MmReferencePage(Page);
MiReleasePfnLock(OldIrql);
return Page;
}
static
VOID
NTAPI
MmInsertLRULastUserPage(PFN_NUMBER Pfn)
MmInsertLRULastUserPage(PFN_NUMBER Page)
{
KIRQL OldIrql;
MI_ASSERT_PFN_LOCK_HELD();
/* Set the page as a user page */
ASSERT(Pfn != 0);
ASSERT_IS_ROS_PFN(MiGetPfnEntry(Pfn));
ASSERT(!RtlCheckBit(&MiUserPfnBitMap, (ULONG)Pfn));
OldIrql = MiAcquirePfnLock();
RtlSetBit(&MiUserPfnBitMap, (ULONG)Pfn);
MiReleasePfnLock(OldIrql);
PMMPFN Pfn = MiGetPfnEntry(Page);
if (FirstUserLRUPfn == NULL)
FirstUserLRUPfn = Pfn;
Pfn->PreviousLRU = LastUserLRUPfn;
if (LastUserLRUPfn != NULL)
LastUserLRUPfn->NextLRU = Pfn;
LastUserLRUPfn = Pfn;
}
static
VOID
MmRemoveLRUUserPage(PFN_NUMBER Page)
{
MI_ASSERT_PFN_LOCK_HELD();
/* Unset the page as a user page */
ASSERT(Page != 0);
PMMPFN Pfn = MiGetPfnEntry(Page);
ASSERT_IS_ROS_PFN(Pfn);
if (Pfn->PreviousLRU)
{
ASSERT(Pfn->PreviousLRU->NextLRU == Pfn);
Pfn->PreviousLRU->NextLRU = Pfn->NextLRU;
}
else
{
ASSERT(FirstUserLRUPfn == Pfn);
FirstUserLRUPfn = Pfn->NextLRU;
}
if (Pfn->NextLRU)
{
ASSERT(Pfn->NextLRU->PreviousLRU == Pfn);
Pfn->NextLRU->PreviousLRU = Pfn->PreviousLRU;
}
else
{
ASSERT(Pfn == LastUserLRUPfn);
LastUserLRUPfn = Pfn->PreviousLRU;
}
Pfn->PreviousLRU = Pfn->NextLRU = NULL;
}
PFN_NUMBER
NTAPI
MmGetLRUNextUserPage(PFN_NUMBER PreviousPfn)
MmGetLRUNextUserPage(PFN_NUMBER PreviousPage, BOOLEAN MoveToLast)
{
ULONG Position;
PFN_NUMBER Page = 0;
KIRQL OldIrql;
/* Find the next user page */
OldIrql = MiAcquirePfnLock();
Position = RtlFindSetBits(&MiUserPfnBitMap, 1, (ULONG)PreviousPfn + 1);
PMMPFN PreviousPfn = MiGetPfnEntry(PreviousPage);
PMMPFN NextPfn = PreviousPfn->NextLRU;
/*
* Move this one at the end of the list.
* It may be freed by MmDereferencePage below.
* If it's not, then it means it is still hanging in some process address space.
* This avoids paging-out e.g. ntdll early just because it's mapped first time.
*/
if ((MoveToLast) && (MmGetReferenceCountPage(PreviousPage) > 1))
{
MmRemoveLRUUserPage(PreviousPage);
MmInsertLRULastUserPage(PreviousPage);
}
if (NextPfn)
{
Page = MiGetPfnEntryIndex(NextPfn);
MmReferencePage(Page);
}
MmDereferencePage(PreviousPage);
MiReleasePfnLock(OldIrql);
if (Position == 0xFFFFFFFF) return 0;
/* Return it */
ASSERT(Position != 0);
ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
return Position;
}
VOID
NTAPI
MmRemoveLRUUserPage(PFN_NUMBER Page)
{
KIRQL OldIrql;
/* Unset the page as a user page */
ASSERT(Page != 0);
ASSERT_IS_ROS_PFN(MiGetPfnEntry(Page));
ASSERT(RtlCheckBit(&MiUserPfnBitMap, (ULONG)Page));
OldIrql = MiAcquirePfnLock();
RtlClearBit(&MiUserPfnBitMap, (ULONG)Page);
MiReleasePfnLock(OldIrql);
return Page;
}
BOOLEAN
@ -392,10 +424,11 @@ VOID
NTAPI
MmSetRmapListHeadPage(PFN_NUMBER Pfn, PMM_RMAP_ENTRY ListHead)
{
KIRQL oldIrql;
PMMPFN Pfn1;
oldIrql = MiAcquirePfnLock();
/* PFN database must be locked */
MI_ASSERT_PFN_LOCK_HELD();
Pfn1 = MiGetPfnEntry(Pfn);
ASSERT(Pfn1);
ASSERT_IS_ROS_PFN(Pfn1);
@ -418,35 +451,31 @@ MmSetRmapListHeadPage(PFN_NUMBER Pfn, PMM_RMAP_ENTRY ListHead)
/* ReactOS semantics will now release the page, which will make it free and enter a colored list */
}
MiReleasePfnLock(oldIrql);
}
PMM_RMAP_ENTRY
NTAPI
MmGetRmapListHeadPage(PFN_NUMBER Pfn)
{
KIRQL oldIrql;
PMM_RMAP_ENTRY ListHead;
PMMPFN Pfn1;
/* Lock PFN database */
oldIrql = MiAcquirePfnLock();
/* PFN database must be locked */
MI_ASSERT_PFN_LOCK_HELD();
/* Get the entry */
Pfn1 = MiGetPfnEntry(Pfn);
ASSERT(Pfn1);
ASSERT_IS_ROS_PFN(Pfn1);
/* Get the list head */
ListHead = Pfn1->RmapListHead;
if (!MI_IS_ROS_PFN(Pfn1))
{
return NULL;
}
/* Should not have an RMAP for a non-active page */
ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
/* Release PFN database and return rmap list head */
MiReleasePfnLock(oldIrql);
return ListHead;
/* Get the list head */
return Pfn1->RmapListHead;
}
VOID
@ -550,6 +579,13 @@ MmDereferencePage(PFN_NUMBER Pfn)
Pfn1->u3.e2.ReferenceCount--;
if (Pfn1->u3.e2.ReferenceCount == 0)
{
/* Apply LRU hack */
if (Pfn1->u4.MustBeCached)
{
MmRemoveLRUUserPage(Pfn);
Pfn1->u4.MustBeCached = 0;
}
/* Mark the page temporarily as valid, we're going to make it free soon */
Pfn1->u3.e1.PageLocation = ActiveAndValid;
@ -592,6 +628,15 @@ MmAllocPage(ULONG Type)
Pfn1->u1.SwapEntry = 0;
Pfn1->RmapListHead = NULL;
Pfn1->NextLRU = NULL;
Pfn1->PreviousLRU = NULL;
if (Type == MC_USER)
{
Pfn1->u4.MustBeCached = 1; /* HACK again */
MmInsertLRULastUserPage(PfnOffset);
}
MiReleasePfnLock(OldIrql);
return PfnOffset;
}

View file

@ -585,6 +585,46 @@ MmSetDirtyPage(PEPROCESS Process, PVOID Address)
}
}
VOID
NTAPI
MmClearPageAccessedBit(PEPROCESS Process, PVOID Address)
{
PULONG Pt;
LONG Pte;
KIRQL OldIrql;
if (Address < MmSystemRangeStart && Process == NULL)
{
DPRINT1("MmClearPageAccessedBit is called for user space without a process.\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
Pt = MmGetPageTableForProcess(Process, Address, FALSE, &OldIrql);
if (Pt == NULL)
{
KeBugCheck(MEMORY_MANAGEMENT);
}
do
{
Pte = *Pt;
} while (Pte != InterlockedCompareExchangePte(Pt, Pte & ~PA_ACCESSED, Pte));
if (!(Pte & PA_PRESENT))
{
KeBugCheck(MEMORY_MANAGEMENT);
}
MiFlushTlb(Pt, Address, OldIrql);
}
BOOLEAN
NTAPI
MmIsPageAccessed(PEPROCESS Process, PVOID Address)
{
return BooleanFlagOn(MmGetPageEntryForProcess(Process, Address), PA_ACCESSED);
}
BOOLEAN
NTAPI
MmIsPagePresent(PEPROCESS Process, PVOID Address)

View file

@ -165,12 +165,13 @@ MiMakeProtectionMask(
static VOID
MmInsertMemoryArea(
PMMSUPPORT AddressSpace,
PMEMORY_AREA marea)
PMEMORY_AREA marea,
ULONG Protect)
{
PEPROCESS Process = MmGetAddressSpaceOwner(AddressSpace);
marea->VadNode.u.VadFlags.Spare = 1;
marea->VadNode.u.VadFlags.Protection = MiMakeProtectionMask(marea->Protect);
marea->VadNode.u.VadFlags.Protection = MiMakeProtectionMask(Protect);
/* Build a lame VAD if this is a user-space allocation */
if (marea->VadNode.EndingVpn + 1 < (ULONG_PTR)MmSystemRangeStart >> PAGE_SHIFT)
@ -178,7 +179,11 @@ MmInsertMemoryArea(
ASSERT(Process != NULL);
if (marea->Type != MEMORY_AREA_OWNED_BY_ARM3)
{
#ifdef NEWCC
ASSERT(marea->Type == MEMORY_AREA_SECTION_VIEW || marea->Type == MEMORY_AREA_CACHE);
#else
ASSERT(marea->Type == MEMORY_AREA_SECTION_VIEW);
#endif
/* Insert the VAD */
MiLockProcessWorkingSetUnsafe(PsGetCurrentProcess(), PsGetCurrentThread());
@ -353,7 +358,11 @@ MmFreeMemoryArea(
if (MemoryArea->Vad)
{
ASSERT(MemoryArea->VadNode.EndingVpn + 1 < (ULONG_PTR)MmSystemRangeStart >> PAGE_SHIFT);
#ifdef NEWCC
ASSERT(MemoryArea->Type == MEMORY_AREA_SECTION_VIEW || MemoryArea->Type == MEMORY_AREA_CACHE);
#else
ASSERT(MemoryArea->Type == MEMORY_AREA_SECTION_VIEW);
#endif
/* MmCleanProcessAddressSpace might have removed it (and this would be MmDeleteProcessAdressSpace) */
ASSERT(MemoryArea->VadNode.u.VadFlags.Spare != 0);
@ -449,7 +458,6 @@ MmCreateMemoryArea(PMMSUPPORT AddressSpace,
RtlZeroMemory(MemoryArea, sizeof(MEMORY_AREA));
MemoryArea->Type = Type & ~MEMORY_AREA_STATIC;
MemoryArea->Protect = Protect;
MemoryArea->Flags = AllocationFlags;
MemoryArea->Magic = 'erAM';
MemoryArea->DeleteInProgress = FALSE;
@ -470,7 +478,7 @@ MmCreateMemoryArea(PMMSUPPORT AddressSpace,
MemoryArea->VadNode.StartingVpn = (ULONG_PTR)*BaseAddress >> PAGE_SHIFT;
MemoryArea->VadNode.EndingVpn = ((ULONG_PTR)*BaseAddress + tmpLength - 1) >> PAGE_SHIFT;
MmInsertMemoryArea(AddressSpace, MemoryArea);
MmInsertMemoryArea(AddressSpace, MemoryArea, Protect);
}
else
{
@ -508,7 +516,7 @@ MmCreateMemoryArea(PMMSUPPORT AddressSpace,
MemoryArea->VadNode.StartingVpn = (ULONG_PTR)*BaseAddress >> PAGE_SHIFT;
MemoryArea->VadNode.EndingVpn = ((ULONG_PTR)*BaseAddress + tmpLength - 1) >> PAGE_SHIFT;
MmInsertMemoryArea(AddressSpace, MemoryArea);
MmInsertMemoryArea(AddressSpace, MemoryArea, Protect);
}
*Result = MemoryArea;
@ -535,9 +543,6 @@ MiRosCleanupMemoryArea(
(Process->ActiveThreads == 1)) ||
(Process->ActiveThreads == 0));
/* We are in cleanup, we don't need to synchronize */
MmUnlockAddressSpace(&Process->Vm);
MemoryArea = (PMEMORY_AREA)Vad;
BaseAddress = (PVOID)MA_GetStartingAddress(MemoryArea);
@ -545,10 +550,12 @@ MiRosCleanupMemoryArea(
{
Status = MiRosUnmapViewOfSection(Process, BaseAddress, Process->ProcessExiting);
}
#ifdef NEWCC
else if (MemoryArea->Type == MEMORY_AREA_CACHE)
{
Status = MmUnmapViewOfCacheSegment(&Process->Vm, BaseAddress);
}
#endif
else
{
/* There shouldn't be anything else! */
@ -557,9 +564,6 @@ MiRosCleanupMemoryArea(
/* Make sure this worked! */
ASSERT(NT_SUCCESS(Status));
/* Lock the address space again */
MmLockAddressSpace(&Process->Vm);
}
VOID

View file

@ -79,7 +79,7 @@ MmpAccessFault(KPROCESSOR_MODE Mode,
MemoryArea,
(PVOID)Address);
break;
#ifdef NEWCC
case MEMORY_AREA_CACHE:
// This code locks for itself to keep from having to break a lock
// passed in.
@ -89,7 +89,7 @@ MmpAccessFault(KPROCESSOR_MODE Mode,
if (!FromMdl)
MmLockAddressSpace(AddressSpace);
break;
#endif
default:
Status = STATUS_ACCESS_VIOLATION;
break;
@ -171,7 +171,7 @@ MmNotPresentFault(KPROCESSOR_MODE Mode,
(PVOID)Address,
FromMdl);
break;
#ifdef NEWCC
case MEMORY_AREA_CACHE:
// This code locks for itself to keep from having to break a lock
// passed in.
@ -181,7 +181,7 @@ MmNotPresentFault(KPROCESSOR_MODE Mode,
if (!FromMdl)
MmLockAddressSpace(AddressSpace);
break;
#endif
default:
Status = STATUS_ACCESS_VIOLATION;
break;

View file

@ -17,8 +17,6 @@
/* GLOBALS *******************************************************************/
VOID NTAPI MiInitializeUserPfnBitmap(VOID);
BOOLEAN Mm64BitPhysicalAddress = FALSE;
ULONG MmReadClusterSize;
//
@ -214,12 +212,17 @@ MmInitSystem(IN ULONG Phase,
/* Initialize the kernel address space */
ASSERT(Phase == 1);
#ifdef NEWCC
InitializeListHead(&MiSegmentList);
ExInitializeFastMutex(&MiGlobalPageOperation);
KeInitializeEvent(&MmWaitPageEvent, SynchronizationEvent, FALSE);
// Until we're fully demand paged, we can do things the old way through
// the balance manager
// CcInitView will override this...
MmInitializeMemoryConsumer(MC_CACHE, MiRosTrimCache);
#else
KeInitializeEvent(&MmWaitPageEvent, SynchronizationEvent, FALSE);
#endif
MmKernelAddressSpace = &PsIdleProcess->Vm;
@ -230,7 +233,6 @@ MmInitSystem(IN ULONG Phase,
MiDbgDumpAddressSpace();
MmInitGlobalKernelPageDirectory();
MiInitializeUserPfnBitmap();
MmInitializeMemoryConsumer(MC_USER, MmTrimUserMemory);
MmInitializeRmapList();
MmInitSectionImplementation();

View file

@ -94,9 +94,6 @@ NTAPI
MmBuildMdlFromPages(PMDL Mdl, PPFN_NUMBER Pages)
{
memcpy(Mdl + 1, Pages, sizeof(PFN_NUMBER) * (PAGE_ROUND_UP(Mdl->ByteOffset+Mdl->ByteCount)/PAGE_SIZE));
/* FIXME: this flag should be set by the caller perhaps? */
Mdl->MdlFlags |= MDL_IO_PAGE_READ;
}
@ -230,7 +227,7 @@ MiReadPageFile(
MmInitializeMdl(Mdl, NULL, PAGE_SIZE);
MmBuildMdlFromPages(Mdl, &Page);
Mdl->MdlFlags |= MDL_PAGES_LOCKED;
Mdl->MdlFlags |= MDL_PAGES_LOCKED | MDL_IO_PAGE_READ;
file_offset.QuadPart = PageFileOffset * PAGE_SIZE;

View file

@ -19,7 +19,6 @@
/* GLOBALS ******************************************************************/
static NPAGED_LOOKASIDE_LIST RmapLookasideList;
FAST_MUTEX RmapListLock;
/* FUNCTIONS ****************************************************************/
@ -38,7 +37,6 @@ VOID
NTAPI
MmInitializeRmapList(VOID)
{
ExInitializeFastMutex(&RmapListLock);
ExInitializeNPagedLookasideList (&RmapLookasideList,
NULL,
RmapListFree,
@ -55,37 +53,28 @@ MmPageOutPhysicalAddress(PFN_NUMBER Page)
PMM_RMAP_ENTRY entry;
PMEMORY_AREA MemoryArea;
PMMSUPPORT AddressSpace;
ULONG Type;
PVOID Address;
PEPROCESS Process;
ULONGLONG Offset;
PVOID Address = NULL;
PEPROCESS Process = NULL;
NTSTATUS Status = STATUS_SUCCESS;
PMM_SECTION_SEGMENT Segment;
LARGE_INTEGER SegmentOffset;
KIRQL OldIrql;
GetEntry:
OldIrql = MiAcquirePfnLock();
ExAcquireFastMutex(&RmapListLock);
entry = MmGetRmapListHeadPage(Page);
#ifdef NEWCC
// Special case for NEWCC: we can have a page that's only in a segment
// page table
if (entry && RMAP_IS_SEGMENT(entry->Address) && entry->Next == NULL)
{
/* NEWCC does locking itself */
ExReleaseFastMutex(&RmapListLock);
return MmpPageOutPhysicalAddress(Page);
}
#endif
while (entry && RMAP_IS_SEGMENT(entry->Address))
entry = entry->Next;
if (entry == NULL)
{
ExReleaseFastMutex(&RmapListLock);
return(STATUS_UNSUCCESSFUL);
MiReleasePfnLock(OldIrql);
goto WriteSegment;
}
Process = entry->Process;
Address = entry->Address;
if ((((ULONG_PTR)Address) & 0xFFF) != 0)
@ -93,58 +82,64 @@ MmPageOutPhysicalAddress(PFN_NUMBER Page)
KeBugCheck(MEMORY_MANAGEMENT);
}
if (Address < MmSystemRangeStart)
{
if (!ExAcquireRundownProtection(&Process->RundownProtect))
{
ExReleaseFastMutex(&RmapListLock);
return STATUS_PROCESS_IS_TERMINATING;
}
/* This is for user-mode address only */
ASSERT(Address < MmSystemRangeStart);
Status = ObReferenceObjectByPointer(Process, PROCESS_ALL_ACCESS, NULL, KernelMode);
ExReleaseFastMutex(&RmapListLock);
if (!NT_SUCCESS(Status))
{
ExReleaseRundownProtection(&Process->RundownProtect);
return Status;
}
AddressSpace = &Process->Vm;
}
else
if (!ExAcquireRundownProtection(&Process->RundownProtect))
{
ExReleaseFastMutex(&RmapListLock);
AddressSpace = MmGetKernelAddressSpace();
MiReleasePfnLock(OldIrql);
return STATUS_PROCESS_IS_TERMINATING;
}
Status = ObReferenceObjectByPointer(Process, PROCESS_ALL_ACCESS, NULL, KernelMode);
MiReleasePfnLock(OldIrql);
if (!NT_SUCCESS(Status))
{
ExReleaseRundownProtection(&Process->RundownProtect);
return Status;
}
AddressSpace = &Process->Vm;
MmLockAddressSpace(AddressSpace);
if (MmGetPfnForProcess(Process, Address) != Page)
{
/* This changed in the short window where we didn't have any locks */
MmUnlockAddressSpace(AddressSpace);
ExReleaseRundownProtection(&Process->RundownProtect);
ObDereferenceObject(Process);
goto GetEntry;
}
MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, Address);
if (MemoryArea == NULL || MemoryArea->DeleteInProgress)
{
MmUnlockAddressSpace(AddressSpace);
if (Address < MmSystemRangeStart)
{
ExReleaseRundownProtection(&Process->RundownProtect);
ObDereferenceObject(Process);
}
return(STATUS_UNSUCCESSFUL);
ExReleaseRundownProtection(&Process->RundownProtect);
ObDereferenceObject(Process);
goto GetEntry;
}
Type = MemoryArea->Type;
if (Type == MEMORY_AREA_SECTION_VIEW)
if (MemoryArea->Type == MEMORY_AREA_SECTION_VIEW)
{
ULONG_PTR Entry;
Offset = MemoryArea->Data.SectionData.ViewOffset.QuadPart +
BOOLEAN Dirty;
PFN_NUMBER MapPage;
LARGE_INTEGER Offset;
BOOLEAN Released;
Offset.QuadPart = MemoryArea->SectionData.ViewOffset +
((ULONG_PTR)Address - MA_GetStartingAddress(MemoryArea));
MmLockSectionSegment(MemoryArea->Data.SectionData.Segment);
Segment = MemoryArea->SectionData.Segment;
/*
* Get or create a pageop
*/
Entry = MmGetPageEntrySectionSegment(MemoryArea->Data.SectionData.Segment,
(PLARGE_INTEGER)&Offset);
MmLockSectionSegment(Segment);
Entry = MmGetPageEntrySectionSegment(Segment, &Offset);
if (Entry && MM_IS_WAIT_PTE(Entry))
{
MmUnlockSectionSegment(MemoryArea->Data.SectionData.Segment);
/* The segment is being read or something. Give up */
MmUnlockSectionSegment(Segment);
MmUnlockAddressSpace(AddressSpace);
if (Address < MmSystemRangeStart)
{
@ -154,36 +149,147 @@ MmPageOutPhysicalAddress(PFN_NUMBER Page)
return(STATUS_UNSUCCESSFUL);
}
MmSetPageEntrySectionSegment(MemoryArea->Data.SectionData.Segment, (PLARGE_INTEGER)&Offset, MAKE_SWAP_SSE(MM_WAIT_ENTRY));
/* Delete this virtual mapping in the process */
MmDeleteVirtualMapping(Process, Address, &Dirty, &MapPage);
/*
* Release locks now we have a page op.
*/
MmUnlockSectionSegment(MemoryArea->Data.SectionData.Segment);
/* We checked this earlier */
ASSERT(MapPage == Page);
if (Page != PFN_FROM_SSE(Entry))
{
SWAPENTRY SwapEntry;
/* This page is private to the process */
MmUnlockSectionSegment(Segment);
/* Check if we should write it back to the page file */
SwapEntry = MmGetSavedSwapEntryPage(Page);
if ((SwapEntry == 0) && Dirty)
{
/* We don't have a Swap entry, yet the page is dirty. Get one */
SwapEntry = MmAllocSwapPage();
if (!SwapEntry)
{
PMM_REGION Region = MmFindRegion((PVOID)MA_GetStartingAddress(MemoryArea),
&MemoryArea->SectionData.RegionListHead,
Address, NULL);
/* We can't, so let this page in the Process VM */
MmCreateVirtualMapping(Process, Address, Region->Protect, &Page, 1);
MmSetDirtyPage(Process, Address);
MmUnlockAddressSpace(AddressSpace);
if (Address < MmSystemRangeStart)
{
ExReleaseRundownProtection(&Process->RundownProtect);
ObDereferenceObject(Process);
}
return STATUS_UNSUCCESSFUL;
}
}
if (Dirty)
{
Status = MmWriteToSwapPage(SwapEntry, Page);
if (!NT_SUCCESS(Status))
{
/* We failed at saving the content of this page. Keep it in */
PMM_REGION Region = MmFindRegion((PVOID)MA_GetStartingAddress(MemoryArea),
&MemoryArea->SectionData.RegionListHead,
Address, NULL);
/* This Swap Entry is useless to us */
MmSetSavedSwapEntryPage(Page, 0);
MmFreeSwapPage(SwapEntry);
/* We can't, so let this page in the Process VM */
MmCreateVirtualMapping(Process, Address, Region->Protect, &Page, 1);
MmSetDirtyPage(Process, Address);
MmUnlockAddressSpace(AddressSpace);
ExReleaseRundownProtection(&Process->RundownProtect);
ObDereferenceObject(Process);
return STATUS_UNSUCCESSFUL;
}
}
if (SwapEntry)
{
/* Keep this in the process VM */
MmCreatePageFileMapping(Process, Address, SwapEntry);
MmSetSavedSwapEntryPage(Page, 0);
}
MmUnlockAddressSpace(AddressSpace);
/* We can finally let this page go */
MmDeleteRmap(Page, Process, Address);
#if DBG
OldIrql = MiAcquirePfnLock();
ASSERT(MmGetRmapListHeadPage(Page) == NULL);
MiReleasePfnLock(OldIrql);
#endif
MmReleasePageMemoryConsumer(MC_USER, Page);
ExReleaseRundownProtection(&Process->RundownProtect);
ObDereferenceObject(Process);
return STATUS_SUCCESS;
}
/* Delete this RMAP */
MmDeleteRmap(Page, Process, Address);
/* One less mapping referencing this segment */
Released = MmUnsharePageEntrySectionSegment(MemoryArea, Segment, &Offset, Dirty, TRUE, NULL);
MmUnlockSectionSegment(Segment);
MmUnlockAddressSpace(AddressSpace);
/*
* Do the actual page out work.
*/
Status = MmPageOutSectionView(AddressSpace, MemoryArea, Address, Entry);
ExReleaseRundownProtection(&Process->RundownProtect);
ObDereferenceObject(Process);
if (Released) return STATUS_SUCCESS;
}
#ifdef NEWCC
else if (Type == MEMORY_AREA_CACHE)
{
/* NEWCC does locking itself */
MmUnlockAddressSpace(AddressSpace);
Status = MmpPageOutPhysicalAddress(Page);
}
#endif
else
{
KeBugCheck(MEMORY_MANAGEMENT);
}
if (Address < MmSystemRangeStart)
WriteSegment:
/* Now write this page to file, if needed */
Segment = MmGetSectionAssociation(Page, &SegmentOffset);
if (Segment)
{
ExReleaseRundownProtection(&Process->RundownProtect);
ObDereferenceObject(Process);
BOOLEAN Released;
MmLockSectionSegment(Segment);
Released = MmCheckDirtySegment(Segment, &SegmentOffset, FALSE, TRUE);
MmUnlockSectionSegment(Segment);
MmDereferenceSegment(Segment);
if (Released)
{
return STATUS_SUCCESS;
}
}
return(Status);
/* If we are here, then we didn't release the page */
return STATUS_UNSUCCESSFUL;
}
VOID
@ -191,12 +297,13 @@ NTAPI
MmSetCleanAllRmaps(PFN_NUMBER Page)
{
PMM_RMAP_ENTRY current_entry;
KIRQL OldIrql;
ExAcquireFastMutex(&RmapListLock);
OldIrql = MiAcquirePfnLock();
current_entry = MmGetRmapListHeadPage(Page);
if (current_entry == NULL)
{
DPRINT1("MmIsDirtyRmap: No rmaps.\n");
DPRINT1("MmSetCleanAllRmaps: No rmaps.\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
while (current_entry != NULL)
@ -205,29 +312,7 @@ MmSetCleanAllRmaps(PFN_NUMBER Page)
MmSetCleanPage(current_entry->Process, current_entry->Address);
current_entry = current_entry->Next;
}
ExReleaseFastMutex(&RmapListLock);
}
VOID
NTAPI
MmSetDirtyAllRmaps(PFN_NUMBER Page)
{
PMM_RMAP_ENTRY current_entry;
ExAcquireFastMutex(&RmapListLock);
current_entry = MmGetRmapListHeadPage(Page);
if (current_entry == NULL)
{
DPRINT1("MmIsDirtyRmap: No rmaps.\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
while (current_entry != NULL)
{
if (!RMAP_IS_SEGMENT(current_entry->Address))
MmSetDirtyPage(current_entry->Process, current_entry->Address);
current_entry = current_entry->Next;
}
ExReleaseFastMutex(&RmapListLock);
MiReleasePfnLock(OldIrql);
}
BOOLEAN
@ -235,27 +320,31 @@ NTAPI
MmIsDirtyPageRmap(PFN_NUMBER Page)
{
PMM_RMAP_ENTRY current_entry;
KIRQL OldIrql;
BOOLEAN Dirty = FALSE;
ExAcquireFastMutex(&RmapListLock);
OldIrql = MiAcquirePfnLock();
current_entry = MmGetRmapListHeadPage(Page);
if (current_entry == NULL)
{
ExReleaseFastMutex(&RmapListLock);
return(FALSE);
DPRINT1("MmIsDirtyPageRmap: No rmaps.\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
while (current_entry != NULL)
{
if (
!RMAP_IS_SEGMENT(current_entry->Address) &&
MmIsDirtyPage(current_entry->Process, current_entry->Address))
if (!RMAP_IS_SEGMENT(current_entry->Address))
{
ExReleaseFastMutex(&RmapListLock);
return(TRUE);
if (MmIsDirtyPage(current_entry->Process, current_entry->Address))
{
Dirty = TRUE;
break;
}
}
current_entry = current_entry->Next;
}
ExReleaseFastMutex(&RmapListLock);
return(FALSE);
MiReleasePfnLock(OldIrql);
return Dirty;
}
VOID
@ -266,6 +355,8 @@ MmInsertRmap(PFN_NUMBER Page, PEPROCESS Process,
PMM_RMAP_ENTRY current_entry;
PMM_RMAP_ENTRY new_entry;
ULONG PrevSize;
KIRQL OldIrql;
if (!RMAP_IS_SEGMENT(Address))
Address = (PVOID)PAGE_ROUND_DOWN(Address);
@ -277,11 +368,7 @@ MmInsertRmap(PFN_NUMBER Page, PEPROCESS Process,
new_entry->Address = Address;
new_entry->Process = (PEPROCESS)Process;
#if DBG
#ifdef __GNUC__
new_entry->Caller = __builtin_return_address(0);
#else
new_entry->Caller = _ReturnAddress();
#endif
#endif
if (
@ -296,89 +383,50 @@ MmInsertRmap(PFN_NUMBER Page, PEPROCESS Process,
KeBugCheck(MEMORY_MANAGEMENT);
}
ExAcquireFastMutex(&RmapListLock);
OldIrql = MiAcquirePfnLock();
current_entry = MmGetRmapListHeadPage(Page);
new_entry->Next = current_entry;
#if DBG
while (current_entry)
{
if (current_entry->Address == new_entry->Address && current_entry->Process == new_entry->Process)
{
DbgPrint("MmInsertRmap tries to add a second rmap entry for address %p\n current caller ",
current_entry->Address);
DbgPrint("%p", new_entry->Caller);
DbgPrint("\n previous caller ");
DbgPrint("%p", current_entry->Caller);
DbgPrint("\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
current_entry = current_entry->Next;
}
#endif
MmSetRmapListHeadPage(Page, new_entry);
ExReleaseFastMutex(&RmapListLock);
if (!RMAP_IS_SEGMENT(Address))
{
if (Process == NULL)
{
Process = PsInitialSystemProcess;
}
if (Process)
{
PrevSize = InterlockedExchangeAddUL(&Process->Vm.WorkingSetSize, PAGE_SIZE);
if (PrevSize >= Process->Vm.PeakWorkingSetSize)
{
Process->Vm.PeakWorkingSetSize = PrevSize + PAGE_SIZE;
}
}
}
}
VOID
NTAPI
MmDeleteAllRmaps(PFN_NUMBER Page, PVOID Context,
VOID (*DeleteMapping)(PVOID Context, PEPROCESS Process,
PVOID Address))
{
PMM_RMAP_ENTRY current_entry;
PMM_RMAP_ENTRY previous_entry;
PEPROCESS Process;
ExAcquireFastMutex(&RmapListLock);
current_entry = MmGetRmapListHeadPage(Page);
if (current_entry == NULL)
{
DPRINT1("MmDeleteAllRmaps: No rmaps.\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
MmSetRmapListHeadPage(Page, NULL);
ExReleaseFastMutex(&RmapListLock);
while (current_entry != NULL)
PMM_RMAP_ENTRY previous_entry = NULL;
/* Keep the list sorted */
while (current_entry && (current_entry->Address < Address))
{
previous_entry = current_entry;
current_entry = current_entry->Next;
if (!RMAP_IS_SEGMENT(previous_entry->Address))
}
/* In case of clash in the address, sort by process */
if (current_entry && (current_entry->Address == Address))
{
while (current_entry && (current_entry->Process < Process))
{
if (DeleteMapping)
{
DeleteMapping(Context, previous_entry->Process,
previous_entry->Address);
}
Process = previous_entry->Process;
ExFreeToNPagedLookasideList(&RmapLookasideList, previous_entry);
if (Process == NULL)
{
Process = PsInitialSystemProcess;
}
if (Process)
{
(void)InterlockedExchangeAddUL(&Process->Vm.WorkingSetSize, -PAGE_SIZE);
}
previous_entry = current_entry;
current_entry = current_entry->Next;
}
else
}
if (current_entry && (current_entry->Address == Address) && (current_entry->Process == Process))
{
DbgPrint("MmInsertRmap tries to add a second rmap entry for address %p\n", current_entry->Address);
DbgPrint(" current caller %p\n", new_entry->Caller);
DbgPrint(" previous caller %p\n", current_entry->Caller);
KeBugCheck(MEMORY_MANAGEMENT);
}
new_entry->Next = current_entry;
if (previous_entry)
previous_entry->Next = new_entry;
else
MmSetRmapListHeadPage(Page, new_entry);
MiReleasePfnLock(OldIrql);
if (!RMAP_IS_SEGMENT(Address))
{
ASSERT(Process != NULL);
PrevSize = InterlockedExchangeAddUL(&Process->Vm.WorkingSetSize, PAGE_SIZE);
if (PrevSize >= Process->Vm.PeakWorkingSetSize)
{
ExFreeToNPagedLookasideList(&RmapLookasideList, previous_entry);
Process->Vm.PeakWorkingSetSize = PrevSize + PAGE_SIZE;
}
}
}
@ -389,8 +437,9 @@ MmDeleteRmap(PFN_NUMBER Page, PEPROCESS Process,
PVOID Address)
{
PMM_RMAP_ENTRY current_entry, previous_entry;
KIRQL OldIrql;
ExAcquireFastMutex(&RmapListLock);
OldIrql = MiAcquirePfnLock();
previous_entry = NULL;
current_entry = MmGetRmapListHeadPage(Page);
@ -407,18 +456,13 @@ MmDeleteRmap(PFN_NUMBER Page, PEPROCESS Process,
{
previous_entry->Next = current_entry->Next;
}
ExReleaseFastMutex(&RmapListLock);
MiReleasePfnLock(OldIrql);
ExFreeToNPagedLookasideList(&RmapLookasideList, current_entry);
if (!RMAP_IS_SEGMENT(Address))
{
if (Process == NULL)
{
Process = PsInitialSystemProcess;
}
if (Process)
{
(void)InterlockedExchangeAddUL(&Process->Vm.WorkingSetSize, -PAGE_SIZE);
}
ASSERT(Process != NULL);
(void)InterlockedExchangeAddUL(&Process->Vm.WorkingSetSize, -PAGE_SIZE);
}
return;
}
@ -448,8 +492,8 @@ MmGetSegmentRmap(PFN_NUMBER Page, PULONG RawOffset)
{
PCACHE_SECTION_PAGE_TABLE Result = NULL;
PMM_RMAP_ENTRY current_entry;//, previous_entry;
KIRQL OldIrql = MiAcquirePfnLock();
ExAcquireFastMutex(&RmapListLock);
//previous_entry = NULL;
current_entry = MmGetRmapListHeadPage(Page);
while (current_entry != NULL)
@ -458,14 +502,20 @@ MmGetSegmentRmap(PFN_NUMBER Page, PULONG RawOffset)
{
Result = (PCACHE_SECTION_PAGE_TABLE)current_entry->Process;
*RawOffset = (ULONG_PTR)current_entry->Address & ~RMAP_SEGMENT_MASK;
InterlockedIncrementUL(&Result->Segment->ReferenceCount);
ExReleaseFastMutex(&RmapListLock);
if (*Result->Segment->Flags & MM_SEGMENT_INDELETE)
{
MiReleasePfnLock(OldIrql);
return NULL;
}
InterlockedIncrement64(Result->Segment->ReferenceCount);
MiReleasePfnLock(OldIrql);
return Result;
}
//previous_entry = current_entry;
current_entry = current_entry->Next;
}
ExReleaseFastMutex(&RmapListLock);
MiReleasePfnLock(OldIrql);
return NULL;
}
@ -480,8 +530,8 @@ NTAPI
MmDeleteSectionAssociation(PFN_NUMBER Page)
{
PMM_RMAP_ENTRY current_entry, previous_entry;
KIRQL OldIrql = MiAcquirePfnLock();
ExAcquireFastMutex(&RmapListLock);
previous_entry = NULL;
current_entry = MmGetRmapListHeadPage(Page);
while (current_entry != NULL)
@ -496,12 +546,12 @@ MmDeleteSectionAssociation(PFN_NUMBER Page)
{
previous_entry->Next = current_entry->Next;
}
ExReleaseFastMutex(&RmapListLock);
MiReleasePfnLock(OldIrql);
ExFreeToNPagedLookasideList(&RmapLookasideList, current_entry);
return;
}
previous_entry = current_entry;
current_entry = current_entry->Next;
}
ExReleaseFastMutex(&RmapListLock);
MiReleasePfnLock(OldIrql);
}

File diff suppressed because it is too large Load diff

View file

@ -21,6 +21,44 @@ VOID
MiShutdownSystem(VOID)
{
ULONG i;
PFN_NUMBER Page;
BOOLEAN Dirty;
/* Loop through all the pages owned by the legacy Mm and page them out, if needed. */
/* We do it twice, since flushing can cause the FS to dirtify new pages */
do
{
Dirty = FALSE;
Page = MmGetLRUFirstUserPage();
while (Page)
{
LARGE_INTEGER SegmentOffset;
PMM_SECTION_SEGMENT Segment = MmGetSectionAssociation(Page, &SegmentOffset);
if (Segment)
{
if ((*Segment->Flags) & MM_DATAFILE_SEGMENT)
{
MmLockSectionSegment(Segment);
ULONG_PTR Entry = MmGetPageEntrySectionSegment(Segment, &SegmentOffset);
if (!IS_SWAP_FROM_SSE(Entry) && IS_DIRTY_SSE(Entry))
{
Dirty = TRUE;
MmCheckDirtySegment(Segment, &SegmentOffset, FALSE, TRUE);
}
MmUnlockSectionSegment(Segment);
}
MmDereferenceSegment(Segment);
}
Page = MmGetLRUNextUserPage(Page, FALSE);
}
} while (Dirty);
/* Loop through all the paging files */
for (i = 0; i < MmNumberOfPagingFiles; i++)

View file

@ -28,7 +28,11 @@ if(NEWCC)
${REACTOS_SOURCE_DIR}/ntoskrnl/cache/lazyrite.c
${REACTOS_SOURCE_DIR}/ntoskrnl/cache/logsup.c
${REACTOS_SOURCE_DIR}/ntoskrnl/cache/mdlsup.c
${REACTOS_SOURCE_DIR}/ntoskrnl/cache/pinsup.c)
${REACTOS_SOURCE_DIR}/ntoskrnl/cache/pinsup.c
${REACTOS_SOURCE_DIR}/ntoskrnl/cache/section/fault.c
${REACTOS_SOURCE_DIR}/ntoskrnl/cache/section/swapout.c
${REACTOS_SOURCE_DIR}/ntoskrnl/cache/section/data.c
${REACTOS_SOURCE_DIR}/ntoskrnl/cache/section/reqtools.c)
else()
list(APPEND SOURCE
${REACTOS_SOURCE_DIR}/ntoskrnl/cc/cacheman.c
@ -42,11 +46,7 @@ endif()
list(APPEND SOURCE
${REACTOS_SOURCE_DIR}/ntoskrnl/cache/section/io.c
${REACTOS_SOURCE_DIR}/ntoskrnl/cache/section/data.c
${REACTOS_SOURCE_DIR}/ntoskrnl/cache/section/fault.c
${REACTOS_SOURCE_DIR}/ntoskrnl/cache/section/reqtools.c
${REACTOS_SOURCE_DIR}/ntoskrnl/cache/section/sptab.c
${REACTOS_SOURCE_DIR}/ntoskrnl/cache/section/swapout.c
${REACTOS_SOURCE_DIR}/ntoskrnl/config/cmalloc.c
${REACTOS_SOURCE_DIR}/ntoskrnl/config/cmapi.c
${REACTOS_SOURCE_DIR}/ntoskrnl/config/cmboot.c

View file

@ -4,7 +4,7 @@
* FILE: ntoskrnl/po/power.c
* PURPOSE: Power Manager
* PROGRAMMERS: Casper S. Hornstrup (chorns@users.sourceforge.net)
* Hervé Poussineau (hpoussin@reactos.com)
* Herv<EFBFBD> Poussineau (hpoussin@reactos.com)
*/
/* INCLUDES ******************************************************************/
@ -1072,18 +1072,19 @@ NtSetSystemPowerState(IN POWER_ACTION SystemAction,
/* Check if we're still in an invalid status */
if (!NT_SUCCESS(Status)) break;
/* Flush all volumes and the registry */
DPRINT("Flushing volumes\n");
PopFlushVolumes(PopAction.Shutdown);
#ifndef NEWCC
/* Flush dirty cache pages */
/* XXX: Is that still mandatory? As now we'll wait on lazy writer to complete? */
CcRosFlushDirtyPages(-1, &Dummy, FALSE, FALSE); //HACK: We really should wait here!
CcRosFlushDirtyPages(MAXULONG, &Dummy, TRUE, FALSE);
DPRINT("Cache flushed %lu pages\n", Dummy);
#else
Dummy = 0;
#endif
/* Flush all volumes and the registry */
DPRINT("Flushing volumes, cache flushed %lu pages\n", Dummy);
PopFlushVolumes(PopAction.Shutdown);
/* Set IRP for drivers */
PopAction.IrpMinor = IRP_MN_SET_POWER;
if (PopAction.Shutdown)

View file

@ -358,7 +358,7 @@ PspCreateProcess(OUT PHANDLE ProcessHandle,
PEPROCESS Process, Parent;
PVOID ExceptionPortObject;
PDEBUG_OBJECT DebugObject;
PSECTION_OBJECT SectionObject;
PSECTION SectionObject;
NTSTATUS Status, AccessStatus;
ULONG_PTR DirectoryTableBase[2] = {0,0};
KAFFINITY Affinity;

View file

@ -623,19 +623,6 @@ typedef struct _SEGMENT_OBJECT
PMMSUBSECTION_FLAGS MmSubSectionFlags;
} SEGMENT_OBJECT, *PSEGMENT_OBJECT;
//
// Section Object
//
typedef struct _SECTION_OBJECT
{
PVOID StartingVa;
PVOID EndingVa;
PVOID Parent;
PVOID LeftChild;
PVOID RightChild;
PSEGMENT_OBJECT Segment;
} SECTION_OBJECT, *PSECTION_OBJECT;
//
// Generic Address Range Structure
//