Mirror of https://github.com/reactos/reactos.git (synced 2025-05-18 16:51:18 +00:00)
[NTOSKRNL] Overhaul Cc and Mm relationship
Previously, when creating a file section, Mm requested Cc to cache the file; Cc would then request pages from Mm, and Mm would request them back to serve its file-mapping role. Now, Mm does it all by itself. If file caching is requested by the FS driver, Cc creates a file mapping and uses that to serve its purpose. This is a rewrite of Cc.
This commit is contained in:
Parent: e4047d1521
Commit: d8cdb89fb0
17 changed files with 1499 additions and 2346 deletions
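For readers skimming the diff, here is a minimal sketch of the new ownership model, pieced together only from calls that appear in this commit (MmCreateSection, MmMapViewInSystemSpaceEx, CcRosEnsureVacbResident). The helper name SketchBackVacbWithSection is hypothetical, and the locking, reference counting and error paths of the real CcRosInitializeFileCache/CcRosCreateVacb code are omitted; treat it as an illustration of the flow, not the kernel implementation.

```c
/* Hypothetical helper, for illustration only: how a cache view (VACB) is now
 * backed by an Mm data section instead of pages that Cc requested from Mm.
 * The real code paths are CcRosInitializeFileCache, CcRosCreateVacb and
 * CcRosEnsureVacbResident in the hunks below. */
static NTSTATUS
SketchBackVacbWithSection(
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb)
{
    SIZE_T ViewSize = VACB_MAPPING_GRANULARITY;
    NTSTATUS Status;

    /* The FS driver asked for caching: Cc creates a data section for the file
     * (done once per shared cache map in the real code)... */
    Status = MmCreateSection(&SharedCacheMap->Section,
                             SECTION_ALL_ACCESS,
                             NULL,
                             &SharedCacheMap->SectionSize,
                             PAGE_READWRITE,
                             0,
                             NULL,
                             SharedCacheMap->FileObject);
    if (!NT_SUCCESS(Status))
        return Status;

    /* ...and maps a view of that section in system space, instead of building
     * its own memory area and asking Mm for cache pages. */
    Status = MmMapViewInSystemSpaceEx(SharedCacheMap->Section,
                                      &Vacb->BaseAddress,
                                      &ViewSize,
                                      &Vacb->FileOffset);
    if (!NT_SUCCESS(Status))
        return Status;

    /* Bringing the data in is now Mm's job: Cc merely asks for residency,
     * and Mm faults the pages in from the file through the section. */
    if (!CcRosEnsureVacbResident(Vacb, TRUE, FALSE, 0, VACB_MAPPING_GRANULARITY))
        return STATUS_UNSUCCESSFUL;

    return STATUS_SUCCESS;
}
```

Write-back follows the same pattern: as the cc/view.c hunks below show, CcRosFlushVacb now hands the mapped range to MmFlushVirtualMemory instead of issuing its own paging I/O via CcWriteVirtualAddress.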
ntoskrnl/cache/section/sptab.c (3 changes, vendored)
@ -187,9 +187,6 @@ _MmSetPageEntrySectionSegment(PMM_SECTION_SEGMENT Segment,
|
|||
ASSERT(Segment->Locked);
|
||||
ASSERT(!IS_SWAP_FROM_SSE(Entry) || !IS_DIRTY_SSE(Entry));
|
||||
|
||||
if (Entry && !IS_SWAP_FROM_SSE(Entry))
|
||||
MmGetRmapListHeadPage(PFN_FROM_SSE(Entry));
|
||||
|
||||
PageTable = MiSectionPageTableGetOrAllocate(&Segment->PageTable, Offset);
|
||||
|
||||
if (!PageTable) return STATUS_NO_MEMORY;
|
||||
|
|
|
@ -20,13 +20,6 @@ static PFN_NUMBER CcZeroPage = 0;
|
|||
|
||||
#define MAX_ZERO_LENGTH (256 * 1024)
|
||||
|
||||
typedef enum _CC_COPY_OPERATION
|
||||
{
|
||||
CcOperationRead,
|
||||
CcOperationWrite,
|
||||
CcOperationZero
|
||||
} CC_COPY_OPERATION;
|
||||
|
||||
typedef enum _CC_CAN_WRITE_RETRY
|
||||
{
|
||||
FirstTry = 0,
|
||||
|
@ -35,7 +28,7 @@ typedef enum _CC_CAN_WRITE_RETRY
|
|||
RetryMasterLocked = 255,
|
||||
} CC_CAN_WRITE_RETRY;
|
||||
|
||||
ULONG CcRosTraceLevel = 0;
|
||||
ULONG CcRosTraceLevel = CC_API_DEBUG;
|
||||
ULONG CcFastMdlReadWait;
|
||||
ULONG CcFastMdlReadNotPossible;
|
||||
ULONG CcFastReadNotPossible;
|
||||
|
@ -76,338 +69,6 @@ CcInitCacheZeroPage (
|
|||
MiZeroPhysicalPage(CcZeroPage);
|
||||
}
|
||||
|
||||
NTSTATUS
|
||||
NTAPI
|
||||
CcReadVirtualAddress (
|
||||
PROS_VACB Vacb)
|
||||
{
|
||||
ULONG Size;
|
||||
PMDL Mdl;
|
||||
NTSTATUS Status;
|
||||
IO_STATUS_BLOCK IoStatus;
|
||||
KEVENT Event;
|
||||
ULARGE_INTEGER LargeSize;
|
||||
|
||||
LargeSize.QuadPart = Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart;
|
||||
if (LargeSize.QuadPart > VACB_MAPPING_GRANULARITY)
|
||||
{
|
||||
LargeSize.QuadPart = VACB_MAPPING_GRANULARITY;
|
||||
}
|
||||
Size = LargeSize.LowPart;
|
||||
|
||||
Size = ROUND_TO_PAGES(Size);
|
||||
ASSERT(Size <= VACB_MAPPING_GRANULARITY);
|
||||
ASSERT(Size > 0);
|
||||
|
||||
Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
|
||||
if (!Mdl)
|
||||
{
|
||||
return STATUS_INSUFFICIENT_RESOURCES;
|
||||
}
|
||||
|
||||
Status = STATUS_SUCCESS;
|
||||
_SEH2_TRY
|
||||
{
|
||||
MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
|
||||
}
|
||||
_SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
|
||||
{
|
||||
Status = _SEH2_GetExceptionCode();
|
||||
DPRINT1("MmProbeAndLockPages failed with: %lx for %p (%p, %p)\n", Status, Mdl, Vacb, Vacb->BaseAddress);
|
||||
KeBugCheck(CACHE_MANAGER);
|
||||
} _SEH2_END;
|
||||
|
||||
if (NT_SUCCESS(Status))
|
||||
{
|
||||
Mdl->MdlFlags |= MDL_IO_PAGE_READ;
|
||||
KeInitializeEvent(&Event, NotificationEvent, FALSE);
|
||||
Status = IoPageRead(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
|
||||
if (Status == STATUS_PENDING)
|
||||
{
|
||||
KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
|
||||
Status = IoStatus.Status;
|
||||
}
|
||||
|
||||
MmUnlockPages(Mdl);
|
||||
}
|
||||
|
||||
IoFreeMdl(Mdl);
|
||||
|
||||
if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
|
||||
{
|
||||
DPRINT1("IoPageRead failed, Status %x\n", Status);
|
||||
return Status;
|
||||
}
|
||||
|
||||
if (Size < VACB_MAPPING_GRANULARITY)
|
||||
{
|
||||
RtlZeroMemory((char*)Vacb->BaseAddress + Size,
|
||||
VACB_MAPPING_GRANULARITY - Size);
|
||||
}
|
||||
|
||||
return STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
NTSTATUS
|
||||
NTAPI
|
||||
CcWriteVirtualAddress (
|
||||
PROS_VACB Vacb)
|
||||
{
|
||||
ULONG Size;
|
||||
PMDL Mdl;
|
||||
NTSTATUS Status;
|
||||
IO_STATUS_BLOCK IoStatus;
|
||||
KEVENT Event;
|
||||
ULARGE_INTEGER LargeSize;
|
||||
|
||||
LargeSize.QuadPart = Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart;
|
||||
if (LargeSize.QuadPart > VACB_MAPPING_GRANULARITY)
|
||||
{
|
||||
LargeSize.QuadPart = VACB_MAPPING_GRANULARITY;
|
||||
}
|
||||
Size = LargeSize.LowPart;
|
||||
//
|
||||
// Nonpaged pool PDEs in ReactOS must actually be synchronized between the
|
||||
// MmGlobalPageDirectory and the real system PDE directory. What a mess...
|
||||
//
|
||||
{
|
||||
ULONG i = 0;
|
||||
do
|
||||
{
|
||||
MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i << PAGE_SHIFT)));
|
||||
} while (++i < (Size >> PAGE_SHIFT));
|
||||
}
|
||||
|
||||
ASSERT(Size <= VACB_MAPPING_GRANULARITY);
|
||||
ASSERT(Size > 0);
|
||||
|
||||
Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
|
||||
if (!Mdl)
|
||||
{
|
||||
return STATUS_INSUFFICIENT_RESOURCES;
|
||||
}
|
||||
|
||||
Status = STATUS_SUCCESS;
|
||||
_SEH2_TRY
|
||||
{
|
||||
MmProbeAndLockPages(Mdl, KernelMode, IoReadAccess);
|
||||
}
|
||||
_SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
|
||||
{
|
||||
Status = _SEH2_GetExceptionCode();
|
||||
DPRINT1("MmProbeAndLockPages failed with: %lx for %p (%p, %p)\n", Status, Mdl, Vacb, Vacb->BaseAddress);
|
||||
KeBugCheck(CACHE_MANAGER);
|
||||
} _SEH2_END;
|
||||
|
||||
if (NT_SUCCESS(Status))
|
||||
{
|
||||
KeInitializeEvent(&Event, NotificationEvent, FALSE);
|
||||
Status = IoSynchronousPageWrite(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
|
||||
if (Status == STATUS_PENDING)
|
||||
{
|
||||
KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
|
||||
Status = IoStatus.Status;
|
||||
}
|
||||
|
||||
MmUnlockPages(Mdl);
|
||||
}
|
||||
IoFreeMdl(Mdl);
|
||||
if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
|
||||
{
|
||||
DPRINT1("IoPageWrite failed, Status %x\n", Status);
|
||||
return Status;
|
||||
}
|
||||
|
||||
return STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
NTSTATUS
|
||||
ReadWriteOrZero(
|
||||
_Inout_ PVOID BaseAddress,
|
||||
_Inout_opt_ PVOID Buffer,
|
||||
_In_ ULONG Length,
|
||||
_In_ CC_COPY_OPERATION Operation)
|
||||
{
|
||||
NTSTATUS Status = STATUS_SUCCESS;
|
||||
|
||||
if (Operation == CcOperationZero)
|
||||
{
|
||||
/* Zero */
|
||||
RtlZeroMemory(BaseAddress, Length);
|
||||
}
|
||||
else
|
||||
{
|
||||
_SEH2_TRY
|
||||
{
|
||||
if (Operation == CcOperationWrite)
|
||||
RtlCopyMemory(BaseAddress, Buffer, Length);
|
||||
else
|
||||
RtlCopyMemory(Buffer, BaseAddress, Length);
|
||||
}
|
||||
_SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
|
||||
{
|
||||
Status = _SEH2_GetExceptionCode();
|
||||
}
|
||||
_SEH2_END;
|
||||
}
|
||||
return Status;
|
||||
}
|
||||
|
||||
BOOLEAN
|
||||
CcCopyData (
|
||||
_In_ PFILE_OBJECT FileObject,
|
||||
_In_ LONGLONG FileOffset,
|
||||
_Inout_ PVOID Buffer,
|
||||
_In_ LONGLONG Length,
|
||||
_In_ CC_COPY_OPERATION Operation,
|
||||
_In_ BOOLEAN Wait,
|
||||
_Out_ PIO_STATUS_BLOCK IoStatus)
|
||||
{
|
||||
NTSTATUS Status;
|
||||
LONGLONG CurrentOffset;
|
||||
ULONG BytesCopied;
|
||||
KIRQL OldIrql;
|
||||
PROS_SHARED_CACHE_MAP SharedCacheMap;
|
||||
PLIST_ENTRY ListEntry;
|
||||
PROS_VACB Vacb;
|
||||
ULONG PartialLength;
|
||||
PVOID BaseAddress;
|
||||
BOOLEAN Valid;
|
||||
PPRIVATE_CACHE_MAP PrivateCacheMap;
|
||||
|
||||
SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
|
||||
PrivateCacheMap = FileObject->PrivateCacheMap;
|
||||
CurrentOffset = FileOffset;
|
||||
BytesCopied = 0;
|
||||
|
||||
if (!Wait)
|
||||
{
|
||||
/* test if the requested data is available */
|
||||
KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
|
||||
/* FIXME: this loop doesn't take into account areas that don't have
|
||||
* a VACB in the list yet */
|
||||
ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
|
||||
while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
|
||||
{
|
||||
Vacb = CONTAINING_RECORD(ListEntry,
|
||||
ROS_VACB,
|
||||
CacheMapVacbListEntry);
|
||||
ListEntry = ListEntry->Flink;
|
||||
if (!Vacb->Valid &&
|
||||
DoRangesIntersect(Vacb->FileOffset.QuadPart,
|
||||
VACB_MAPPING_GRANULARITY,
|
||||
CurrentOffset, Length))
|
||||
{
|
||||
KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
|
||||
/* data not available */
|
||||
return FALSE;
|
||||
}
|
||||
if (Vacb->FileOffset.QuadPart >= CurrentOffset + Length)
|
||||
break;
|
||||
}
|
||||
KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
|
||||
}
|
||||
|
||||
PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
|
||||
if (PartialLength != 0)
|
||||
{
|
||||
PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
|
||||
Status = CcRosRequestVacb(SharedCacheMap,
|
||||
ROUND_DOWN(CurrentOffset,
|
||||
VACB_MAPPING_GRANULARITY),
|
||||
&BaseAddress,
|
||||
&Valid,
|
||||
&Vacb);
|
||||
if (!NT_SUCCESS(Status))
|
||||
ExRaiseStatus(Status);
|
||||
if (!Valid)
|
||||
{
|
||||
Status = CcReadVirtualAddress(Vacb);
|
||||
if (!NT_SUCCESS(Status))
|
||||
{
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
|
||||
ExRaiseStatus(Status);
|
||||
}
|
||||
}
|
||||
Status = ReadWriteOrZero((PUCHAR)BaseAddress + CurrentOffset % VACB_MAPPING_GRANULARITY,
|
||||
Buffer,
|
||||
PartialLength,
|
||||
Operation);
|
||||
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);
|
||||
|
||||
if (!NT_SUCCESS(Status))
|
||||
ExRaiseStatus(STATUS_INVALID_USER_BUFFER);
|
||||
|
||||
Length -= PartialLength;
|
||||
CurrentOffset += PartialLength;
|
||||
BytesCopied += PartialLength;
|
||||
|
||||
if (Operation != CcOperationZero)
|
||||
Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
|
||||
}
|
||||
|
||||
while (Length > 0)
|
||||
{
|
||||
ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
|
||||
PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
|
||||
Status = CcRosRequestVacb(SharedCacheMap,
|
||||
CurrentOffset,
|
||||
&BaseAddress,
|
||||
&Valid,
|
||||
&Vacb);
|
||||
if (!NT_SUCCESS(Status))
|
||||
ExRaiseStatus(Status);
|
||||
if (!Valid &&
|
||||
(Operation == CcOperationRead ||
|
||||
PartialLength < VACB_MAPPING_GRANULARITY))
|
||||
{
|
||||
Status = CcReadVirtualAddress(Vacb);
|
||||
if (!NT_SUCCESS(Status))
|
||||
{
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
|
||||
ExRaiseStatus(Status);
|
||||
}
|
||||
}
|
||||
Status = ReadWriteOrZero(BaseAddress, Buffer, PartialLength, Operation);
|
||||
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);
|
||||
|
||||
if (!NT_SUCCESS(Status))
|
||||
ExRaiseStatus(STATUS_INVALID_USER_BUFFER);
|
||||
|
||||
Length -= PartialLength;
|
||||
CurrentOffset += PartialLength;
|
||||
BytesCopied += PartialLength;
|
||||
|
||||
if (Operation != CcOperationZero)
|
||||
Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
|
||||
}
|
||||
|
||||
/* If that was a successful sync read operation, let's handle read ahead */
|
||||
if (Operation == CcOperationRead && Length == 0 && Wait)
|
||||
{
|
||||
/* If file isn't random access and next read may get us cross VACB boundary,
|
||||
* schedule next read
|
||||
*/
|
||||
if (!BooleanFlagOn(FileObject->Flags, FO_RANDOM_ACCESS) &&
|
||||
(CurrentOffset - 1) / VACB_MAPPING_GRANULARITY != (CurrentOffset + BytesCopied - 1) / VACB_MAPPING_GRANULARITY)
|
||||
{
|
||||
CcScheduleReadAhead(FileObject, (PLARGE_INTEGER)&FileOffset, BytesCopied);
|
||||
}
|
||||
|
||||
/* And update read history in private cache map */
|
||||
PrivateCacheMap->FileOffset1.QuadPart = PrivateCacheMap->FileOffset2.QuadPart;
|
||||
PrivateCacheMap->BeyondLastByte1.QuadPart = PrivateCacheMap->BeyondLastByte2.QuadPart;
|
||||
PrivateCacheMap->FileOffset2.QuadPart = FileOffset;
|
||||
PrivateCacheMap->BeyondLastByte2.QuadPart = FileOffset + BytesCopied;
|
||||
}
|
||||
|
||||
IoStatus->Status = STATUS_SUCCESS;
|
||||
IoStatus->Information = BytesCopied;
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
VOID
|
||||
CcPostDeferredWrites(VOID)
|
||||
{
|
||||
|
@ -492,8 +153,6 @@ CcPerformReadAhead(
|
|||
PROS_SHARED_CACHE_MAP SharedCacheMap;
|
||||
PROS_VACB Vacb;
|
||||
ULONG PartialLength;
|
||||
PVOID BaseAddress;
|
||||
BOOLEAN Valid;
|
||||
ULONG Length;
|
||||
PPRIVATE_CACHE_MAP PrivateCacheMap;
|
||||
BOOLEAN Locked;
|
||||
|
@ -556,10 +215,7 @@ CcPerformReadAhead(
|
|||
{
|
||||
PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
|
||||
Status = CcRosRequestVacb(SharedCacheMap,
|
||||
ROUND_DOWN(CurrentOffset,
|
||||
VACB_MAPPING_GRANULARITY),
|
||||
&BaseAddress,
|
||||
&Valid,
|
||||
ROUND_DOWN(CurrentOffset, VACB_MAPPING_GRANULARITY),
|
||||
&Vacb);
|
||||
if (!NT_SUCCESS(Status))
|
||||
{
|
||||
|
@ -567,15 +223,13 @@ CcPerformReadAhead(
|
|||
goto Clear;
|
||||
}
|
||||
|
||||
if (!Valid)
|
||||
Status = CcRosEnsureVacbResident(Vacb, TRUE, FALSE,
|
||||
CurrentOffset % VACB_MAPPING_GRANULARITY, PartialLength);
|
||||
if (!NT_SUCCESS(Status))
|
||||
{
|
||||
Status = CcReadVirtualAddress(Vacb);
|
||||
if (!NT_SUCCESS(Status))
|
||||
{
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
|
||||
DPRINT1("Failed to read data: %lx!\n", Status);
|
||||
goto Clear;
|
||||
}
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
|
||||
DPRINT1("Failed to read data: %lx!\n", Status);
|
||||
goto Clear;
|
||||
}
|
||||
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
|
||||
|
@ -590,8 +244,6 @@ CcPerformReadAhead(
|
|||
PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
|
||||
Status = CcRosRequestVacb(SharedCacheMap,
|
||||
CurrentOffset,
|
||||
&BaseAddress,
|
||||
&Valid,
|
||||
&Vacb);
|
||||
if (!NT_SUCCESS(Status))
|
||||
{
|
||||
|
@ -599,15 +251,12 @@ CcPerformReadAhead(
|
|||
goto Clear;
|
||||
}
|
||||
|
||||
if (!Valid)
|
||||
Status = CcRosEnsureVacbResident(Vacb, TRUE, FALSE, 0, PartialLength);
|
||||
if (!NT_SUCCESS(Status))
|
||||
{
|
||||
Status = CcReadVirtualAddress(Vacb);
|
||||
if (!NT_SUCCESS(Status))
|
||||
{
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
|
||||
DPRINT1("Failed to read data: %lx!\n", Status);
|
||||
goto Clear;
|
||||
}
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
|
||||
DPRINT1("Failed to read data: %lx!\n", Status);
|
||||
goto Clear;
|
||||
}
|
||||
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
|
||||
|
@ -811,6 +460,12 @@ CcCopyRead (
|
|||
OUT PVOID Buffer,
|
||||
OUT PIO_STATUS_BLOCK IoStatus)
|
||||
{
|
||||
PROS_VACB Vacb;
|
||||
PROS_SHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
|
||||
NTSTATUS Status;
|
||||
LONGLONG CurrentOffset;
|
||||
LONGLONG ReadEnd = FileOffset->QuadPart + Length;
|
||||
|
||||
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
|
||||
FileObject, FileOffset->QuadPart, Length, Wait);
|
||||
|
||||
|
@ -819,13 +474,58 @@ CcCopyRead (
|
|||
FileObject, FileOffset->QuadPart, Length, Wait,
|
||||
Buffer, IoStatus);
|
||||
|
||||
return CcCopyData(FileObject,
|
||||
FileOffset->QuadPart,
|
||||
Buffer,
|
||||
Length,
|
||||
CcOperationRead,
|
||||
Wait,
|
||||
IoStatus);
|
||||
if (!SharedCacheMap)
|
||||
return FALSE;
|
||||
|
||||
/* Documented to ASSERT, but KMTests test this case... */
|
||||
// ASSERT((FileOffset->QuadPart + Length) <= SharedCacheMap->FileSize.QuadPart);
|
||||
|
||||
IoStatus->Status = STATUS_SUCCESS;
|
||||
IoStatus->Information = 0;
|
||||
|
||||
CurrentOffset = FileOffset->QuadPart;
|
||||
while(CurrentOffset < ReadEnd)
|
||||
{
|
||||
Status = CcRosGetVacb(SharedCacheMap, CurrentOffset, &Vacb);
|
||||
if (!NT_SUCCESS(Status))
|
||||
{
|
||||
ExRaiseStatus(Status);
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
_SEH2_TRY
|
||||
{
|
||||
ULONG VacbOffset = CurrentOffset % VACB_MAPPING_GRANULARITY;
|
||||
ULONG VacbLength = min(Length, VACB_MAPPING_GRANULARITY - VacbOffset);
|
||||
SIZE_T CopyLength = VacbLength;
|
||||
|
||||
if (!CcRosEnsureVacbResident(Vacb, Wait, FALSE, VacbOffset, VacbLength))
|
||||
return FALSE;
|
||||
|
||||
/* Do not copy past the section */
|
||||
if (CurrentOffset + VacbLength > SharedCacheMap->SectionSize.QuadPart)
|
||||
CopyLength = SharedCacheMap->SectionSize.QuadPart - CurrentOffset;
|
||||
if (CopyLength != 0)
|
||||
RtlCopyMemory(Buffer, (PUCHAR)Vacb->BaseAddress + VacbOffset, CopyLength);
|
||||
|
||||
/* Zero-out the buffer tail if needed */
|
||||
if (CopyLength < VacbLength)
|
||||
RtlZeroMemory((PUCHAR)Buffer + CopyLength, VacbLength - CopyLength);
|
||||
|
||||
IoStatus->Information += VacbLength;
|
||||
|
||||
Buffer = (PVOID)((ULONG_PTR)Buffer + VacbLength);
|
||||
CurrentOffset += VacbLength;
|
||||
Length -= VacbLength;
|
||||
}
|
||||
_SEH2_FINALLY
|
||||
{
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
|
||||
}
|
||||
_SEH2_END;
|
||||
}
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -840,7 +540,11 @@ CcCopyWrite (
|
|||
IN BOOLEAN Wait,
|
||||
IN PVOID Buffer)
|
||||
{
|
||||
IO_STATUS_BLOCK IoStatus;
|
||||
PROS_VACB Vacb;
|
||||
PROS_SHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
|
||||
NTSTATUS Status;
|
||||
LONGLONG CurrentOffset;
|
||||
LONGLONG WriteEnd = FileOffset->QuadPart + Length;
|
||||
|
||||
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
|
||||
FileObject, FileOffset->QuadPart, Length, Wait, Buffer);
|
||||
|
@ -849,13 +553,48 @@ CcCopyWrite (
|
|||
"Length %lu, Wait %u, Buffer 0x%p)\n",
|
||||
FileObject, FileOffset->QuadPart, Length, Wait, Buffer);
|
||||
|
||||
return CcCopyData(FileObject,
|
||||
FileOffset->QuadPart,
|
||||
Buffer,
|
||||
Length,
|
||||
CcOperationWrite,
|
||||
Wait,
|
||||
&IoStatus);
|
||||
if (!SharedCacheMap)
|
||||
return FALSE;
|
||||
|
||||
/* FIXME: Honor FileObject FO_WRITE_THROUGH flag */
|
||||
|
||||
ASSERT((FileOffset->QuadPart + Length) <= SharedCacheMap->FileSize.QuadPart);
|
||||
|
||||
CurrentOffset = FileOffset->QuadPart;
|
||||
while(CurrentOffset < WriteEnd)
|
||||
{
|
||||
ULONG VacbOffset = CurrentOffset % VACB_MAPPING_GRANULARITY;
|
||||
ULONG VacbLength = min(Length, VACB_MAPPING_GRANULARITY - VacbOffset);
|
||||
|
||||
Status = CcRosGetVacb(SharedCacheMap, CurrentOffset, &Vacb);
|
||||
if (!NT_SUCCESS(Status))
|
||||
{
|
||||
ExRaiseStatus(Status);
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
_SEH2_TRY
|
||||
{
|
||||
if (!CcRosEnsureVacbResident(Vacb, Wait, FALSE, VacbOffset, VacbLength))
|
||||
{
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
RtlCopyMemory((PVOID)((ULONG_PTR)Vacb->BaseAddress + VacbOffset), Buffer, VacbLength);
|
||||
|
||||
Buffer = (PVOID)((ULONG_PTR)Buffer + VacbLength);
|
||||
CurrentOffset += VacbLength;
|
||||
Length -= VacbLength;
|
||||
}
|
||||
_SEH2_FINALLY
|
||||
{
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, TRUE, FALSE);
|
||||
}
|
||||
_SEH2_END;
|
||||
}
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -999,11 +738,8 @@ CcZeroData (
|
|||
NTSTATUS Status;
|
||||
LARGE_INTEGER WriteOffset;
|
||||
LONGLONG Length;
|
||||
ULONG CurrentLength;
|
||||
PMDL Mdl;
|
||||
ULONG i;
|
||||
IO_STATUS_BLOCK Iosb;
|
||||
KEVENT Event;
|
||||
PROS_VACB Vacb;
|
||||
PROS_SHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
|
||||
|
||||
CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
|
||||
FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);
|
||||
|
@ -1015,9 +751,14 @@ CcZeroData (
|
|||
Length = EndOffset->QuadPart - StartOffset->QuadPart;
|
||||
WriteOffset.QuadPart = StartOffset->QuadPart;
|
||||
|
||||
if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
|
||||
if (!SharedCacheMap || (FileObject->Flags & FO_WRITE_THROUGH))
|
||||
{
|
||||
/* File is not cached */
|
||||
/* Make this a non-cached write */
|
||||
IO_STATUS_BLOCK Iosb;
|
||||
KEVENT Event;
|
||||
PMDL Mdl;
|
||||
ULONG i;
|
||||
ULONG CurrentLength;
|
||||
|
||||
Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));
|
||||
|
||||
|
@ -1032,7 +773,7 @@ CcZeroData (
|
|||
CurrentLength = Length;
|
||||
}
|
||||
MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
|
||||
Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
|
||||
Mdl->MdlFlags |= MDL_PAGES_LOCKED;
|
||||
for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
|
||||
{
|
||||
((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;
|
||||
|
@ -1055,18 +796,42 @@ CcZeroData (
|
|||
WriteOffset.QuadPart += CurrentLength;
|
||||
Length -= CurrentLength;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
IO_STATUS_BLOCK IoStatus;
|
||||
|
||||
return CcCopyData(FileObject,
|
||||
WriteOffset.QuadPart,
|
||||
NULL,
|
||||
Length,
|
||||
CcOperationZero,
|
||||
Wait,
|
||||
&IoStatus);
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
ASSERT(EndOffset->QuadPart <= SharedCacheMap->FileSize.QuadPart);
|
||||
|
||||
while(WriteOffset.QuadPart < EndOffset->QuadPart)
|
||||
{
|
||||
ULONG VacbOffset = WriteOffset.QuadPart % VACB_MAPPING_GRANULARITY;
|
||||
ULONG VacbLength = min(Length, VACB_MAPPING_GRANULARITY - VacbOffset);
|
||||
|
||||
Status = CcRosGetVacb(SharedCacheMap, WriteOffset.QuadPart, &Vacb);
|
||||
if (!NT_SUCCESS(Status))
|
||||
{
|
||||
ExRaiseStatus(Status);
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
_SEH2_TRY
|
||||
{
|
||||
if (!CcRosEnsureVacbResident(Vacb, Wait, FALSE, VacbOffset, VacbLength))
|
||||
{
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
RtlZeroMemory((PVOID)((ULONG_PTR)Vacb->BaseAddress + VacbOffset), VacbLength);
|
||||
|
||||
WriteOffset.QuadPart += VacbLength;
|
||||
Length -= VacbLength;
|
||||
}
|
||||
_SEH2_FINALLY
|
||||
{
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, TRUE, FALSE);
|
||||
}
|
||||
_SEH2_END;
|
||||
}
|
||||
|
||||
return TRUE;
|
||||
|
|
|
@ -10,13 +10,10 @@
|
|||
/* INCLUDES ******************************************************************/
|
||||
|
||||
#include <ntoskrnl.h>
|
||||
|
||||
#define NDEBUG
|
||||
#include <debug.h>
|
||||
|
||||
/* GLOBALS *****************************************************************/
|
||||
|
||||
NTSTATUS CcRosInternalFreeVacb(PROS_VACB Vacb);
|
||||
|
||||
/* FUNCTIONS *****************************************************************/
|
||||
|
||||
/*
|
||||
|
@ -272,8 +269,9 @@ CcSetFileSizes (
|
|||
IN PFILE_OBJECT FileObject,
|
||||
IN PCC_FILE_SIZES FileSizes)
|
||||
{
|
||||
KIRQL oldirql;
|
||||
KIRQL OldIrql;
|
||||
PROS_SHARED_CACHE_MAP SharedCacheMap;
|
||||
LARGE_INTEGER OldSectionSize;
|
||||
|
||||
CCTRACE(CC_API_DEBUG, "FileObject=%p FileSizes=%p\n",
|
||||
FileObject, FileSizes);
|
||||
|
@ -294,7 +292,14 @@ CcSetFileSizes (
|
|||
if (SharedCacheMap == NULL)
|
||||
return;
|
||||
|
||||
if (FileSizes->AllocationSize.QuadPart < SharedCacheMap->SectionSize.QuadPart)
|
||||
/* Update the relevant fields */
|
||||
KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
|
||||
OldSectionSize = SharedCacheMap->SectionSize;
|
||||
SharedCacheMap->SectionSize = FileSizes->AllocationSize;
|
||||
SharedCacheMap->FileSize = FileSizes->FileSize;
|
||||
KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
|
||||
|
||||
if (FileSizes->AllocationSize.QuadPart < OldSectionSize.QuadPart)
|
||||
{
|
||||
CcPurgeCacheSection(FileObject->SectionObjectPointer,
|
||||
&FileSizes->AllocationSize,
|
||||
|
@ -303,46 +308,9 @@ CcSetFileSizes (
|
|||
}
|
||||
else
|
||||
{
|
||||
PROS_VACB LastVacb;
|
||||
|
||||
/*
|
||||
* If file (allocation) size has increased, then we need to check whether
|
||||
* it just grows in a single VACB (the last one).
|
||||
* If so, we must mark the VACB as invalid to trigger a read to the
|
||||
* FSD at the next VACB usage, and thus avoid returning garbage
|
||||
*/
|
||||
|
||||
/* Check for allocation size and the last VACB */
|
||||
if (SharedCacheMap->SectionSize.QuadPart < FileSizes->AllocationSize.QuadPart &&
|
||||
SharedCacheMap->SectionSize.QuadPart % VACB_MAPPING_GRANULARITY)
|
||||
{
|
||||
LastVacb = CcRosLookupVacb(SharedCacheMap,
|
||||
SharedCacheMap->SectionSize.QuadPart);
|
||||
if (LastVacb != NULL)
|
||||
{
|
||||
/* Mark it as invalid */
|
||||
CcRosReleaseVacb(SharedCacheMap, LastVacb, LastVacb->Dirty ? LastVacb->Valid : FALSE, FALSE, FALSE);
|
||||
}
|
||||
}
|
||||
|
||||
/* Check for file size and the last VACB */
|
||||
if (SharedCacheMap->FileSize.QuadPart < FileSizes->FileSize.QuadPart &&
|
||||
SharedCacheMap->FileSize.QuadPart % VACB_MAPPING_GRANULARITY)
|
||||
{
|
||||
LastVacb = CcRosLookupVacb(SharedCacheMap,
|
||||
SharedCacheMap->FileSize.QuadPart);
|
||||
if (LastVacb != NULL)
|
||||
{
|
||||
/* Mark it as invalid */
|
||||
CcRosReleaseVacb(SharedCacheMap, LastVacb, LastVacb->Dirty ? LastVacb->Valid : FALSE, FALSE, FALSE);
|
||||
}
|
||||
}
|
||||
/* Extend our section object */
|
||||
MmExtendSection(SharedCacheMap->Section, &SharedCacheMap->SectionSize);
|
||||
}
|
||||
|
||||
KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
|
||||
SharedCacheMap->SectionSize = FileSizes->AllocationSize;
|
||||
SharedCacheMap->FileSize = FileSizes->FileSize;
|
||||
KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -67,91 +67,6 @@ CcpFindBcb(
|
|||
return (Found ? Bcb : NULL);
|
||||
}
|
||||
|
||||
static
|
||||
BOOLEAN
|
||||
NTAPI
|
||||
CcpMapData(
|
||||
IN PROS_SHARED_CACHE_MAP SharedCacheMap,
|
||||
IN PLARGE_INTEGER FileOffset,
|
||||
IN ULONG Length,
|
||||
IN ULONG Flags,
|
||||
OUT PROS_VACB *pVacb,
|
||||
OUT PVOID *pBuffer)
|
||||
{
|
||||
LONGLONG ReadOffset, BaseOffset;
|
||||
BOOLEAN Valid;
|
||||
PROS_VACB Vacb;
|
||||
NTSTATUS Status;
|
||||
LONGLONG ROffset;
|
||||
|
||||
ReadOffset = FileOffset->QuadPart;
|
||||
|
||||
DPRINT("SectionSize %I64x, FileSize %I64x\n",
|
||||
SharedCacheMap->SectionSize.QuadPart,
|
||||
SharedCacheMap->FileSize.QuadPart);
|
||||
|
||||
if (ReadOffset % VACB_MAPPING_GRANULARITY + Length > VACB_MAPPING_GRANULARITY)
|
||||
{
|
||||
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%p Length=%lu Flags=0x%lx -> FALSE\n",
|
||||
SharedCacheMap->FileObject, FileOffset, Length, Flags);
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
if (!BooleanFlagOn(Flags, MAP_NO_READ))
|
||||
{
|
||||
static int Warned = 0;
|
||||
|
||||
SetFlag(Flags, MAP_NO_READ);
|
||||
if (!Warned)
|
||||
{
|
||||
DPRINT1("Mapping/pinning with no read not implemented. Forcing read, might fail if wait not allowed\n");
|
||||
Warned++;
|
||||
}
|
||||
}
|
||||
|
||||
/* Properly round offset and call internal helper for getting a VACB */
|
||||
ROffset = ROUND_DOWN(ReadOffset, VACB_MAPPING_GRANULARITY);
|
||||
Status = CcRosGetVacb(SharedCacheMap,
|
||||
ROffset,
|
||||
&BaseOffset,
|
||||
pBuffer,
|
||||
&Valid,
|
||||
&Vacb);
|
||||
if (!NT_SUCCESS(Status))
|
||||
{
|
||||
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%p Length=%lu Flags=0x%lx -> FALSE\n",
|
||||
SharedCacheMap->FileObject, FileOffset, Length, Flags);
|
||||
ExRaiseStatus(Status);
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
if (!Valid && BooleanFlagOn(Flags, MAP_NO_READ))
|
||||
{
|
||||
if (!BooleanFlagOn(Flags, MAP_WAIT))
|
||||
{
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
|
||||
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%p Length=%lu Flags=0x%lx -> FALSE\n",
|
||||
SharedCacheMap->FileObject, FileOffset, Length, Flags);
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
Status = CcReadVirtualAddress(Vacb);
|
||||
if (!NT_SUCCESS(Status))
|
||||
{
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
|
||||
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%p Length=%lu Flags=0x%lx -> FALSE\n",
|
||||
SharedCacheMap->FileObject, FileOffset, Length, Flags);
|
||||
ExRaiseStatus(Status);
|
||||
return FALSE;
|
||||
}
|
||||
}
|
||||
|
||||
*pBuffer = (PUCHAR)*pBuffer + ReadOffset % VACB_MAPPING_GRANULARITY;
|
||||
*pVacb = Vacb;
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
static
|
||||
VOID
|
||||
CcpDereferenceBcb(
|
||||
|
@ -304,44 +219,44 @@ CcpPinData(
|
|||
OUT PVOID * Buffer)
|
||||
{
|
||||
PINTERNAL_BCB NewBcb;
|
||||
BOOLEAN Result;
|
||||
PROS_VACB Vacb;
|
||||
KIRQL OldIrql;
|
||||
ULONG MapFlags;
|
||||
ULONG VacbOffset;
|
||||
NTSTATUS Status;
|
||||
BOOLEAN Result;
|
||||
|
||||
VacbOffset = (ULONG)(FileOffset->QuadPart % VACB_MAPPING_GRANULARITY);
|
||||
/* This seems to be valid, according to KMTests */
|
||||
if ((VacbOffset + Length) > VACB_MAPPING_GRANULARITY)
|
||||
Length = VACB_MAPPING_GRANULARITY - VacbOffset;
|
||||
|
||||
KeAcquireSpinLock(&SharedCacheMap->BcbSpinLock, &OldIrql);
|
||||
NewBcb = CcpFindBcb(SharedCacheMap, FileOffset, Length, TRUE);
|
||||
|
||||
if (NewBcb != NULL)
|
||||
{
|
||||
BOOLEAN Result;
|
||||
|
||||
++NewBcb->RefCount;
|
||||
KeReleaseSpinLock(&SharedCacheMap->BcbSpinLock, OldIrql);
|
||||
|
||||
if (BooleanFlagOn(Flags, PIN_EXCLUSIVE))
|
||||
{
|
||||
Result = ExAcquireResourceExclusiveLite(&NewBcb->Lock, BooleanFlagOn(Flags, PIN_WAIT));
|
||||
}
|
||||
else
|
||||
{
|
||||
Result = ExAcquireSharedStarveExclusive(&NewBcb->Lock, BooleanFlagOn(Flags, PIN_WAIT));
|
||||
}
|
||||
|
||||
if (!Result)
|
||||
{
|
||||
CcpDereferenceBcb(SharedCacheMap, NewBcb);
|
||||
NewBcb = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
NewBcb->PinCount++;
|
||||
*Bcb = NewBcb;
|
||||
*Buffer = (PUCHAR)NewBcb->Vacb->BaseAddress + FileOffset->QuadPart % VACB_MAPPING_GRANULARITY;
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
return Result;
|
||||
NewBcb->PinCount++;
|
||||
}
|
||||
else
|
||||
{
|
||||
LONGLONG ROffset;
|
||||
PROS_VACB Vacb;
|
||||
|
||||
KeReleaseSpinLock(&SharedCacheMap->BcbSpinLock, OldIrql);
|
||||
|
||||
if (BooleanFlagOn(Flags, PIN_IF_BCB))
|
||||
|
@ -349,29 +264,49 @@ CcpPinData(
|
|||
return FALSE;
|
||||
}
|
||||
|
||||
MapFlags = Flags & PIN_WAIT;
|
||||
if (BooleanFlagOn(Flags, PIN_NO_READ))
|
||||
/* Properly round offset and call internal helper for getting a VACB */
|
||||
ROffset = ROUND_DOWN(FileOffset->QuadPart, VACB_MAPPING_GRANULARITY);
|
||||
Status = CcRosGetVacb(SharedCacheMap, ROffset, &Vacb);
|
||||
if (!NT_SUCCESS(Status))
|
||||
{
|
||||
SetFlag(MapFlags, MAP_NO_READ);
|
||||
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%p Length=%lu Flags=0x%lx -> FALSE\n",
|
||||
SharedCacheMap->FileObject, FileOffset, Length, Flags);
|
||||
ExRaiseStatus(Status);
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
Result = CcpMapData(SharedCacheMap, FileOffset, Length, MapFlags, &Vacb, Buffer);
|
||||
if (Result)
|
||||
NewBcb = CcpGetAppropriateBcb(SharedCacheMap, Vacb, FileOffset, Length, Flags, TRUE);
|
||||
if (NewBcb == NULL)
|
||||
{
|
||||
NewBcb = CcpGetAppropriateBcb(SharedCacheMap, Vacb, FileOffset, Length, Flags, TRUE);
|
||||
if (NewBcb == NULL)
|
||||
{
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
|
||||
Result = FALSE;
|
||||
}
|
||||
else
|
||||
{
|
||||
*Bcb = NewBcb;
|
||||
}
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
|
||||
return FALSE;
|
||||
}
|
||||
}
|
||||
|
||||
return Result;
|
||||
Result = FALSE;
|
||||
_SEH2_TRY
|
||||
{
|
||||
/* Ensure the pages are resident */
|
||||
Result = CcRosEnsureVacbResident(NewBcb->Vacb,
|
||||
BooleanFlagOn(Flags, PIN_WAIT),
|
||||
BooleanFlagOn(Flags, PIN_NO_READ),
|
||||
VacbOffset, Length);
|
||||
}
|
||||
_SEH2_FINALLY
|
||||
{
|
||||
if (!Result)
|
||||
{
|
||||
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%p Length=%lu Flags=0x%lx -> FALSE\n",
|
||||
SharedCacheMap->FileObject, FileOffset, Length, Flags);
|
||||
CcUnpinData(NewBcb);
|
||||
return FALSE;
|
||||
}
|
||||
}
|
||||
_SEH2_END;
|
||||
|
||||
*Bcb = NewBcb;
|
||||
*Buffer = (PVOID)((ULONG_PTR)NewBcb->Vacb->BaseAddress + VacbOffset);
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -387,13 +322,15 @@ CcMapData (
|
|||
OUT PVOID *pBcb,
|
||||
OUT PVOID *pBuffer)
|
||||
{
|
||||
BOOLEAN Ret;
|
||||
KIRQL OldIrql;
|
||||
PINTERNAL_BCB iBcb;
|
||||
PROS_VACB Vacb;
|
||||
PROS_SHARED_CACHE_MAP SharedCacheMap;
|
||||
ULONG VacbOffset;
|
||||
NTSTATUS Status;
|
||||
BOOLEAN Result;
|
||||
|
||||
DPRINT("CcMapData(FileObject 0x%p, FileOffset %I64x, Length %lu, Flags 0x%lx,"
|
||||
CCTRACE(CC_API_DEBUG, "CcMapData(FileObject 0x%p, FileOffset 0x%I64x, Length %lu, Flags 0x%lx,"
|
||||
" pBcb 0x%p, pBuffer 0x%p)\n", FileObject, FileOffset->QuadPart,
|
||||
Length, Flags, pBcb, pBuffer);
|
||||
|
||||
|
@ -413,6 +350,11 @@ CcMapData (
|
|||
++CcMapDataNoWait;
|
||||
}
|
||||
|
||||
VacbOffset = (ULONG)(FileOffset->QuadPart % VACB_MAPPING_GRANULARITY);
|
||||
/* KMTests seem to show that it is allowed to call across mapping granularity */
|
||||
if ((VacbOffset + Length) > VACB_MAPPING_GRANULARITY)
|
||||
Length = VACB_MAPPING_GRANULARITY - VacbOffset;
|
||||
|
||||
KeAcquireSpinLock(&SharedCacheMap->BcbSpinLock, &OldIrql);
|
||||
iBcb = CcpFindBcb(SharedCacheMap, FileOffset, Length, FALSE);
|
||||
|
||||
|
@ -420,34 +362,54 @@ CcMapData (
|
|||
{
|
||||
KeReleaseSpinLock(&SharedCacheMap->BcbSpinLock, OldIrql);
|
||||
|
||||
Ret = CcpMapData(SharedCacheMap, FileOffset, Length, Flags, &Vacb, pBuffer);
|
||||
if (Ret)
|
||||
/* Call internal helper for getting a VACB */
|
||||
Status = CcRosGetVacb(SharedCacheMap, FileOffset->QuadPart, &Vacb);
|
||||
if (!NT_SUCCESS(Status))
|
||||
{
|
||||
iBcb = CcpGetAppropriateBcb(SharedCacheMap, Vacb, FileOffset, Length, 0, FALSE);
|
||||
if (iBcb == NULL)
|
||||
{
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
|
||||
Ret = FALSE;
|
||||
}
|
||||
else
|
||||
{
|
||||
*pBcb = iBcb;
|
||||
}
|
||||
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%p Length=%lu Flags=0x%lx -> FALSE\n",
|
||||
SharedCacheMap->FileObject, FileOffset, Length, Flags);
|
||||
ExRaiseStatus(Status);
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
iBcb = CcpGetAppropriateBcb(SharedCacheMap, Vacb, FileOffset, Length, 0, FALSE);
|
||||
if (iBcb == NULL)
|
||||
{
|
||||
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
|
||||
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%p Length=%lu Flags=0x%lx -> FALSE\n",
|
||||
SharedCacheMap->FileObject, FileOffset, Length, Flags);
|
||||
return FALSE;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
++iBcb->RefCount;
|
||||
KeReleaseSpinLock(&SharedCacheMap->BcbSpinLock, OldIrql);
|
||||
|
||||
*pBcb = iBcb;
|
||||
*pBuffer = (PUCHAR)iBcb->Vacb->BaseAddress + FileOffset->QuadPart % VACB_MAPPING_GRANULARITY;
|
||||
Ret = TRUE;
|
||||
}
|
||||
|
||||
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%p Length=%lu Flags=0x%lx -> %d Bcb=%p\n",
|
||||
FileObject, FileOffset, Length, Flags, Ret, *pBcb);
|
||||
return Ret;
|
||||
_SEH2_TRY
|
||||
{
|
||||
Result = FALSE;
|
||||
/* Ensure the pages are resident */
|
||||
Result = CcRosEnsureVacbResident(iBcb->Vacb, BooleanFlagOn(Flags, MAP_WAIT),
|
||||
BooleanFlagOn(Flags, MAP_NO_READ), VacbOffset, Length);
|
||||
}
|
||||
_SEH2_FINALLY
|
||||
{
|
||||
if (!Result)
|
||||
{
|
||||
CcpDereferenceBcb(SharedCacheMap, iBcb);
|
||||
return FALSE;
|
||||
}
|
||||
}
|
||||
_SEH2_END;
|
||||
|
||||
*pBcb = iBcb;
|
||||
*pBuffer = (PVOID)((ULONG_PTR)iBcb->Vacb->BaseAddress + VacbOffset);
|
||||
|
||||
CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%p Length=%lu Flags=0x%lx -> TRUE Bcb=%p, Buffer %p\n",
|
||||
FileObject, FileOffset, Length, Flags, *pBcb, *pBuffer);
|
||||
return Result;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -156,20 +156,29 @@ CcRosTraceCacheMap (
|
|||
#endif
|
||||
}
|
||||
|
||||
NTSTATUS
|
||||
NTAPI
|
||||
MmFlushVirtualMemory(IN PEPROCESS Process,
|
||||
IN OUT PVOID *BaseAddress,
|
||||
IN OUT PSIZE_T RegionSize,
|
||||
OUT PIO_STATUS_BLOCK IoStatusBlock);
|
||||
|
||||
NTSTATUS
|
||||
NTAPI
|
||||
CcRosFlushVacb (
|
||||
PROS_VACB Vacb)
|
||||
{
|
||||
IO_STATUS_BLOCK Iosb;
|
||||
SIZE_T FlushSize = min(VACB_MAPPING_GRANULARITY,
|
||||
Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
|
||||
NTSTATUS Status;
|
||||
|
||||
CcRosUnmarkDirtyVacb(Vacb, TRUE);
|
||||
|
||||
Status = CcWriteVirtualAddress(Vacb);
|
||||
Status = MmFlushVirtualMemory(NULL, &Vacb->BaseAddress, &FlushSize, &Iosb);
|
||||
|
||||
if (!NT_SUCCESS(Status))
|
||||
{
|
||||
CcRosMarkDirtyVacb(Vacb);
|
||||
}
|
||||
|
||||
return Status;
|
||||
}
|
||||
|
@ -234,6 +243,8 @@ CcRosFlushDirtyPages (
|
|||
current->SharedCacheMap->LazyWriteContext, Wait);
|
||||
if (!Locked)
|
||||
{
|
||||
DPRINT("Not locked!");
|
||||
ASSERT(!Wait);
|
||||
OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
|
||||
CcRosVacbDecRefCount(current);
|
||||
continue;
|
||||
|
@ -264,15 +275,18 @@ CcRosFlushDirtyPages (
|
|||
PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
|
||||
(*Count) += PagesFreed;
|
||||
|
||||
/* Make sure we don't overflow target! */
|
||||
if (Target < PagesFreed)
|
||||
if (!Wait)
|
||||
{
|
||||
/* If we would have, jump to zero directly */
|
||||
Target = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
Target -= PagesFreed;
|
||||
/* Make sure we don't overflow target! */
|
||||
if (Target < PagesFreed)
|
||||
{
|
||||
/* If we would have, jump to zero directly */
|
||||
Target = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
Target -= PagesFreed;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -286,136 +300,6 @@ CcRosFlushDirtyPages (
|
|||
return STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
NTSTATUS
|
||||
CcRosTrimCache (
|
||||
ULONG Target,
|
||||
ULONG Priority,
|
||||
PULONG NrFreed)
|
||||
/*
|
||||
* FUNCTION: Try to free some memory from the file cache.
|
||||
* ARGUMENTS:
|
||||
* Target - The number of pages to be freed.
|
||||
* Priority - The priority of free (currently unused).
|
||||
* NrFreed - Points to a variable where the number of pages
|
||||
* actually freed is returned.
|
||||
*/
|
||||
{
|
||||
PLIST_ENTRY current_entry;
|
||||
PROS_VACB current;
|
||||
ULONG PagesFreed;
|
||||
KIRQL oldIrql;
|
||||
LIST_ENTRY FreeList;
|
||||
PFN_NUMBER Page;
|
||||
ULONG i;
|
||||
BOOLEAN FlushedPages = FALSE;
|
||||
|
||||
DPRINT("CcRosTrimCache(Target %lu)\n", Target);
|
||||
|
||||
InitializeListHead(&FreeList);
|
||||
|
||||
*NrFreed = 0;
|
||||
|
||||
retry:
|
||||
oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
|
||||
|
||||
current_entry = VacbLruListHead.Flink;
|
||||
while (current_entry != &VacbLruListHead)
|
||||
{
|
||||
ULONG Refs;
|
||||
|
||||
current = CONTAINING_RECORD(current_entry,
|
||||
ROS_VACB,
|
||||
VacbLruListEntry);
|
||||
current_entry = current_entry->Flink;
|
||||
|
||||
KeAcquireSpinLockAtDpcLevel(¤t->SharedCacheMap->CacheMapLock);
|
||||
|
||||
/* Reference the VACB */
|
||||
CcRosVacbIncRefCount(current);
|
||||
|
||||
/* Check if it's mapped and not dirty */
|
||||
if (InterlockedCompareExchange((PLONG)¤t->MappedCount, 0, 0) > 0 && !current->Dirty)
|
||||
{
|
||||
/* We have to break these locks because Cc sucks */
|
||||
KeReleaseSpinLockFromDpcLevel(¤t->SharedCacheMap->CacheMapLock);
|
||||
KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
|
||||
|
||||
/* Page out the VACB */
|
||||
for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
|
||||
{
|
||||
Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
|
||||
|
||||
MmPageOutPhysicalAddress(Page);
|
||||
}
|
||||
|
||||
/* Reacquire the locks */
|
||||
oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
|
||||
KeAcquireSpinLockAtDpcLevel(¤t->SharedCacheMap->CacheMapLock);
|
||||
}
|
||||
|
||||
/* Dereference the VACB */
|
||||
Refs = CcRosVacbDecRefCount(current);
|
||||
|
||||
/* Check if we can free this entry now */
|
||||
if (Refs < 2)
|
||||
{
|
||||
ASSERT(!current->Dirty);
|
||||
ASSERT(!current->MappedCount);
|
||||
ASSERT(Refs == 1);
|
||||
|
||||
RemoveEntryList(¤t->CacheMapVacbListEntry);
|
||||
RemoveEntryList(¤t->VacbLruListEntry);
|
||||
InitializeListHead(¤t->VacbLruListEntry);
|
||||
InsertHeadList(&FreeList, ¤t->CacheMapVacbListEntry);
|
||||
|
||||
/* Calculate how many pages we freed for Mm */
|
||||
PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
|
||||
Target -= PagesFreed;
|
||||
(*NrFreed) += PagesFreed;
|
||||
}
|
||||
|
||||
KeReleaseSpinLockFromDpcLevel(¤t->SharedCacheMap->CacheMapLock);
|
||||
}
|
||||
|
||||
KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
|
||||
|
||||
/* Try flushing pages if we haven't met our target */
|
||||
if ((Target > 0) && !FlushedPages)
|
||||
{
|
||||
/* Flush dirty pages to disk */
|
||||
CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
|
||||
FlushedPages = TRUE;
|
||||
|
||||
/* We can only swap as many pages as we flushed */
|
||||
if (PagesFreed < Target) Target = PagesFreed;
|
||||
|
||||
/* Check if we flushed anything */
|
||||
if (PagesFreed != 0)
|
||||
{
|
||||
/* Try again after flushing dirty pages */
|
||||
DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
|
||||
while (!IsListEmpty(&FreeList))
|
||||
{
|
||||
ULONG Refs;
|
||||
|
||||
current_entry = RemoveHeadList(&FreeList);
|
||||
current = CONTAINING_RECORD(current_entry,
|
||||
ROS_VACB,
|
||||
CacheMapVacbListEntry);
|
||||
InitializeListHead(¤t->CacheMapVacbListEntry);
|
||||
Refs = CcRosVacbDecRefCount(current);
|
||||
ASSERT(Refs == 0);
|
||||
}
|
||||
|
||||
DPRINT("Evicted %lu cache pages\n", (*NrFreed));
|
||||
|
||||
return STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
NTSTATUS
|
||||
NTAPI
|
||||
CcRosReleaseVacb (
|
||||
|
@ -504,6 +388,7 @@ CcRosMarkDirtyVacb (
|
|||
{
|
||||
KIRQL oldIrql;
|
||||
PROS_SHARED_CACHE_MAP SharedCacheMap;
|
||||
ULONG Length = VACB_MAPPING_GRANULARITY;
|
||||
|
||||
SharedCacheMap = Vacb->SharedCacheMap;
|
||||
|
||||
|
@ -513,8 +398,12 @@ CcRosMarkDirtyVacb (
|
|||
ASSERT(!Vacb->Dirty);
|
||||
|
||||
InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
|
||||
CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
|
||||
Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
|
||||
#if 0
|
||||
if (Vacb->FileOffset.QuadPart + Length > SharedCacheMap->SectionSize.QuadPart)
|
||||
Length = SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart;
|
||||
#endif
|
||||
CcTotalDirtyPages += PAGE_ROUND_UP(Length) / PAGE_SIZE;
|
||||
Vacb->SharedCacheMap->DirtyPages += PAGE_ROUND_UP(Length) / PAGE_SIZE;
|
||||
CcRosVacbIncRefCount(Vacb);
|
||||
|
||||
/* Move to the tail of the LRU list */
|
||||
|
@ -531,6 +420,9 @@ CcRosMarkDirtyVacb (
|
|||
CcScheduleLazyWriteScan(FALSE);
|
||||
}
|
||||
KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
|
||||
|
||||
/* Tell Mm */
|
||||
MmMakePagesDirty(NULL, Vacb->BaseAddress, Length);
|
||||
}
|
||||
|
||||
VOID
|
||||
|
@ -541,6 +433,7 @@ CcRosUnmarkDirtyVacb (
|
|||
{
|
||||
KIRQL oldIrql;
|
||||
PROS_SHARED_CACHE_MAP SharedCacheMap;
|
||||
ULONG Length = VACB_MAPPING_GRANULARITY;
|
||||
|
||||
SharedCacheMap = Vacb->SharedCacheMap;
|
||||
|
||||
|
@ -556,8 +449,14 @@ CcRosUnmarkDirtyVacb (
|
|||
|
||||
RemoveEntryList(&Vacb->DirtyVacbListEntry);
|
||||
InitializeListHead(&Vacb->DirtyVacbListEntry);
|
||||
CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
|
||||
Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
|
||||
|
||||
#if 0
|
||||
if (Vacb->FileOffset.QuadPart + Length > SharedCacheMap->SectionSize.QuadPart)
|
||||
Length = SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart;
|
||||
#endif
|
||||
CcTotalDirtyPages -= PAGE_ROUND_UP(Length) / PAGE_SIZE;
|
||||
Vacb->SharedCacheMap->DirtyPages -= PAGE_ROUND_UP(Length) / PAGE_SIZE;
|
||||
|
||||
CcRosVacbDecRefCount(Vacb);
|
||||
|
||||
if (LockViews)
|
||||
|
@ -626,73 +525,6 @@ CcRosUnmapVacb (
|
|||
return STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static
|
||||
NTSTATUS
|
||||
CcRosMapVacbInKernelSpace(
|
||||
PROS_VACB Vacb)
|
||||
{
|
||||
ULONG i;
|
||||
NTSTATUS Status;
|
||||
ULONG_PTR NumberOfPages;
|
||||
PVOID BaseAddress = NULL;
|
||||
|
||||
/* Create a memory area. */
|
||||
MmLockAddressSpace(MmGetKernelAddressSpace());
|
||||
Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
|
||||
0, // nothing checks for VACB mareas, so set to 0
|
||||
&BaseAddress,
|
||||
VACB_MAPPING_GRANULARITY,
|
||||
PAGE_READWRITE,
|
||||
(PMEMORY_AREA*)&Vacb->MemoryArea,
|
||||
0,
|
||||
PAGE_SIZE);
|
||||
ASSERT(Vacb->BaseAddress == NULL);
|
||||
Vacb->BaseAddress = BaseAddress;
|
||||
MmUnlockAddressSpace(MmGetKernelAddressSpace());
|
||||
if (!NT_SUCCESS(Status))
|
||||
{
|
||||
DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
|
||||
return Status;
|
||||
}
|
||||
|
||||
ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
|
||||
ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
|
||||
ASSERT((ULONG_PTR)Vacb->BaseAddress + VACB_MAPPING_GRANULARITY - 1 > (ULONG_PTR)MmSystemRangeStart);
|
||||
|
||||
/* Create a virtual mapping for this memory area */
|
||||
NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
|
||||
for (i = 0; i < NumberOfPages; i++)
|
||||
{
|
||||
PFN_NUMBER PageFrameNumber;
|
||||
|
||||
MI_SET_USAGE(MI_USAGE_CACHE);
|
||||
Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
|
||||
if (PageFrameNumber == 0)
|
||||
{
|
||||
DPRINT1("Unable to allocate page\n");
|
||||
KeBugCheck(MEMORY_MANAGEMENT);
|
||||
}
|
||||
|
||||
ASSERT(BaseAddress == Vacb->BaseAddress);
|
||||
ASSERT(i * PAGE_SIZE < VACB_MAPPING_GRANULARITY);
|
||||
ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) >= (ULONG_PTR)BaseAddress);
|
||||
ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) > (ULONG_PTR)MmSystemRangeStart);
|
||||
|
||||
Status = MmCreateVirtualMapping(NULL,
|
||||
(PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
|
||||
PAGE_READWRITE,
|
||||
&PageFrameNumber,
|
||||
1);
|
||||
if (!NT_SUCCESS(Status))
|
||||
{
|
||||
DPRINT1("Unable to create virtual mapping\n");
|
||||
KeBugCheck(MEMORY_MANAGEMENT);
|
||||
}
|
||||
}
|
||||
|
||||
return STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static
|
||||
BOOLEAN
|
||||
CcRosFreeUnusedVacb (
|
||||
|
@ -789,6 +621,7 @@ CcRosCreateVacb (
|
|||
KIRQL oldIrql;
|
||||
ULONG Refs;
|
||||
BOOLEAN Retried;
|
||||
SIZE_T ViewSize = VACB_MAPPING_GRANULARITY;
|
||||
|
||||
ASSERT(SharedCacheMap);
|
||||
|
||||
|
@ -823,8 +656,9 @@ CcRosCreateVacb (
|
|||
|
||||
Retried = FALSE;
|
||||
Retry:
|
||||
/* Map VACB in kernel space */
|
||||
Status = CcRosMapVacbInKernelSpace(current);
|
||||
/* Map VACB in system space */
|
||||
Status = MmMapViewInSystemSpaceEx(SharedCacheMap->Section, ¤t->BaseAddress, &ViewSize, ¤t->FileOffset);
|
||||
|
||||
if (!NT_SUCCESS(Status))
|
||||
{
|
||||
ULONG Freed;
|
||||
|
@ -932,14 +766,50 @@ Retry:
|
|||
return Status;
|
||||
}
|
||||
|
||||
BOOLEAN
|
||||
NTAPI
|
||||
CcRosEnsureVacbResident(
|
||||
_In_ PROS_VACB Vacb,
|
||||
_In_ BOOLEAN Wait,
|
||||
_In_ BOOLEAN NoRead,
|
||||
_In_ ULONG Offset,
|
||||
_In_ ULONG Length
|
||||
)
|
||||
{
|
||||
PVOID BaseAddress;
|
||||
|
||||
ASSERT((Offset + Length) <= VACB_MAPPING_GRANULARITY);
|
||||
|
||||
if ((Vacb->FileOffset.QuadPart + Offset) > Vacb->SharedCacheMap->FileSize.QuadPart)
|
||||
return FALSE;
|
||||
|
||||
BaseAddress = (PVOID)((ULONG_PTR)Vacb->BaseAddress + Offset);
|
||||
|
||||
/* Check if the pages are resident */
|
||||
if (!MmArePagesResident(NULL, BaseAddress, Length))
|
||||
{
|
||||
if (!Wait)
|
||||
{
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
if (!NoRead)
|
||||
{
|
||||
NTSTATUS Status = MmMakePagesResident(NULL, BaseAddress, Length);
|
||||
if (!NT_SUCCESS(Status))
|
||||
ExRaiseStatus(Status);
|
||||
}
|
||||
}
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
|
||||
NTSTATUS
|
||||
NTAPI
|
||||
CcRosGetVacb (
|
||||
PROS_SHARED_CACHE_MAP SharedCacheMap,
|
||||
LONGLONG FileOffset,
|
||||
PLONGLONG BaseOffset,
|
||||
PVOID* BaseAddress,
|
||||
PBOOLEAN UptoDate,
|
||||
PROS_VACB *Vacb)
|
||||
{
|
||||
PROS_VACB current;
|
||||
|
@ -978,13 +848,9 @@ CcRosGetVacb (
|
|||
KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
|
||||
|
||||
/*
|
||||
* Return information about the VACB to the caller.
|
||||
* Return the VACB to the caller.
|
||||
*/
|
||||
*UptoDate = current->Valid;
|
||||
*BaseAddress = current->BaseAddress;
|
||||
DPRINT("*BaseAddress %p\n", *BaseAddress);
|
||||
*Vacb = current;
|
||||
*BaseOffset = current->FileOffset.QuadPart;
|
||||
|
||||
ASSERT(Refs > 1);
|
||||
|
||||
|
@ -996,14 +862,11 @@ NTAPI
|
|||
CcRosRequestVacb (
|
||||
PROS_SHARED_CACHE_MAP SharedCacheMap,
|
||||
LONGLONG FileOffset,
|
||||
PVOID* BaseAddress,
|
||||
PBOOLEAN UptoDate,
|
||||
PROS_VACB *Vacb)
|
||||
/*
|
||||
* FUNCTION: Request a page mapping for a shared cache map
|
||||
*/
|
||||
{
|
||||
LONGLONG BaseOffset;
|
||||
|
||||
ASSERT(SharedCacheMap);
|
||||
|
||||
|
@ -1016,30 +879,9 @@ CcRosRequestVacb (
|
|||
|
||||
return CcRosGetVacb(SharedCacheMap,
|
||||
FileOffset,
|
||||
&BaseOffset,
|
||||
BaseAddress,
|
||||
UptoDate,
|
||||
Vacb);
|
||||
}
|
||||
|
||||
static
|
||||
VOID
|
||||
CcFreeCachePage (
|
||||
PVOID Context,
|
||||
MEMORY_AREA* MemoryArea,
|
||||
PVOID Address,
|
||||
PFN_NUMBER Page,
|
||||
SWAPENTRY SwapEntry,
|
||||
BOOLEAN Dirty)
|
||||
{
|
||||
ASSERT(SwapEntry == 0);
|
||||
if (Page != 0)
|
||||
{
|
||||
ASSERT(MmGetReferenceCountPage(Page) == 1);
|
||||
MmReleasePageMemoryConsumer(MC_CACHE, Page);
|
||||
}
|
||||
}
|
||||
|
||||
NTSTATUS
|
||||
CcRosInternalFreeVacb (
|
||||
PROS_VACB Vacb)
|
||||
|
@ -1047,6 +889,8 @@ CcRosInternalFreeVacb (
|
|||
* FUNCTION: Releases a VACB associated with a shared cache map
|
||||
*/
|
||||
{
|
||||
NTSTATUS Status;
|
||||
|
||||
DPRINT("Freeing VACB 0x%p\n", Vacb);
|
||||
#if DBG
|
||||
if (Vacb->SharedCacheMap->Trace)
|
||||
|
@ -1055,12 +899,14 @@ CcRosInternalFreeVacb (
|
|||
}
|
||||
#endif
|
||||
|
||||
MmLockAddressSpace(MmGetKernelAddressSpace());
|
||||
MmFreeMemoryArea(MmGetKernelAddressSpace(),
|
||||
Vacb->MemoryArea,
|
||||
CcFreeCachePage,
|
||||
NULL);
|
||||
MmUnlockAddressSpace(MmGetKernelAddressSpace());
|
||||
/* Delete the mapping */
|
||||
Status = MmUnmapViewInSystemSpace(Vacb->BaseAddress);
|
||||
if (!NT_SUCCESS(Status))
|
||||
{
|
||||
DPRINT1("Failed to unmap VACB from System address space! Status 0x%08X\n", Status);
|
||||
ASSERT(FALSE);
|
||||
/* Proceed with the deletion anyway */
|
||||
}
|
||||
|
||||
if (Vacb->ReferenceCount != 0)
|
||||
{
|
||||
|
@ -1097,11 +943,8 @@ CcFlushCache (
|
|||
PROS_VACB current;
|
||||
NTSTATUS Status;
|
||||
|
||||
CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
|
||||
SectionObjectPointers, FileOffset, Length);
|
||||
|
||||
DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
|
||||
SectionObjectPointers, FileOffset, Length, IoStatus);
|
||||
CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=0x%I64X Length=%lu\n",
|
||||
SectionObjectPointers, FileOffset ? FileOffset->QuadPart : 0LL, Length);
|
||||
|
||||
if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
|
||||
{
|
||||
|
@ -1217,6 +1060,8 @@ CcRosDeleteFileCache (
|
|||
KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
|
||||
|
||||
KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);
|
||||
if(SharedCacheMap->Section)
|
||||
ObDereferenceObject(SharedCacheMap->Section);
|
||||
ObDereferenceObject(SharedCacheMap->FileObject);
|
||||
|
||||
while (!IsListEmpty(&FreeList))
|
||||
|
@ -1299,36 +1144,6 @@ CcRosRemoveIfClosed (
|
|||
KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
|
||||
}
|
||||
|
||||
|
||||
VOID
|
||||
NTAPI
|
||||
CcRosDereferenceCache (
|
||||
PFILE_OBJECT FileObject)
|
||||
{
|
||||
PROS_SHARED_CACHE_MAP SharedCacheMap;
|
||||
KIRQL OldIrql;
|
||||
|
||||
OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
|
||||
SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
|
||||
ASSERT(SharedCacheMap);
|
||||
if (SharedCacheMap->OpenCount > 0)
|
||||
{
|
||||
SharedCacheMap->OpenCount--;
|
||||
if (SharedCacheMap->OpenCount == 0)
|
||||
{
|
||||
KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
|
||||
MmFreeSectionSegments(SharedCacheMap->FileObject);
|
||||
|
||||
OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
|
||||
CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
|
||||
KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
|
||||
|
||||
return;
|
||||
}
|
||||
}
|
||||
KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
|
||||
}
|
||||
|
||||
NTSTATUS
|
||||
NTAPI
|
||||
CcRosReleaseFileCache (
|
||||
|
@ -1373,20 +1188,12 @@ CcRosReleaseFileCache (
|
|||
PrivateMap->NodeTypeCode = 0;
|
||||
}
|
||||
|
||||
if (SharedCacheMap->OpenCount > 0)
|
||||
ASSERT(SharedCacheMap->OpenCount > 0);
|
||||
|
||||
SharedCacheMap->OpenCount--;
|
||||
if (SharedCacheMap->OpenCount == 0)
|
||||
{
|
||||
SharedCacheMap->OpenCount--;
|
||||
if (SharedCacheMap->OpenCount == 0)
|
||||
{
|
||||
KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
|
||||
MmFreeSectionSegments(SharedCacheMap->FileObject);
|
||||
|
||||
OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
|
||||
CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
|
||||
KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
return STATUS_SUCCESS;
}
CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
}
}
}

@@ -1412,6 +1219,8 @@ CcRosInitializeFileCache (
DPRINT("CcRosInitializeFileCache(FileObject 0x%p)\n", FileObject);
OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
Allocated = FALSE;
SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
if (SharedCacheMap == NULL)

@@ -1437,27 +1246,37 @@ CcRosInitializeFileCache (
KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
InitializeListHead(&SharedCacheMap->BcbList);
}
OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
if (Allocated)
SharedCacheMap->Flags = SHARED_CACHE_MAP_IN_CREATION;
ObReferenceObjectByPointer(FileObject,
FILE_ALL_ACCESS,
NULL,
KernelMode);
FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
// CcRosTraceCacheMap(SharedCacheMap, TRUE);
}
else if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_CREATION)
{
if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
{
ObReferenceObjectByPointer(FileObject,
FILE_ALL_ACCESS,
NULL,
KernelMode);
FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
/* The shared cache map is being created somewhere else. Wait for that to happen */
KEVENT Waiter;
PKEVENT PreviousWaiter = SharedCacheMap->CreateEvent;
InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
}
else
{
ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
}
KeInitializeEvent(&Waiter, NotificationEvent, FALSE);
SharedCacheMap->CreateEvent = &Waiter;
KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
KeWaitForSingleObject(&Waiter, Executive, KernelMode, FALSE, NULL);
if (PreviousWaiter)
KeSetEvent(PreviousWaiter, IO_NO_INCREMENT, FALSE);
OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
}
if (FileObject->PrivateCacheMap == NULL)
{
PPRIVATE_CACHE_MAP PrivateMap;

@@ -1503,8 +1322,48 @@ CcRosInitializeFileCache (
FileObject->PrivateCacheMap = PrivateMap;
SharedCacheMap->OpenCount++;
}
KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
/* Create the section */
if (Allocated)
{
NTSTATUS Status;
ASSERT(SharedCacheMap->Section == NULL);
Status = MmCreateSection(
&SharedCacheMap->Section,
SECTION_ALL_ACCESS,
NULL,
&SharedCacheMap->SectionSize,
PAGE_READWRITE,
0,
NULL,
FileObject);
ASSERT(NT_SUCCESS(Status));
if (!NT_SUCCESS(Status))
{
CcRosReleaseFileCache(FileObject);
return Status;
}
OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_CREATION;
if (SharedCacheMap->CreateEvent)
{
KeSetEvent(SharedCacheMap->CreateEvent, IO_NO_INCREMENT, FALSE);
SharedCacheMap->CreateEvent = NULL;
}
KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}
return STATUS_SUCCESS;
}

@@ -1564,8 +1423,6 @@ CcInitView (
TAG_VACB,
20);
MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
CcInitCacheZeroPage();
}
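Note: the hunks above make the shared cache map itself a consumer of Mm file sections. The first caller to set up caching marks the map SHARED_CACHE_MAP_IN_CREATION, creates the backing section with MmCreateSection outside the master lock, and then signals any CreateEvent waiters; concurrent callers park on a stack KEVENT instead of racing the creation. A condensed sketch of that flow, not literal commit code (error paths, lookaside allocation, and private-map setup omitted):

/* Condensed illustration of the create-or-wait protocol used above. */
NTSTATUS
CcRosInitializeFileCacheSketch(PFILE_OBJECT FileObject, PROS_SHARED_CACHE_MAP Map, BOOLEAN Allocated)
{
    KIRQL OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    if (Allocated)
    {
        /* We own the creation: publish the map and mark it in-creation. */
        Map->Flags = SHARED_CACHE_MAP_IN_CREATION;
        FileObject->SectionObjectPointer->SharedCacheMap = Map;
    }
    else if (Map->Flags & SHARED_CACHE_MAP_IN_CREATION)
    {
        /* Somebody else is creating it: chain onto CreateEvent and wait. */
        KEVENT Waiter;
        PKEVENT Previous = Map->CreateEvent;
        KeInitializeEvent(&Waiter, NotificationEvent, FALSE);
        Map->CreateEvent = &Waiter;
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        KeWaitForSingleObject(&Waiter, Executive, KernelMode, FALSE, NULL);
        if (Previous) KeSetEvent(Previous, IO_NO_INCREMENT, FALSE);
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    if (Allocated)
    {
        /* Back the cache with a data section; Mm now does the file mapping. */
        NTSTATUS Status = MmCreateSection(&Map->Section, SECTION_ALL_ACCESS, NULL,
                                          &Map->SectionSize, PAGE_READWRITE, 0, NULL, FileObject);
        if (!NT_SUCCESS(Status)) return Status;

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        Map->Flags &= ~SHARED_CACHE_MAP_IN_CREATION;
        if (Map->CreateEvent)
        {
            /* Wake whoever piled up while we were creating the section. */
            KeSetEvent(Map->CreateEvent, IO_NO_INCREMENT, FALSE);
            Map->CreateEvent = NULL;
        }
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }
    return STATUS_SUCCESS;
}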
@@ -1970,7 +1970,6 @@ Phase1InitializationDiscard(IN PVOID Context)
InbvEnableDisplayString(TRUE);
/* Launch initial process */
DPRINT("Free non-cache pages: %lx\n", MmAvailablePages + MiMemoryConsumers[MC_CACHE].PagesUsed);
ProcessInfo = &InitBuffer->ProcessInfo;
ExpLoadInitialProcess(InitBuffer, &ProcessParameters, &Environment);

@@ -2009,7 +2008,6 @@ Phase1InitializationDiscard(IN PVOID Context)
/* Free the boot buffer */
ExFreePoolWithTag(InitBuffer, TAG_INIT);
DPRINT("Free non-cache pages: %lx\n", MmAvailablePages + MiMemoryConsumers[MC_CACHE].PagesUsed);
}
VOID

@@ -719,7 +719,6 @@ QSI_DEF(SystemPerformanceInformation)
* Not sure this is right. 8^\
*/
Spi->CommittedPages = MiMemoryConsumers[MC_SYSTEM].PagesUsed +
MiMemoryConsumers[MC_CACHE].PagesUsed +
MiMemoryConsumers[MC_USER].PagesUsed +
MiUsedSwapPages;
/*

@@ -767,7 +766,7 @@ QSI_DEF(SystemPerformanceInformation)
Spi->TotalSystemDriverPages = 0; /* FIXME */
Spi->Spare3Count = 0; /* FIXME */
Spi->ResidentSystemCachePage = MiMemoryConsumers[MC_CACHE].PagesUsed;
Spi->ResidentSystemCachePage = 0; /* FIXME */
Spi->ResidentPagedPoolPage = 0; /* FIXME */
Spi->ResidentSystemDriverPage = 0; /* FIXME */

@@ -1477,13 +1476,10 @@ QSI_DEF(SystemFileCacheInformation)
RtlZeroMemory(Sci, sizeof(SYSTEM_FILECACHE_INFORMATION));
/* Return the Byte size not the page size. */
Sci->CurrentSize =
MiMemoryConsumers[MC_CACHE].PagesUsed * PAGE_SIZE;
Sci->PeakSize =
MiMemoryConsumers[MC_CACHE].PagesUsed * PAGE_SIZE; /* FIXME */
Sci->CurrentSize = 0; /* FIXME */
Sci->PeakSize = 0; /* FIXME */
/* Taskmgr multiplies this one by page size right away */
Sci->CurrentSizeIncludingTransitionInPages =
MiMemoryConsumers[MC_CACHE].PagesUsed; /* FIXME: Should be */
Sci->CurrentSizeIncludingTransitionInPages = 0; /* FIXME: Should be */
/* system working set and standby pages. */
Sci->PageFaultCount = 0; /* FIXME */
Sci->MinimumWorkingSet = 0; /* FIXME */
@@ -3,7 +3,7 @@
//
// Define this if you want debugging support
//
#define _CC_DEBUG_ 0x00
#define _CC_DEBUG_ 0x0
//
// These define the Debug Masks Supported

@@ -179,6 +179,8 @@ typedef struct _ROS_SHARED_CACHE_MAP
ULONG DirtyPages;
LIST_ENTRY SharedCacheMapLinks;
ULONG Flags;
PVOID Section;
PKEVENT CreateEvent;
PCACHE_MANAGER_CALLBACKS Callbacks;
PVOID LazyWriteContext;
LIST_ENTRY PrivateList;

@@ -197,13 +199,12 @@ typedef struct _ROS_SHARED_CACHE_MAP
#define READAHEAD_DISABLED 0x1
#define WRITEBEHIND_DISABLED 0x2
#define SHARED_CACHE_MAP_IN_CREATION 0x4
typedef struct _ROS_VACB
{
/* Base address of the region where the view's data is mapped. */
PVOID BaseAddress;
/* Memory area representing the region where the view's data is mapped. */
struct _MEMORY_AREA* MemoryArea;
/* Are the contents of the view valid. */
BOOLEAN Valid;
/* Are the contents of the view newer than those on disk. */

@@ -316,12 +317,19 @@ NTAPI
CcRosGetVacb(
PROS_SHARED_CACHE_MAP SharedCacheMap,
LONGLONG FileOffset,
PLONGLONG BaseOffset,
PVOID *BaseAddress,
PBOOLEAN UptoDate,
PROS_VACB *Vacb
);
BOOLEAN
NTAPI
CcRosEnsureVacbResident(
_In_ PROS_VACB Vacb,
_In_ BOOLEAN Wait,
_In_ BOOLEAN NoRead,
_In_ ULONG Offset,
_In_ ULONG Length
);
VOID
NTAPI
CcInitView(VOID);

@@ -330,14 +338,6 @@ VOID
NTAPI
CcShutdownLazyWriter(VOID);
NTSTATUS
NTAPI
CcReadVirtualAddress(PROS_VACB Vacb);
NTSTATUS
NTAPI
CcWriteVirtualAddress(PROS_VACB Vacb);
BOOLEAN
NTAPI
CcInitializeCacheManager(VOID);

@@ -415,8 +415,6 @@ NTAPI
CcRosRequestVacb(
PROS_SHARED_CACHE_MAP SharedCacheMap,
LONGLONG FileOffset,
PVOID* BaseAddress,
PBOOLEAN UptoDate,
PROS_VACB *Vacb
);
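Note: with CcReadVirtualAddress and CcWriteVirtualAddress removed from this header, a VACB's contents are no longer paged in by Cc issuing its own IoPageRead; the view is a mapping of the backing section, and callers ask Mm to make the needed range resident through CcRosEnsureVacbResident. A hypothetical copy-out helper built only from the prototypes declared above (the helper name and error handling are illustrative, not from the commit; it assumes the range does not cross a view boundary, and releasing the VACB is left as a comment because the release routine's signature is not shown in these hunks):

/* Hypothetical helper: copy Length bytes at FileOffset out of the cache. */
static NTSTATUS
CcCopyFromCacheSketch(PROS_SHARED_CACHE_MAP SharedCacheMap,
                      LONGLONG FileOffset,
                      ULONG Length,
                      PVOID Buffer)
{
    PROS_VACB Vacb;
    PVOID BaseAddress;
    BOOLEAN UptoDate;
    /* Offset of the request inside its VACB_MAPPING_GRANULARITY-sized view. */
    ULONG OffsetInVacb = (ULONG)(FileOffset % VACB_MAPPING_GRANULARITY);
    NTSTATUS Status;

    Status = CcRosRequestVacb(SharedCacheMap, FileOffset, &BaseAddress, &UptoDate, &Vacb);
    if (!NT_SUCCESS(Status))
        return Status;

    /* Ask Mm to fault the mapped file pages in: Wait = TRUE, NoRead = FALSE. */
    if (!CcRosEnsureVacbResident(Vacb, TRUE, FALSE, OffsetInVacb, Length))
        return STATUS_UNSUCCESSFUL;

    RtlCopyMemory(Buffer, (PUCHAR)BaseAddress + OffsetInVacb, Length);

    /* (The VACB would be released back to the cache here.) */
    return STATUS_SUCCESS;
}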
@@ -89,12 +89,9 @@ typedef ULONG_PTR SWAPENTRY;
#define SEC_PHYSICALMEMORY (0x80000000)
#define MM_DATAFILE_SEGMENT (0x2)
#define MC_CACHE (0)
#define MC_USER (1)
#define MC_SYSTEM (2)
#define MC_MAXIMUM (3)
#define MC_USER (0)
#define MC_SYSTEM (1)
#define MC_MAXIMUM (2)
#define PAGED_POOL_MASK 1
#define MUST_SUCCEED_POOL_MASK 2

@@ -171,10 +168,10 @@ typedef struct _MM_SECTION_SEGMENT
FAST_MUTEX Lock; /* lock which protects the page directory */
LARGE_INTEGER RawLength; /* length of the segment which is part of the mapped file */
LARGE_INTEGER Length; /* absolute length of the segment */
ULONG ReferenceCount;
ULONG CacheCount;
PULONG ReferenceCount;
ULONG SectionCount;
ULONG Protection;
ULONG Flags;
PULONG Flags;
BOOLEAN WriteCopy;
BOOLEAN Locked;

@@ -185,6 +182,9 @@ typedef struct _MM_SECTION_SEGMENT
ULONG Characteristics;
} Image;
ULONG RefCount;
ULONG SegFlags;
LIST_ENTRY ListOfSegments;
RTL_GENERIC_TABLE PageTable;
} MM_SECTION_SEGMENT, *PMM_SECTION_SEGMENT;

@@ -193,12 +193,20 @@ typedef struct _MM_IMAGE_SECTION_OBJECT
{
PFILE_OBJECT FileObject;
ULONG RefCount;
ULONG SegFlags;
SECTION_IMAGE_INFORMATION ImageInformation;
PVOID BasedAddress;
ULONG NrSegments;
PMM_SECTION_SEGMENT Segments;
} MM_IMAGE_SECTION_OBJECT, *PMM_IMAGE_SECTION_OBJECT;
#define MM_DATAFILE_SEGMENT (0x2)
#define MM_SEGMENT_INDELETE (0x4)
#define MM_SEGMENT_INCREATE (0x8)
#define MA_GetStartingAddress(_MemoryArea) ((_MemoryArea)->VadNode.StartingVpn << PAGE_SHIFT)
#define MA_GetEndingAddress(_MemoryArea) (((_MemoryArea)->VadNode.EndingVpn + 1) << PAGE_SHIFT)

@@ -862,11 +870,6 @@ MmInitializeRmapList(VOID);
VOID
NTAPI
MmSetCleanAllRmaps(PFN_NUMBER Page);
VOID
NTAPI
MmSetDirtyAllRmaps(PFN_NUMBER Page);
BOOLEAN
NTAPI
MmIsDirtyPageRmap(PFN_NUMBER Page);

@@ -1288,15 +1291,6 @@ MmNotPresentFaultSectionView(
BOOLEAN Locked
);
NTSTATUS
NTAPI
MmPageOutSectionView(
PMMSUPPORT AddressSpace,
PMEMORY_AREA MemoryArea,
PVOID Address,
ULONG_PTR Entry
);
NTSTATUS
NTAPI
MmCreatePhysicalMemorySection(VOID);

@@ -1337,6 +1331,48 @@ MmMakePagesResident(
_In_ PVOID Address,
_In_ ULONG Length);
NTSTATUS
NTAPI
MmMakePagesDirty(
_In_ PEPROCESS Process,
_In_ PVOID Address,
_In_ ULONG Length);
NTSTATUS
NTAPI
MmRosFlushVirtualMemory(
_In_ PEPROCESS Process,
_Inout_ PVOID* Address,
_Inout_ PSIZE_T Length,
_Out_ PIO_STATUS_BLOCK Iosb);
BOOLEAN
NTAPI
MmCheckDirtySegment(
PMM_SECTION_SEGMENT Segment,
PLARGE_INTEGER Offset,
BOOLEAN ForceDirty,
BOOLEAN PageOut);
BOOLEAN
NTAPI
MmUnsharePageEntrySectionSegment(PMEMORY_AREA MemoryArea,
PMM_SECTION_SEGMENT Segment,
PLARGE_INTEGER Offset,
BOOLEAN Dirty,
BOOLEAN PageOut,
ULONG_PTR *InEntry);
VOID
NTAPI
MmDereferenceSegment(PMM_SECTION_SEGMENT Segment);
NTSTATUS
NTAPI
MmExtendSection(
_In_ PVOID Section,
_Inout_ PLARGE_INTEGER NewSize);
/* sysldr.c ******************************************************************/
VOID
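Note: the declarations added above give Mm the primitives Cc used to provide for itself: a page's backing segment can be looked up, checked for dirtiness, written back, and dereferenced without going through the cache manager. The page-out hunk later in this diff uses exactly that sequence; a minimal restatement of the idiom (illustrative only, mirroring the calls shown there):

/* Write a physical page back to its file if its segment entry is dirty.
 * Mirrors the pattern MmPageOutPhysicalAddress uses later in this diff. */
static BOOLEAN
MiWriteBackPageSketch(PFN_NUMBER Page)
{
    LARGE_INTEGER SegmentOffset;
    PMM_SECTION_SEGMENT Segment;
    BOOLEAN Released;

    /* Find (and reference) the data segment this page backs. */
    Segment = MmGetSectionAssociation(Page, &SegmentOffset);
    if (Segment == NULL)
        return FALSE;

    MmLockSectionSegment(Segment);
    /* ForceDirty = FALSE: only write if the entry is dirty.
     * PageOut = TRUE: try to release the page once it is clean. */
    Released = MmCheckDirtySegment(Segment, &SegmentOffset, FALSE, TRUE);
    MmUnlockSectionSegment(Segment);

    /* Drop the reference taken by MmGetSectionAssociation. */
    MmDereferenceSegment(Segment);
    return Released;
}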
@@ -2934,7 +2934,7 @@ MmMapViewOfArm3Section(IN PVOID SectionObject,
if (!(*ViewSize))
{
/* Compute it for the caller */
CalculatedViewSize = Section->SizeOfSection.QuadPart -
CalculatedViewSize = Section->SizeOfSection.QuadPart -
SectionOffset->QuadPart;
/* Check if it's larger than 4GB or overflows into kernel-mode */

@@ -3891,30 +3891,24 @@ NtExtendSection(IN HANDLE SectionHandle,
NULL);
if (!NT_SUCCESS(Status)) return Status;
/* Really this should go in MmExtendSection */
if (!Section->u.Flags.File || Section->u.Flags.Image)
{
DPRINT1("Not extending a file\n");
ObDereferenceObject(Section);
return STATUS_SECTION_NOT_EXTENDED;
}
/* FIXME: Do the work */
Status = MmExtendSection(Section, &SafeNewMaximumSize);
/* Dereference the section */
ObDereferenceObject(Section);
/* Enter SEH */
_SEH2_TRY
if (NT_SUCCESS(Status))
{
/* Write back the new size */
*NewMaximumSize = SafeNewMaximumSize;
_SEH2_TRY
{
/* Write back the new size */
*NewMaximumSize = SafeNewMaximumSize;
}
_SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
Status = _SEH2_GetExceptionCode();
}
_SEH2_END;
}
_SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
/* Nothing to do */
}
_SEH2_END;
/* Return the status */
return STATUS_NOT_IMPLEMENTED;
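Note: because the removed and added lines are interleaved above without +/- markers, the net shape of the NtExtendSection hunk is easy to misread. The "not extending a file" bail-out and the stub SEH block are the old code; the new code forwards to MmExtendSection and writes the updated size back to the caller under SEH, presumably returning that status rather than STATUS_NOT_IMPLEMENTED. A simplified restatement under that assumption (not additional commit code):

/* Simplified flow NtExtendSection appears to follow after this hunk. */
static NTSTATUS
NtExtendSectionFlowSketch(PVOID Section,
                          PLARGE_INTEGER NewMaximumSize,
                          LARGE_INTEGER SafeNewMaximumSize)
{
    NTSTATUS Status = MmExtendSection(Section, &SafeNewMaximumSize);

    /* The section reference taken earlier in the syscall is dropped here. */
    ObDereferenceObject(Section);

    if (NT_SUCCESS(Status))
    {
        _SEH2_TRY
        {
            /* Hand the new size back through the caller-supplied pointer. */
            *NewMaximumSize = SafeNewMaximumSize;
        }
        _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
        {
            Status = _SEH2_GetExceptionCode();
        }
        _SEH2_END;
    }

    return Status;
}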
@@ -1356,12 +1356,8 @@ MmFlushVirtualMemory(IN PEPROCESS Process,
OUT PIO_STATUS_BLOCK IoStatusBlock)
{
PAGED_CODE();
UNIMPLEMENTED;
//
// Fake success
//
return STATUS_SUCCESS;
/* For now we call the old Mm */
return MmRosFlushVirtualMemory(Process, BaseAddress, RegionSize, IoStatusBlock);
}
ULONG

@@ -46,7 +46,7 @@ MmZeroPageThread(VOID)
/* Get the discardable sections to free them */
MiFindInitializationCode(&StartAddress, &EndAddress);
if (StartAddress) MiFreeInitializationCode(StartAddress, EndAddress);
DPRINT("Free non-cache pages: %lx\n", MmAvailablePages + MiMemoryConsumers[MC_CACHE].PagesUsed);
DPRINT("Free pages: %lx\n", MmAvailablePages);
/* Set our priority to 0 */
Thread->BasePriority = 0;

@@ -54,18 +54,6 @@ MmInitializeBalancer(ULONG NrAvailablePages, ULONG NrSystemPages)
/* Set up targets. */
MiMinimumAvailablePages = 256;
MiMinimumPagesPerRun = 256;
if ((NrAvailablePages + NrSystemPages) >= 8192)
{
MiMemoryConsumers[MC_CACHE].PagesTarget = NrAvailablePages / 4 * 3;
}
else if ((NrAvailablePages + NrSystemPages) >= 4096)
{
MiMemoryConsumers[MC_CACHE].PagesTarget = NrAvailablePages / 3 * 2;
}
else
{
MiMemoryConsumers[MC_CACHE].PagesTarget = NrAvailablePages / 8;
}
MiMemoryConsumers[MC_USER].PagesTarget = NrAvailablePages - MiMinimumAvailablePages;
}

@@ -94,9 +94,6 @@ NTAPI
MmBuildMdlFromPages(PMDL Mdl, PPFN_NUMBER Pages)
{
memcpy(Mdl + 1, Pages, sizeof(PFN_NUMBER) * (PAGE_ROUND_UP(Mdl->ByteOffset+Mdl->ByteCount)/PAGE_SIZE));
/* FIXME: this flag should be set by the caller perhaps? */
Mdl->MdlFlags |= MDL_IO_PAGE_READ;
}

@@ -230,7 +227,7 @@ MiReadPageFile(
MmInitializeMdl(Mdl, NULL, PAGE_SIZE);
MmBuildMdlFromPages(Mdl, &Page);
Mdl->MdlFlags |= MDL_PAGES_LOCKED;
Mdl->MdlFlags |= MDL_PAGES_LOCKED | MDL_IO_PAGE_READ;
file_offset.QuadPart = PageFileOffset * PAGE_SIZE;
@@ -19,7 +19,6 @@
/* GLOBALS ******************************************************************/
static NPAGED_LOOKASIDE_LIST RmapLookasideList;
FAST_MUTEX RmapListLock;
/* FUNCTIONS ****************************************************************/

@@ -38,7 +37,6 @@ VOID
NTAPI
MmInitializeRmapList(VOID)
{
ExInitializeFastMutex(&RmapListLock);
ExInitializeNPagedLookasideList (&RmapLookasideList,
NULL,
RmapListFree,

@@ -55,37 +53,27 @@ MmPageOutPhysicalAddress(PFN_NUMBER Page)
PMM_RMAP_ENTRY entry;
PMEMORY_AREA MemoryArea;
PMMSUPPORT AddressSpace;
ULONG Type;
PVOID Address;
PEPROCESS Process;
ULONGLONG Offset;
NTSTATUS Status = STATUS_SUCCESS;
PMM_SECTION_SEGMENT Segment;
LARGE_INTEGER SegmentOffset;
KIRQL OldIrql;
OldIrql = MiAcquirePfnLock();
ExAcquireFastMutex(&RmapListLock);
entry = MmGetRmapListHeadPage(Page);
#ifdef NEWCC
// Special case for NEWCC: we can have a page that's only in a segment
// page table
if (entry && RMAP_IS_SEGMENT(entry->Address) && entry->Next == NULL)
{
/* NEWCC does locking itself */
ExReleaseFastMutex(&RmapListLock);
return MmpPageOutPhysicalAddress(Page);
}
#endif
while (entry && RMAP_IS_SEGMENT(entry->Address))
entry = entry->Next;
if (entry == NULL)
{
ExReleaseFastMutex(&RmapListLock);
return(STATUS_UNSUCCESSFUL);
MiReleasePfnLock(OldIrql);
return STATUS_UNSUCCESSFUL;
}
Process = entry->Process;
Address = entry->Address;
if ((((ULONG_PTR)Address) & 0xFFF) != 0)

@@ -97,12 +85,12 @@ MmPageOutPhysicalAddress(PFN_NUMBER Page)
{
if (!ExAcquireRundownProtection(&Process->RundownProtect))
{
ExReleaseFastMutex(&RmapListLock);
MiReleasePfnLock(OldIrql);
return STATUS_PROCESS_IS_TERMINATING;
}
Status = ObReferenceObjectByPointer(Process, PROCESS_ALL_ACCESS, NULL, KernelMode);
ExReleaseFastMutex(&RmapListLock);
MiReleasePfnLock(OldIrql);
if (!NT_SUCCESS(Status))
{
ExReleaseRundownProtection(&Process->RundownProtect);

@@ -112,11 +100,12 @@ MmPageOutPhysicalAddress(PFN_NUMBER Page)
}
else
{
ExReleaseFastMutex(&RmapListLock);
MiReleasePfnLock(OldIrql);
AddressSpace = MmGetKernelAddressSpace();
}
MmLockAddressSpace(AddressSpace);
MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, Address);
if (MemoryArea == NULL || MemoryArea->DeleteInProgress)
{

@@ -128,23 +117,27 @@ MmPageOutPhysicalAddress(PFN_NUMBER Page)
}
return(STATUS_UNSUCCESSFUL);
}
Type = MemoryArea->Type;
if (Type == MEMORY_AREA_SECTION_VIEW)
if (MemoryArea->Type == MEMORY_AREA_SECTION_VIEW)
{
ULONG_PTR Entry;
Offset = MemoryArea->SectionData.ViewOffset.QuadPart +
BOOLEAN Dirty;
PFN_NUMBER MapPage;
LARGE_INTEGER Offset;
BOOLEAN Released;
Offset.QuadPart = MemoryArea->SectionData.ViewOffset.QuadPart +
((ULONG_PTR)Address - MA_GetStartingAddress(MemoryArea));
MmLockSectionSegment(MemoryArea->SectionData.Segment);
Segment = MemoryArea->SectionData.Segment;
/*
* Get or create a pageop
*/
Entry = MmGetPageEntrySectionSegment(MemoryArea->SectionData.Segment,
(PLARGE_INTEGER)&Offset);
MmLockSectionSegment(Segment);
Entry = MmGetPageEntrySectionSegment(Segment, &Offset);
if (Entry && MM_IS_WAIT_PTE(Entry))
{
MmUnlockSectionSegment(MemoryArea->SectionData.Segment);
/* The segment is being read or something. Give up */
MmUnlockSectionSegment(Segment);
MmUnlockAddressSpace(AddressSpace);
if (Address < MmSystemRangeStart)
{

@@ -154,18 +147,101 @@ MmPageOutPhysicalAddress(PFN_NUMBER Page)
return(STATUS_UNSUCCESSFUL);
}
MmSetPageEntrySectionSegment(MemoryArea->SectionData.Segment, (PLARGE_INTEGER)&Offset, MAKE_SWAP_SSE(MM_WAIT_ENTRY));
/* Delete this virtual mapping in the process */
MmDeleteVirtualMapping(Process, Address, &Dirty, &MapPage);
ASSERT(MapPage == Page);
/*
* Release locks now we have a page op.
*/
MmUnlockSectionSegment(MemoryArea->SectionData.Segment);
if (Page != PFN_FROM_SSE(Entry))
{
SWAPENTRY SwapEntry;
/* This page is private to the process */
MmUnlockSectionSegment(Segment);
/* Check if we should write it back to the page file */
SwapEntry = MmGetSavedSwapEntryPage(Page);
if ((SwapEntry == 0) && Dirty)
{
/* We don't have a Swap entry, yet the page is dirty. Get one */
SwapEntry = MmAllocSwapPage();
if (!SwapEntry)
{
PMM_REGION Region = MmFindRegion((PVOID)MA_GetStartingAddress(MemoryArea),
&MemoryArea->SectionData.RegionListHead,
Address, NULL);
/* We can't, so let this page in the Process VM */
MmCreateVirtualMapping(Process, Address, Region->Protect, &Page, 1);
MmSetDirtyPage(Process, Address);
MmUnlockAddressSpace(AddressSpace);
return STATUS_UNSUCCESSFUL;
}
}
if (Dirty)
{
Status = MmWriteToSwapPage(SwapEntry, Page);
if (!NT_SUCCESS(Status))
{
/* We failed at saving the content of this page. Keep it in */
PMM_REGION Region = MmFindRegion((PVOID)MA_GetStartingAddress(MemoryArea),
&MemoryArea->SectionData.RegionListHead,
Address, NULL);
/* This Swap Entry is useless to us */
MmSetSavedSwapEntryPage(Page, 0);
MmFreeSwapPage(SwapEntry);
/* We can't, so let this page in the Process VM */
MmCreateVirtualMapping(Process, Address, Region->Protect, &Page, 1);
MmSetDirtyPage(Process, Address);
MmUnlockAddressSpace(AddressSpace);
return STATUS_UNSUCCESSFUL;
}
}
if (SwapEntry)
{
/* Keep this in the process VM */
MmCreatePageFileMapping(Process, Address, SwapEntry);
MmSetSavedSwapEntryPage(Page, 0);
}
MmUnlockAddressSpace(AddressSpace);
/* We can finally let this page go */
MmDeleteRmap(Page, Process, Address);
MmReleasePageMemoryConsumer(MC_USER, Page);
ASSERT(MmGetRmapListHeadPage(Page) == NULL);
if (Address < MmSystemRangeStart)
{
ExReleaseRundownProtection(&Process->RundownProtect);
ObDereferenceObject(Process);
}
return STATUS_SUCCESS;
}
/* Delete this RMAP */
MmDeleteRmap(Page, Process, Address);
/* One less mapping referencing this segment */
Released = MmUnsharePageEntrySectionSegment(MemoryArea, Segment, &Offset, Dirty, FALSE, NULL);
MmUnlockSectionSegment(Segment);
MmUnlockAddressSpace(AddressSpace);
/*
* Do the actual page out work.
*/
Status = MmPageOutSectionView(AddressSpace, MemoryArea, Address, Entry);
if (Address < MmSystemRangeStart)
{
ExReleaseRundownProtection(&Process->RundownProtect);
ObDereferenceObject(Process);
}
if (Released) return STATUS_SUCCESS;
}
#ifdef NEWCC
else if (Type == MEMORY_AREA_CACHE)

@@ -185,7 +261,29 @@ MmPageOutPhysicalAddress(PFN_NUMBER Page)
ExReleaseRundownProtection(&Process->RundownProtect);
ObDereferenceObject(Process);
}
return(Status);
/* Now write this page to file, if needed */
Segment = MmGetSectionAssociation(Page, &SegmentOffset);
if (Segment)
{
BOOLEAN Released;
MmLockSectionSegment(Segment);
Released = MmCheckDirtySegment(Segment, &SegmentOffset, FALSE, TRUE);
MmUnlockSectionSegment(Segment);
MmDereferenceSegment(Segment);
if (Released)
{
return STATUS_SUCCESS;
}
}
/* If we are here, then we didn't release the page */
return STATUS_UNSUCCESSFUL;
}
VOID
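Note: the rewritten MmPageOutPhysicalAddress above no longer parks a MM_WAIT_ENTRY in the segment and defers to MmPageOutSectionView (whose declaration is removed earlier in this diff); it deletes the virtual mapping first and then branches on whether the frame still belongs to the file segment. A condensed restatement of that decision (locking, rundown protection, and error recovery omitted; illustrative only):

/* Condensed decision tree of the page-out path above. */
static VOID
MiPageOutDecisionSketch(PEPROCESS Process, PVOID Address, PFN_NUMBER Page,
                        ULONG_PTR Entry, PMEMORY_AREA MemoryArea,
                        PMM_SECTION_SEGMENT Segment, PLARGE_INTEGER Offset)
{
    BOOLEAN Dirty;
    PFN_NUMBER MapPage;

    /* The mapping is removed first; its dirty bit travels with it. */
    MmDeleteVirtualMapping(Process, Address, &Dirty, &MapPage);

    if (Page != PFN_FROM_SSE(Entry))
    {
        /* Private (copy-on-write) page: persist it in the pagefile if dirty.
         * Status checking is omitted in this sketch. */
        SWAPENTRY SwapEntry = MmGetSavedSwapEntryPage(Page);
        if ((SwapEntry == 0) && Dirty) SwapEntry = MmAllocSwapPage();
        if (Dirty) MmWriteToSwapPage(SwapEntry, Page);
        if (SwapEntry)
        {
            MmCreatePageFileMapping(Process, Address, SwapEntry);
            MmSetSavedSwapEntryPage(Page, 0);
        }
        MmDeleteRmap(Page, Process, Address);
        MmReleasePageMemoryConsumer(MC_USER, Page);
    }
    else
    {
        /* Page still belongs to the file segment: drop this mapping's share;
         * any file write-back happens through MmCheckDirtySegment afterwards. */
        MmDeleteRmap(Page, Process, Address);
        MmUnsharePageEntrySectionSegment(MemoryArea, Segment, Offset, Dirty, FALSE, NULL);
    }
}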
@@ -193,12 +291,13 @@ NTAPI
MmSetCleanAllRmaps(PFN_NUMBER Page)
{
PMM_RMAP_ENTRY current_entry;
KIRQL OldIrql;
ExAcquireFastMutex(&RmapListLock);
OldIrql = MiAcquirePfnLock();
current_entry = MmGetRmapListHeadPage(Page);
if (current_entry == NULL)
{
DPRINT1("MmIsDirtyRmap: No rmaps.\n");
DPRINT1("MmSetCleanAllRmaps: No rmaps.\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
while (current_entry != NULL)

@@ -207,29 +306,7 @@ MmSetCleanAllRmaps(PFN_NUMBER Page)
MmSetCleanPage(current_entry->Process, current_entry->Address);
current_entry = current_entry->Next;
}
ExReleaseFastMutex(&RmapListLock);
}
VOID
NTAPI
MmSetDirtyAllRmaps(PFN_NUMBER Page)
{
PMM_RMAP_ENTRY current_entry;
ExAcquireFastMutex(&RmapListLock);
current_entry = MmGetRmapListHeadPage(Page);
if (current_entry == NULL)
{
DPRINT1("MmIsDirtyRmap: No rmaps.\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
while (current_entry != NULL)
{
if (!RMAP_IS_SEGMENT(current_entry->Address))
MmSetDirtyPage(current_entry->Process, current_entry->Address);
current_entry = current_entry->Next;
}
ExReleaseFastMutex(&RmapListLock);
MiReleasePfnLock(OldIrql);
}
BOOLEAN

@@ -237,27 +314,31 @@ NTAPI
MmIsDirtyPageRmap(PFN_NUMBER Page)
{
PMM_RMAP_ENTRY current_entry;
KIRQL OldIrql;
BOOLEAN Dirty = FALSE;
ExAcquireFastMutex(&RmapListLock);
OldIrql = MiAcquirePfnLock();
current_entry = MmGetRmapListHeadPage(Page);
if (current_entry == NULL)
{
ExReleaseFastMutex(&RmapListLock);
return(FALSE);
DPRINT1("MmIsDirtyPageRmap: No rmaps.\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
while (current_entry != NULL)
{
if (
!RMAP_IS_SEGMENT(current_entry->Address) &&
MmIsDirtyPage(current_entry->Process, current_entry->Address))
if (!RMAP_IS_SEGMENT(current_entry->Address))
{
ExReleaseFastMutex(&RmapListLock);
return(TRUE);
if (MmIsDirtyPage(current_entry->Process, current_entry->Address))
{
Dirty = TRUE;
break;
}
}
current_entry = current_entry->Next;
}
ExReleaseFastMutex(&RmapListLock);
return(FALSE);
MiReleasePfnLock(OldIrql);
return Dirty;
}
VOID

@@ -268,6 +349,8 @@ MmInsertRmap(PFN_NUMBER Page, PEPROCESS Process,
PMM_RMAP_ENTRY current_entry;
PMM_RMAP_ENTRY new_entry;
ULONG PrevSize;
KIRQL OldIrql;
if (!RMAP_IS_SEGMENT(Address))
Address = (PVOID)PAGE_ROUND_DOWN(Address);

@@ -298,7 +381,7 @@ MmInsertRmap(PFN_NUMBER Page, PEPROCESS Process,
KeBugCheck(MEMORY_MANAGEMENT);
}
ExAcquireFastMutex(&RmapListLock);
OldIrql = MiAcquirePfnLock();
current_entry = MmGetRmapListHeadPage(Page);
new_entry->Next = current_entry;
#if DBG

@@ -318,7 +401,8 @@ MmInsertRmap(PFN_NUMBER Page, PEPROCESS Process,
}
#endif
MmSetRmapListHeadPage(Page, new_entry);
ExReleaseFastMutex(&RmapListLock);
MiReleasePfnLock(OldIrql);
if (!RMAP_IS_SEGMENT(Address))
{
if (Process == NULL)

@@ -336,63 +420,15 @@ MmInsertRmap(PFN_NUMBER Page, PEPROCESS Process,
}
}
VOID
NTAPI
MmDeleteAllRmaps(PFN_NUMBER Page, PVOID Context,
VOID (*DeleteMapping)(PVOID Context, PEPROCESS Process,
PVOID Address))
{
PMM_RMAP_ENTRY current_entry;
PMM_RMAP_ENTRY previous_entry;
PEPROCESS Process;
ExAcquireFastMutex(&RmapListLock);
current_entry = MmGetRmapListHeadPage(Page);
if (current_entry == NULL)
{
DPRINT1("MmDeleteAllRmaps: No rmaps.\n");
KeBugCheck(MEMORY_MANAGEMENT);
}
MmSetRmapListHeadPage(Page, NULL);
ExReleaseFastMutex(&RmapListLock);
while (current_entry != NULL)
{
previous_entry = current_entry;
current_entry = current_entry->Next;
if (!RMAP_IS_SEGMENT(previous_entry->Address))
{
if (DeleteMapping)
{
DeleteMapping(Context, previous_entry->Process,
previous_entry->Address);
}
Process = previous_entry->Process;
ExFreeToNPagedLookasideList(&RmapLookasideList, previous_entry);
if (Process == NULL)
{
Process = PsInitialSystemProcess;
}
if (Process)
{
(void)InterlockedExchangeAddUL(&Process->Vm.WorkingSetSize, -PAGE_SIZE);
}
}
else
{
ExFreeToNPagedLookasideList(&RmapLookasideList, previous_entry);
}
}
}
VOID
NTAPI
MmDeleteRmap(PFN_NUMBER Page, PEPROCESS Process,
PVOID Address)
{
PMM_RMAP_ENTRY current_entry, previous_entry;
KIRQL OldIrql;
ExAcquireFastMutex(&RmapListLock);
OldIrql = MiAcquirePfnLock();
previous_entry = NULL;
current_entry = MmGetRmapListHeadPage(Page);

@@ -409,7 +445,8 @@ MmDeleteRmap(PFN_NUMBER Page, PEPROCESS Process,
{
previous_entry->Next = current_entry->Next;
}
ExReleaseFastMutex(&RmapListLock);
MiReleasePfnLock(OldIrql);
ExFreeToNPagedLookasideList(&RmapLookasideList, current_entry);
if (!RMAP_IS_SEGMENT(Address))
{

@@ -450,8 +487,8 @@ MmGetSegmentRmap(PFN_NUMBER Page, PULONG RawOffset)
{
PCACHE_SECTION_PAGE_TABLE Result = NULL;
PMM_RMAP_ENTRY current_entry;//, previous_entry;
KIRQL OldIrql = MiAcquirePfnLock();
ExAcquireFastMutex(&RmapListLock);
//previous_entry = NULL;
current_entry = MmGetRmapListHeadPage(Page);
while (current_entry != NULL)

@@ -460,14 +497,20 @@ MmGetSegmentRmap(PFN_NUMBER Page, PULONG RawOffset)
{
Result = (PCACHE_SECTION_PAGE_TABLE)current_entry->Process;
*RawOffset = (ULONG_PTR)current_entry->Address & ~RMAP_SEGMENT_MASK;
InterlockedIncrementUL(&Result->Segment->ReferenceCount);
ExReleaseFastMutex(&RmapListLock);
if (*Result->Segment->Flags & MM_SEGMENT_INDELETE)
{
MiReleasePfnLock(OldIrql);
return NULL;
}
InterlockedIncrementUL(Result->Segment->ReferenceCount);
MiReleasePfnLock(OldIrql);
return Result;
}
//previous_entry = current_entry;
current_entry = current_entry->Next;
}
ExReleaseFastMutex(&RmapListLock);
MiReleasePfnLock(OldIrql);
return NULL;
}

@@ -482,8 +525,8 @@ NTAPI
MmDeleteSectionAssociation(PFN_NUMBER Page)
{
PMM_RMAP_ENTRY current_entry, previous_entry;
KIRQL OldIrql = MiAcquirePfnLock();
ExAcquireFastMutex(&RmapListLock);
previous_entry = NULL;
current_entry = MmGetRmapListHeadPage(Page);
while (current_entry != NULL)

@@ -498,12 +541,12 @@ MmDeleteSectionAssociation(PFN_NUMBER Page)
{
previous_entry->Next = current_entry->Next;
}
ExReleaseFastMutex(&RmapListLock);
MiReleasePfnLock(OldIrql);
ExFreeToNPagedLookasideList(&RmapLookasideList, current_entry);
return;
}
previous_entry = current_entry;
current_entry = current_entry->Next;
}
ExReleaseFastMutex(&RmapListLock);
MiReleasePfnLock(OldIrql);
}
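Note: throughout rmap.c the hunks above replace ExAcquireFastMutex(&RmapListLock)/ExReleaseFastMutex with MiAcquirePfnLock()/MiReleasePfnLock(), so a page's rmap list head is now protected by the PFN lock rather than a dedicated fast mutex. The resulting walk pattern, shown in isolation (illustrative sketch only, not commit code):

/* The locking idiom rmap.c now uses for every rmap list walk. */
static BOOLEAN
MiRmapWalkSketch(PFN_NUMBER Page)
{
    PMM_RMAP_ENTRY entry;
    BOOLEAN FoundUserMapping = FALSE;
    /* The PFN lock replaces the old RmapListLock fast mutex. */
    KIRQL OldIrql = MiAcquirePfnLock();

    for (entry = MmGetRmapListHeadPage(Page); entry != NULL; entry = entry->Next)
    {
        /* Segment rmaps encode a segment page table, not a process mapping. */
        if (!RMAP_IS_SEGMENT(entry->Address))
            FoundUserMapping = TRUE;
    }

    MiReleasePfnLock(OldIrql);
    return FoundUserMapping;
}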
(File diff suppressed because it is too large.)
@@ -4,7 +4,7 @@
* FILE: ntoskrnl/po/power.c
* PURPOSE: Power Manager
* PROGRAMMERS: Casper S. Hornstrup (chorns@users.sourceforge.net)
* Hervé Poussineau (hpoussin@reactos.com)
* Herv<EFBFBD> Poussineau (hpoussin@reactos.com)
*/
/* INCLUDES ******************************************************************/

@@ -1075,7 +1075,7 @@ NtSetSystemPowerState(IN POWER_ACTION SystemAction,
#ifndef NEWCC
/* Flush dirty cache pages */
/* XXX: Is that still mandatory? As now we'll wait on lazy writer to complete? */
CcRosFlushDirtyPages(-1, &Dummy, FALSE, FALSE); //HACK: We really should wait here!
CcRosFlushDirtyPages(-1, &Dummy, TRUE, FALSE); //HACK: We really should wait here!
#else
Dummy = 0;
#endif