/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/copy.c
 * PURPOSE:         Implements the cache manager's copy interface
 *
 * PROGRAMMERS:     Some people?
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

static PFN_NUMBER CcZeroPage = 0;

#define MAX_ZERO_LENGTH    (256 * 1024)
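
/* Operation selector for CcCopyData(): copy data out of the cache (read),
 * copy data into the cache (write), or fill the cached range with zeroes. */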
typedef enum _CC_COPY_OPERATION
{
    CcOperationRead,
    CcOperationWrite,
    CcOperationZero
} CC_COPY_OPERATION;
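
/* Retry states for the write throttling code. The values other than
 * FirstTry appear to be internal markers threaded through the Retrying
 * parameter of CcCanIWrite() (see its use in CcPostDeferredWrites()),
 * which is why they sit at the top of the BOOLEAN (UCHAR) range. */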
typedef enum _CC_CAN_WRITE_RETRY
{
    FirstTry = 0,
    RetryAllowRemote = 253,
    RetryForceCheckPerFile = 254,
    RetryMasterLocked = 255,
} CC_CAN_WRITE_RETRY;

ULONG CcRosTraceLevel = 0;
ULONG CcFastMdlReadWait;
ULONG CcFastMdlReadNotPossible;
ULONG CcFastReadNotPossible;
ULONG CcFastReadWait;
ULONG CcFastReadNoWait;
ULONG CcFastReadResourceMiss;

/* Counters:
 * - Number of pages flushed to the disk
 * - Number of flush operations
 */
ULONG CcDataPages = 0;
ULONG CcDataFlushes = 0;

/* FUNCTIONS *****************************************************************/

VOID
NTAPI
MiZeroPhysicalPage (
    IN PFN_NUMBER PageFrameIndex
);
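
/* Allocate and zero the page that backs CcZeroPage, which CcZeroData()
 * later stamps into MDLs when zeroing non-cached files. Failure to get the
 * page is fatal for the cache manager, hence the bugcheck. */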
VOID
NTAPI
CcInitCacheZeroPage (
    VOID)
{
    NTSTATUS Status;

    MI_SET_USAGE(MI_USAGE_CACHE);
    //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
    Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
    if (!NT_SUCCESS(Status))
    {
        DbgPrint("Can't allocate CcZeroPage.\n");
        KeBugCheck(CACHE_MANAGER);
    }
    MiZeroPhysicalPage(CcZeroPage);
}
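
/* Fill a VACB with data from its backing file: the VACB's pages are locked
 * through an MDL and read with a non-cached paging read (IoPageRead). The
 * transfer size is clamped to the VACB mapping granularity and to the end
 * of the section; any tail of the view past the read data is zeroed. */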
NTSTATUS
NTAPI
CcReadVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;
    ULARGE_INTEGER LargeSize;

    LargeSize.QuadPart = Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart;
    if (LargeSize.QuadPart > VACB_MAPPING_GRANULARITY)
    {
        LargeSize.QuadPart = VACB_MAPPING_GRANULARITY;
    }
    Size = LargeSize.LowPart;

    Size = ROUND_TO_PAGES(Size);
    ASSERT(Size <= VACB_MAPPING_GRANULARITY);
    ASSERT(Size > 0);

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        DPRINT1("MmProbeAndLockPages failed with: %lx for %p (%p, %p)\n", Status, Mdl, Vacb, Vacb->BaseAddress);
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        Mdl->MdlFlags |= MDL_IO_PAGE_READ;
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoPageRead(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }

    IoFreeMdl(Mdl);

    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageRead failed, Status %lx\n", Status);
        return Status;
    }

    if (Size < VACB_MAPPING_GRANULARITY)
    {
        RtlZeroMemory((char*)Vacb->BaseAddress + Size,
                      VACB_MAPPING_GRANULARITY - Size);
    }

    return STATUS_SUCCESS;
}
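
/* Flush a VACB back to its backing file with a synchronous, non-cached
 * paging write (IoSynchronousPageWrite). Mirrors CcReadVirtualAddress(),
 * including the clamping of the size against the section end. */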
NTSTATUS
NTAPI
CcWriteVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;
    ULARGE_INTEGER LargeSize;

    LargeSize.QuadPart = Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart;
    if (LargeSize.QuadPart > VACB_MAPPING_GRANULARITY)
    {
        LargeSize.QuadPart = VACB_MAPPING_GRANULARITY;
    }
    Size = LargeSize.LowPart;
    //
    // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
    // MmGlobalPageDirectory and the real system PDE directory. What a mess...
    //
    {
        ULONG i = 0;
        do
        {
            MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i << PAGE_SHIFT)));
        } while (++i < (Size >> PAGE_SHIFT));
    }

    ASSERT(Size <= VACB_MAPPING_GRANULARITY);
    ASSERT(Size > 0);

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, KernelMode, IoReadAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        DPRINT1("MmProbeAndLockPages failed with: %lx for %p (%p, %p)\n", Status, Mdl, Vacb, Vacb->BaseAddress);
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoSynchronousPageWrite(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }
    IoFreeMdl(Mdl);
    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoSynchronousPageWrite failed, Status %lx\n", Status);
        return Status;
    }

    return STATUS_SUCCESS;
}
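
/* Helper for CcCopyData(): perform the memory transfer for one chunk.
 * Zeroing only touches cache pages, so it needs no SEH; reads and writes
 * copy to/from the caller's buffer under SEH and return the exception
 * code if the buffer turns out to be invalid. */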
NTSTATUS
ReadWriteOrZero(
    _Inout_ PVOID BaseAddress,
    _Inout_opt_ PVOID Buffer,
    _In_ ULONG Length,
    _In_ CC_COPY_OPERATION Operation)
{
    NTSTATUS Status = STATUS_SUCCESS;

    if (Operation == CcOperationZero)
    {
        /* Zero */
        RtlZeroMemory(BaseAddress, Length);
    }
    else
    {
        _SEH2_TRY
        {
            if (Operation == CcOperationWrite)
                RtlCopyMemory(BaseAddress, Buffer, Length);
            else
                RtlCopyMemory(Buffer, BaseAddress, Length);
        }
        _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
        {
            Status = _SEH2_GetExceptionCode();
        }
        _SEH2_END;
    }

    return Status;
}
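
/* Core engine behind CcCopyRead(), CcCopyWrite() and CcZeroData(): walk the
 * requested range VACB by VACB, fault missing data in through
 * CcReadVirtualAddress(), and hand each chunk to ReadWriteOrZero(). When
 * Wait is FALSE, FALSE is returned without copying anything if part of the
 * range is not cached yet. Hard failures are raised to the caller. */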
BOOLEAN
CcCopyData (
    _In_ PFILE_OBJECT FileObject,
    _In_ LONGLONG FileOffset,
    _Inout_ PVOID Buffer,
    _In_ LONGLONG Length,
    _In_ CC_COPY_OPERATION Operation,
    _In_ BOOLEAN Wait,
    _Out_ PIO_STATUS_BLOCK IoStatus)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    ULONG BytesCopied;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PLIST_ENTRY ListEntry;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;
    PPRIVATE_CACHE_MAP PrivateCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    PrivateCacheMap = FileObject->PrivateCacheMap;
    CurrentOffset = FileOffset;
    BytesCopied = 0;

    if (!Wait)
    {
        /* Test whether the requested data is available */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        /* FIXME: this loop doesn't take into account areas that don't have
         * a VACB in the list yet */
        ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
        {
            Vacb = CONTAINING_RECORD(ListEntry,
                                     ROS_VACB,
                                     CacheMapVacbListEntry);
            ListEntry = ListEntry->Flink;
            if (!Vacb->Valid &&
                DoRangesIntersect(Vacb->FileOffset.QuadPart,
                                  VACB_MAPPING_GRANULARITY,
                                  CurrentOffset, Length))
            {
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
                /* Data not available */
                return FALSE;
            }
            if (Vacb->FileOffset.QuadPart >= CurrentOffset + Length)
                break;
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
    }

    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero((PUCHAR)BaseAddress + CurrentOffset % VACB_MAPPING_GRANULARITY,
                                 Buffer,
                                 PartialLength,
                                 Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid &&
            (Operation == CcOperationRead ||
             PartialLength < VACB_MAPPING_GRANULARITY))
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero(BaseAddress, Buffer, PartialLength, Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    /* If that was a successful sync read operation, let's handle read ahead */
    if (Operation == CcOperationRead && Length == 0 && Wait)
    {
        /* If the file isn't random access and the next read may cross a VACB
         * boundary, schedule the next read ahead
         */
        if (!BooleanFlagOn(FileObject->Flags, FO_RANDOM_ACCESS) &&
            (CurrentOffset - 1) / VACB_MAPPING_GRANULARITY != (CurrentOffset + BytesCopied - 1) / VACB_MAPPING_GRANULARITY)
        {
            CcScheduleReadAhead(FileObject, (PLARGE_INTEGER)&FileOffset, BytesCopied);
        }

        /* And update the read history in the private cache map */
        PrivateCacheMap->FileOffset1.QuadPart = PrivateCacheMap->FileOffset2.QuadPart;
        PrivateCacheMap->BeyondLastByte1.QuadPart = PrivateCacheMap->BeyondLastByte2.QuadPart;
        PrivateCacheMap->FileOffset2.QuadPart = FileOffset;
        PrivateCacheMap->BeyondLastByte2.QuadPart = FileOffset + BytesCopied;
    }

    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = BytesCopied;
    return TRUE;
}
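
/* Drain the CcDeferredWrites list: for every queued entry whose bytes can
 * now be written according to CcCanIWrite(), either signal the waiter's
 * event (see CcCanIWrite()) or invoke the posted write routine and free
 * the entry (see CcDeferWrite()). Stops when no entry can be satisfied. */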
VOID
CcPostDeferredWrites(VOID)
{
    ULONG WrittenBytes;

    /* We'll try to write as much as we can */
    WrittenBytes = 0;
    while (TRUE)
    {
        KIRQL OldIrql;
        PLIST_ENTRY ListEntry;
        PDEFERRED_WRITE DeferredWrite;

        DeferredWrite = NULL;

        /* Lock our deferred writes list */
        KeAcquireSpinLock(&CcDeferredWriteSpinLock, &OldIrql);
        for (ListEntry = CcDeferredWrites.Flink;
             ListEntry != &CcDeferredWrites;
             ListEntry = ListEntry->Flink)
        {
            /* Extract an entry */
            DeferredWrite = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);

            /* Compute the modified bytes, based on what we already wrote */
            WrittenBytes += DeferredWrite->BytesToWrite;
            /* We overflowed, give up */
            if (WrittenBytes < DeferredWrite->BytesToWrite)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Check whether we can write */
            if (CcCanIWrite(DeferredWrite->FileObject, WrittenBytes, FALSE, RetryForceCheckPerFile))
            {
                /* We can, so remove it from the list and stop looking for an entry */
                RemoveEntryList(&DeferredWrite->DeferredWriteLinks);
                break;
            }

            /* If this write isn't subject to the per-file modified-page
             * limit, stop here */
            if (!DeferredWrite->LimitModifiedPages)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Reset the count, as nothing was written yet */
            WrittenBytes -= DeferredWrite->BytesToWrite;
            DeferredWrite = NULL;
        }
        KeReleaseSpinLock(&CcDeferredWriteSpinLock, OldIrql);

        /* Nothing writable found, give up */
        if (DeferredWrite == NULL)
        {
            break;
        }

        /* If we have an event, set it and quit */
        if (DeferredWrite->Event)
        {
            KeSetEvent(DeferredWrite->Event, IO_NO_INCREMENT, FALSE);
        }
        /* Otherwise, call the write routine and free the context */
        else
        {
            DeferredWrite->PostRoutine(DeferredWrite->Context1, DeferredWrite->Context2);
            ExFreePoolWithTag(DeferredWrite, 'CcDw');
        }
    }
}
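
/* Worker for scheduled read ahead: pick up the offset and length stashed in
 * the private cache map (see CcScheduleReadAhead()), then walk that range
 * the same way CcCopyData() does, except that the data is only brought into
 * the cache and not copied anywhere. */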
VOID
CcPerformReadAhead(
    IN PFILE_OBJECT FileObject)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;
    ULONG Length;
    PPRIVATE_CACHE_MAP PrivateCacheMap;
    BOOLEAN Locked;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    /* Critical:
     * PrivateCacheMap might disappear in-between if the handle to the file
     * is closed (the private map is attached to the handle, not to the
     * file), so we need to hold the master lock while we deal with it.
     * It cannot disappear without that lock being acquired first.
     */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    /* If the handle was closed since the read ahead was scheduled, just quit */
    if (PrivateCacheMap == NULL)
    {
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        ObDereferenceObject(FileObject);
        return;
    }
    /* Otherwise, extract the read offset and length and release the private map */
    else
    {
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        CurrentOffset = PrivateCacheMap->ReadAheadOffset[1].QuadPart;
        Length = PrivateCacheMap->ReadAheadLength[1];
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* Time to go! */
    DPRINT("Doing ReadAhead for %p\n", FileObject);
    /* Lock the file, first */
    if (!SharedCacheMap->Callbacks->AcquireForReadAhead(SharedCacheMap->LazyWriteContext, FALSE))
    {
        Locked = FALSE;
        goto Clear;
    }

    /* Remember it's locked */
    Locked = TRUE;

    /* Don't read past the end of the file */
    if (CurrentOffset >= SharedCacheMap->FileSize.QuadPart)
    {
        goto Clear;
    }
    if (CurrentOffset + Length > SharedCacheMap->FileSize.QuadPart)
    {
        Length = SharedCacheMap->FileSize.QuadPart - CurrentOffset;
    }

    /* The rest of the algorithm works like CcCopyData(), with the slight
     * difference that we don't copy data back to a user-backed buffer:
     * we just bring the data into Cc.
     */
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                DPRINT1("Failed to read data: %lx!\n", Status);
                goto Clear;
            }
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                DPRINT1("Failed to read data: %lx!\n", Status);
                goto Clear;
            }
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

Clear:
    /* See the previous comment about the private cache map */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    if (PrivateCacheMap != NULL)
    {
        /* Mark read ahead as inactive */
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        InterlockedAnd((volatile long *)&PrivateCacheMap->UlongFlags, ~PRIVATE_CACHE_MAP_READ_AHEAD_ACTIVE);
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* If the file was locked, release it */
    if (Locked)
    {
        SharedCacheMap->Callbacks->ReleaseFromReadAhead(SharedCacheMap->LazyWriteContext);
    }

    /* And drop our extra reference (See: CcScheduleReadAhead) */
    ObDereferenceObject(FileObject);

    return;
}
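
/* Write throttling gate. Returns TRUE when a write of BytesToWrite may
 * proceed right away: write-through files always may, remote files may
 * unless the request comes back from the posted-write path, and local
 * writes may as long as the global and per-file dirty-page thresholds and
 * the Mm throttle limits leave room. With Wait == TRUE, the routine queues
 * an on-stack DEFERRED_WRITE entry and loops over CcPostDeferredWrites()
 * until its event is signaled, then reports TRUE. */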
/*
 * @unimplemented
 */
BOOLEAN
NTAPI
CcCanIWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG BytesToWrite,
    IN BOOLEAN Wait,
    IN BOOLEAN Retrying)
{
    KIRQL OldIrql;
    KEVENT WaitEvent;
    ULONG Length, Pages;
    BOOLEAN PerFileDefer;
    DEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;
    CC_CAN_WRITE_RETRY TryContext;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
        FileObject, BytesToWrite, Wait, Retrying);

    /* Write through is always OK */
    if (BooleanFlagOn(FileObject->Flags, FO_WRITE_THROUGH))
    {
        return TRUE;
    }

    TryContext = Retrying;
    /* Allow remote files, unless the request comes back from the posted-write path */
    if (IoIsFileOriginRemote(FileObject) && TryContext < RetryAllowRemote)
    {
        return TRUE;
    }

    /* Don't exceed the max tolerated size */
    Length = MAX_ZERO_LENGTH;
    if (BytesToWrite < MAX_ZERO_LENGTH)
    {
        Length = BytesToWrite;
    }

    Pages = BYTES_TO_PAGES(Length);

    /* By default, assume limits per file won't be hit */
    PerFileDefer = FALSE;
    Fcb = FileObject->FsContext;
    /* Do we have to check for limits per file? */
    if (TryContext >= RetryForceCheckPerFile ||
        BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES))
    {
        /* If the master lock is not held yet, acquire it now */
        if (TryContext != RetryMasterLocked)
        {
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        }

        /* Let's not assume the file is cached... */
        if (FileObject->SectionObjectPointer != NULL &&
            FileObject->SectionObjectPointer->SharedCacheMap != NULL)
        {
            SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
            /* Do we have limits per file set? */
            if (SharedCacheMap->DirtyPageThreshold != 0 &&
                SharedCacheMap->DirtyPages != 0)
            {
                /* Yes, check whether they are blocking */
                if (Pages + SharedCacheMap->DirtyPages > SharedCacheMap->DirtyPageThreshold)
                {
                    PerFileDefer = TRUE;
                }
            }
        }

        /* And don't forget to release the master lock */
        if (TryContext != RetryMasterLocked)
        {
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        }
    }

    /* So, now allow the write if:
     * - This is not the first try, or no write is being throttled yet
     * AND:
     * - We don't exceed the dirty page threshold
     * - We don't exceed what Mm can allow us to use
     *   + If we're above the top, that's fine
     *   + If we're above the bottom with few modified pages, that's fine
     *   + Otherwise, throttle!
     */
    if ((TryContext != FirstTry || IsListEmpty(&CcDeferredWrites)) &&
        CcTotalDirtyPages + Pages < CcDirtyPageThreshold &&
        (MmAvailablePages > MmThrottleTop ||
         (MmModifiedPageListHead.Total < 1000 && MmAvailablePages > MmThrottleBottom)) &&
        !PerFileDefer)
    {
        return TRUE;
    }

    /* If the caller cannot wait, give up; otherwise, enter the wait loop
     * until the write can be performed for real
     */
    if (!Wait)
    {
        return FALSE;
    }

    /* Otherwise, if there are no deferred writes yet, start the lazy writer */
    if (IsListEmpty(&CcDeferredWrites))
    {
        KIRQL OldIrql;

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        CcScheduleLazyWriteScan(TRUE);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }

    /* Initialize our wait event */
    KeInitializeEvent(&WaitEvent, NotificationEvent, FALSE);

    /* And prepare a dummy context */
    Context.NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context.NodeByteSize = sizeof(DEFERRED_WRITE);
    Context.FileObject = FileObject;
    Context.BytesToWrite = BytesToWrite;
    Context.LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
    Context.Event = &WaitEvent;

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's the first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    DPRINT1("Actively deferring write for: %p\n", FileObject);
    /* Now, we'll loop until our event is set. When it is set, it means the
     * caller can write immediately, and has to.
     */
    do
    {
        CcPostDeferredWrites();
    } while (KeWaitForSingleObject(&WaitEvent, Executive, KernelMode, FALSE, &CcIdleDelay) != STATUS_SUCCESS);

    return TRUE;
}
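
/* Public cached-read entry point: a thin wrapper that forwards to
 * CcCopyData() with CcOperationRead. */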
/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyRead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
        FileObject, FileOffset->QuadPart, Length, Wait);

    DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait,
           Buffer, IoStatus);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
                      Buffer,
                      Length,
                      CcOperationRead,
                      Wait,
                      IoStatus);
}
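
/* Public cached-write entry point: forwards to CcCopyData() with
 * CcOperationWrite, using a local IO_STATUS_BLOCK since this API does not
 * return one to the caller. */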
/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    IN PVOID Buffer)
{
    IO_STATUS_BLOCK IoStatus;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
        FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
                      Buffer,
                      Length,
                      CcOperationWrite,
                      Wait,
                      &IoStatus);
}
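
/* Queue a write that could not proceed (see CcCanIWrite()): allocate a
 * DEFERRED_WRITE context and put it on CcDeferredWrites, or run the post
 * routine immediately if allocation fails. Afterwards, try to drain the
 * deferred writes and make sure a lazy writer scan is scheduled. */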
/*
 * @implemented
 */
VOID
NTAPI
CcDeferWrite (
    IN PFILE_OBJECT FileObject,
    IN PCC_POST_DEFERRED_WRITE PostRoutine,
    IN PVOID Context1,
    IN PVOID Context2,
    IN ULONG BytesToWrite,
    IN BOOLEAN Retrying)
{
    KIRQL OldIrql;
    PDEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;

    CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
        FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);

    /* Try to allocate a context for queueing the write operation */
    Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(DEFERRED_WRITE), 'CcDw');
    /* If it failed, immediately execute the operation! */
    if (Context == NULL)
    {
        PostRoutine(Context1, Context2);
        return;
    }

    Fcb = FileObject->FsContext;

    /* Otherwise, initialize the context */
    RtlZeroMemory(Context, sizeof(DEFERRED_WRITE));
    Context->NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context->NodeByteSize = sizeof(DEFERRED_WRITE);
    Context->FileObject = FileObject;
    Context->PostRoutine = PostRoutine;
    Context->Context1 = Context1;
    Context->Context2 = Context2;
    Context->BytesToWrite = BytesToWrite;
    Context->LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's the first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    /* Try to execute the posted writes */
    CcPostDeferredWrites();

    /* Schedule a lazy writer run to handle deferred writes */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}
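
/* Fast I/O variants: identical to CcCopyRead()/CcCopyWrite() except that
 * the file offset is a 32-bit value and Wait is forced to TRUE, so the
 * copy is expected to succeed (asserted below). */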
/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyRead (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN ULONG PageCount,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, PageCount, Buffer);

    DBG_UNREFERENCED_PARAMETER(PageCount);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyRead(FileObject,
                         &LargeFileOffset,
                         Length,
                         TRUE,
                         Buffer,
                         IoStatus);
    ASSERT(Success == TRUE);
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN PVOID Buffer)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, Buffer);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyWrite(FileObject,
                          &LargeFileOffset,
                          Length,
                          TRUE,
                          Buffer);
    ASSERT(Success == TRUE);
}
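
/* Zero a range of a file. For non-cached files, MDLs whose PFN array is
 * filled with CcZeroPage are written straight to disk in chunks of at most
 * MAX_ZERO_LENGTH bytes; for cached files, the work is delegated to
 * CcCopyData() with CcOperationZero. */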
/*
 * @implemented
 */
BOOLEAN
NTAPI
CcZeroData (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER StartOffset,
    IN PLARGE_INTEGER EndOffset,
    IN BOOLEAN Wait)
{
    NTSTATUS Status;
    LARGE_INTEGER WriteOffset;
    LONGLONG Length;
    ULONG CurrentLength;
    PMDL Mdl;
    ULONG i;
    IO_STATUS_BLOCK Iosb;
    KEVENT Event;

    CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
        FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);

    DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
           "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
           Wait);

    Length = EndOffset->QuadPart - StartOffset->QuadPart;
    WriteOffset.QuadPart = StartOffset->QuadPart;

    if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
    {
        /* File is not cached */

        Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));

        while (Length > 0)
        {
            if (Length + WriteOffset.QuadPart % PAGE_SIZE > MAX_ZERO_LENGTH)
            {
                CurrentLength = MAX_ZERO_LENGTH - WriteOffset.QuadPart % PAGE_SIZE;
            }
            else
            {
                CurrentLength = Length;
            }
            MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
            Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
            for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
            {
                ((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;
            }
            KeInitializeEvent(&Event, NotificationEvent, FALSE);
            Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
            if (Status == STATUS_PENDING)
            {
                KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
                Status = Iosb.Status;
            }
            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            {
                MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
            }
            if (!NT_SUCCESS(Status))
            {
                return FALSE;
            }
            WriteOffset.QuadPart += CurrentLength;
            Length -= CurrentLength;
        }
    }
    else
    {
        IO_STATUS_BLOCK IoStatus;

        return CcCopyData(FileObject,
                          WriteOffset.QuadPart,
                          NULL,
                          Length,
                          CcOperationZero,
                          Wait,
                          &IoStatus);
    }

    return TRUE;
}