/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/copy.c
 * PURPOSE:         Implements the cache manager's copy interface
 *
 * PROGRAMMERS:
 */
/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

static PFN_NUMBER CcZeroPage = 0;

#define MAX_ZERO_LENGTH (256 * 1024)

typedef enum _CC_COPY_OPERATION
{
    CcOperationRead,
    CcOperationWrite,
    CcOperationZero
} CC_COPY_OPERATION;

ULONG CcRosTraceLevel = 0;
ULONG CcFastMdlReadWait;
ULONG CcFastMdlReadNotPossible;
ULONG CcFastReadNotPossible;
ULONG CcFastReadWait;
ULONG CcFastReadNoWait;
ULONG CcFastReadResourceMiss;
/* FUNCTIONS *****************************************************************/

VOID
NTAPI
MiZeroPhysicalPage (
    IN PFN_NUMBER PageFrameIndex
);
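
/*
 * One-time initialization: allocate and zero the single physical page
 * (CcZeroPage) that backs the zeroing writes issued by CcZeroData for
 * non-cached files. Failure to obtain the page bugchecks.
 */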
VOID
NTAPI
CcInitCacheZeroPage (
    VOID)
{
    NTSTATUS Status;

    MI_SET_USAGE(MI_USAGE_CACHE);
    //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
    Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
    if (!NT_SUCCESS(Status))
    {
        DbgPrint("Can't allocate CcZeroPage.\n");
        KeBugCheck(CACHE_MANAGER);
    }
    MiZeroPhysicalPage(CcZeroPage);
}
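
/*
 * Read the backing data for a VACB in from disk: build an MDL over the
 * VACB mapping, lock its pages for write access and issue a paging
 * read. Any tail of the mapping beyond the end of the section is
 * zero-filled afterwards.
 */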
NTSTATUS
NTAPI
CcReadVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size, Pages;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }

    Pages = BYTES_TO_PAGES(Size);
    ASSERT(Pages * PAGE_SIZE <= VACB_MAPPING_GRANULARITY);

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Pages * PAGE_SIZE, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        Mdl->MdlFlags |= MDL_IO_PAGE_READ;
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoPageRead(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }

    IoFreeMdl(Mdl);

    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageRead failed, Status %x\n", Status);
        return Status;
    }

    if (Size < VACB_MAPPING_GRANULARITY)
    {
        RtlZeroMemory((char*)Vacb->BaseAddress + Size,
                      VACB_MAPPING_GRANULARITY - Size);
    }

    return STATUS_SUCCESS;
}
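
/*
 * Flush a VACB back to disk with a synchronous paging write. The VACB
 * is marked clean up front and re-marked dirty on failure, so a later
 * flush can retry it.
 */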
NTSTATUS
NTAPI
CcWriteVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    Vacb->Dirty = FALSE;
    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }
    //
    // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
    // MmGlobalPageDirectory and the real system PDE directory. What a mess...
    //
    {
        ULONG i = 0;
        do
        {
            MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i << PAGE_SHIFT)));
        } while (++i < (Size >> PAGE_SHIFT));
    }

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, KernelMode, IoReadAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoSynchronousPageWrite(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }

    IoFreeMdl(Mdl);

    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoSynchronousPageWrite failed, Status %x\n", Status);
        Vacb->Dirty = TRUE;
        return Status;
    }

    return STATUS_SUCCESS;
}
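
/*
 * Transfer helper for CcCopyData: zero the cache mapping, copy into it
 * (write) or copy out of it (read). Faults touching the caller's
 * buffer are caught by SEH and returned as a status instead of being
 * propagated.
 */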
NTSTATUS
ReadWriteOrZero(
    _Inout_ PVOID BaseAddress,
    _Inout_opt_ PVOID Buffer,
    _In_ ULONG Length,
    _In_ CC_COPY_OPERATION Operation)
{
    NTSTATUS Status = STATUS_SUCCESS;

    if (Operation == CcOperationZero)
    {
        /* Zero */
        RtlZeroMemory(BaseAddress, Length);
    }
    else
    {
        _SEH2_TRY
        {
            if (Operation == CcOperationWrite)
                RtlCopyMemory(BaseAddress, Buffer, Length);
            else
                RtlCopyMemory(Buffer, BaseAddress, Length);
        }
        _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
        {
            Status = _SEH2_GetExceptionCode();
        }
        _SEH2_END;
    }

    return Status;
}
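
/*
 * Common worker for CcCopyRead, CcCopyWrite and CcZeroData on cached
 * files. Processes the range one VACB-granularity chunk at a time: an
 * unaligned head chunk first, then aligned chunks. Invalid chunks are
 * paged in from disk first, except when an entire chunk is about to be
 * overwritten or zeroed anyway. Hard errors are raised; if Wait is
 * FALSE and required data is not yet resident, FALSE is returned.
 */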
BOOLEAN
CcCopyData (
    _In_ PFILE_OBJECT FileObject,
    _In_ LONGLONG FileOffset,
    _Inout_ PVOID Buffer,
    _In_ LONGLONG Length,
    _In_ CC_COPY_OPERATION Operation,
    _In_ BOOLEAN Wait,
    _Out_ PIO_STATUS_BLOCK IoStatus)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    ULONG BytesCopied;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PLIST_ENTRY ListEntry;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    CurrentOffset = FileOffset;
    BytesCopied = 0;

    if (!Wait)
    {
        /* test if the requested data is available */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        /* FIXME: this loop doesn't take into account areas that don't have
         * a VACB in the list yet */
        ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
        {
            Vacb = CONTAINING_RECORD(ListEntry,
                                     ROS_VACB,
                                     CacheMapVacbListEntry);
            ListEntry = ListEntry->Flink;
            if (!Vacb->Valid &&
                DoRangesIntersect(Vacb->FileOffset.QuadPart,
                                  VACB_MAPPING_GRANULARITY,
                                  CurrentOffset, Length))
            {
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
                /* data not available */
                return FALSE;
            }
            if (Vacb->FileOffset.QuadPart >= CurrentOffset + Length)
                break;
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
    }

    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero((PUCHAR)BaseAddress + CurrentOffset % VACB_MAPPING_GRANULARITY,
                                 Buffer,
                                 PartialLength,
                                 Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid &&
            (Operation == CcOperationRead ||
             PartialLength < VACB_MAPPING_GRANULARITY))
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero(BaseAddress, Buffer, PartialLength, Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = BytesCopied;

    return TRUE;
}

/*
 * @unimplemented
 */
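/* Stub: write throttling is not implemented, so callers are always
 * allowed to write immediately. */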
BOOLEAN
NTAPI
CcCanIWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG BytesToWrite,
    IN BOOLEAN Wait,
    IN BOOLEAN Retrying)
{
    CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
        FileObject, BytesToWrite, Wait, Retrying);

    return TRUE;
}

/*
 * @implemented
 */
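/* Thin wrapper forwarding a cached read to CcCopyData. */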
BOOLEAN
NTAPI
CcCopyRead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
        FileObject, FileOffset->QuadPart, Length, Wait);

    DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait,
           Buffer, IoStatus);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
                      Buffer,
                      Length,
                      CcOperationRead,
                      Wait,
                      IoStatus);
}

/*
 * @implemented
 */
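/* Thin wrapper forwarding a cached write to CcCopyData; the I/O status
 * block is local because callers only receive the BOOLEAN result. */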
BOOLEAN
NTAPI
CcCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    IN PVOID Buffer)
{
    IO_STATUS_BLOCK IoStatus;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
        FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
                      Buffer,
                      Length,
                      CcOperationWrite,
                      Wait,
                      &IoStatus);
}

/*
 * @unimplemented
 */
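/* Stub: rather than queueing the deferred write, the post routine is
 * invoked right away (consistent with CcCanIWrite never throttling). */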
VOID
NTAPI
CcDeferWrite (
    IN PFILE_OBJECT FileObject,
    IN PCC_POST_DEFERRED_WRITE PostRoutine,
    IN PVOID Context1,
    IN PVOID Context2,
    IN ULONG BytesToWrite,
    IN BOOLEAN Retrying)
{
    CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
        FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);

    PostRoutine(Context1, Context2);
}

/*
 * @unimplemented
 */
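/* Implemented on top of CcCopyRead with Wait forced to TRUE; the
 * PageCount hint is ignored. */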
VOID
NTAPI
CcFastCopyRead (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN ULONG PageCount,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, PageCount, Buffer);

    DBG_UNREFERENCED_PARAMETER(PageCount);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyRead(FileObject,
                         &LargeFileOffset,
                         Length,
                         TRUE,
                         Buffer,
                         IoStatus);
    ASSERT(Success == TRUE);
}

/*
 * @unimplemented
 */
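/* Implemented on top of CcCopyWrite with Wait forced to TRUE. */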
VOID
NTAPI
CcFastCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN PVOID Buffer)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, Buffer);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyWrite(FileObject,
                          &LargeFileOffset,
                          Length,
                          TRUE,
                          Buffer);
    ASSERT(Success == TRUE);
}

/*
 * @unimplemented
 */
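/* Stub: with no lazy writer activity to track yet, this simply reports
 * success. */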
NTSTATUS
NTAPI
CcWaitForCurrentLazyWriterActivity (
    VOID)
{
    return STATUS_SUCCESS;
}

/*
 * @implemented
 */
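/*
 * Zero a byte range of a file. Cached files go through CcCopyData with
 * CcOperationZero. Non-cached files are zeroed with paging writes whose
 * MDL maps every page onto the shared CcZeroPage, at most
 * MAX_ZERO_LENGTH bytes per write.
 */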
BOOLEAN
NTAPI
CcZeroData (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER StartOffset,
    IN PLARGE_INTEGER EndOffset,
    IN BOOLEAN Wait)
{
    NTSTATUS Status;
    LARGE_INTEGER WriteOffset;
    LONGLONG Length;
    ULONG CurrentLength;
    PMDL Mdl;
    ULONG i;
    IO_STATUS_BLOCK Iosb;
    KEVENT Event;

    CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
        FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);

    DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
           "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
           Wait);

    Length = EndOffset->QuadPart - StartOffset->QuadPart;
    WriteOffset.QuadPart = StartOffset->QuadPart;

    if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
    {
        /* File is not cached */

        Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));

        while (Length > 0)
        {
            if (Length + WriteOffset.QuadPart % PAGE_SIZE > MAX_ZERO_LENGTH)
            {
                CurrentLength = MAX_ZERO_LENGTH - WriteOffset.QuadPart % PAGE_SIZE;
            }
            else
            {
                CurrentLength = Length;
            }
            MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
            Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
            for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
            {
                ((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;
            }
            KeInitializeEvent(&Event, NotificationEvent, FALSE);
            Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
            if (Status == STATUS_PENDING)
            {
                KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
                Status = Iosb.Status;
            }
            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            {
                MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
            }
            if (!NT_SUCCESS(Status))
            {
                return FALSE;
            }
            WriteOffset.QuadPart += CurrentLength;
            Length -= CurrentLength;
        }
    }
    else
    {
        IO_STATUS_BLOCK IoStatus;

        return CcCopyData(FileObject,
                          WriteOffset.QuadPart,
                          NULL,
                          Length,
                          CcOperationZero,
                          Wait,
                          &IoStatus);
    }

    return TRUE;
}