/*
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/cc/view.c
 * PURPOSE:     Cache manager
 *
 * PROGRAMMERS: David Welch (welch@mcmail.com)
 *              Pierre Schweitzer (pierre@reactos.org)
 */

/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows:
 *
 * (1) If caching for the FCB hasn't been initiated, do so by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written, obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If the page is being read, or is not being completely written, and it
 * is not up to date, then read its data from the underlying medium. If the
 * read fails, call CcReleaseCachePage with VALID as FALSE and return an
 * error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page.
 *
 * A non-compiled, illustrative sketch of this procedure follows this comment.
 */
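
/*
 * The following is an illustrative, non-compiled sketch of the read side of
 * the procedure described in the NOTES block above. It is not part of the
 * cache manager itself: the prototypes used for CcInitializeFileCache,
 * CcRequestCachePage and CcReleaseCachePage, as well as the PFCB type and
 * the ExampleReadFromDisk helper, are assumptions made purely for the
 * example.
 */
#if 0
static NTSTATUS
ExampleReadDispatch(
    PFCB Fcb,          /* hypothetical filesystem FCB */
    ULONG FileOffset,
    ULONG Length,
    PVOID Buffer)
{
    NTSTATUS Status;
    ULONG Current = FileOffset;

    /* (1) Initiate caching for the FCB if it hasn't been done yet */
    if (!Fcb->CacheInitialized)
    {
        Status = CcInitializeFileCache(Fcb->FileObject);
        if (!NT_SUCCESS(Status))
            return Status;
        Fcb->CacheInitialized = TRUE;
    }

    while (Length > 0)
    {
        PVOID CachePage;
        BOOLEAN UpToDate;
        ULONG Chunk = min(Length, 4096 - (Current % 4096));

        /* (2) Obtain a cache page for the 4k region being read */
        Status = CcRequestCachePage(Fcb->FileObject, Current, &CachePage, &UpToDate);
        if (!NT_SUCCESS(Status))
            return Status;

        /* (3) If the page isn't up to date, read it from the medium;
         *     on failure, release the page with VALID == FALSE */
        if (!UpToDate)
        {
            Status = ExampleReadFromDisk(Fcb, Current, CachePage);
            if (!NT_SUCCESS(Status))
            {
                CcReleaseCachePage(Fcb->FileObject, CachePage, FALSE);
                return Status;
            }
        }

        /* (4) Copy the data out of the cache page */
        RtlCopyMemory(Buffer, (PUCHAR)CachePage + (Current % 4096), Chunk);

        /* (5) Release the cache page */
        CcReleaseCachePage(Fcb->FileObject, CachePage, TRUE);

        Buffer = (PUCHAR)Buffer + Chunk;
        Current += Chunk;
        Length -= Chunk;
    }

    return STATUS_SUCCESS;
}
#endif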

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

LIST_ENTRY DirtyVacbListHead;
static LIST_ENTRY VacbLruListHead;

NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;

/* Internal vars (MS):
 * - Threshold above which lazy writer will start action
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock when dealing with the deferred list
 * - List for "clean" shared cache maps
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;

#if DBG
ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}

ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;
    BOOLEAN VacbDirty = vacb->Dirty;
    BOOLEAN VacbTrace = vacb->SharedCacheMap->Trace;
    BOOLEAN VacbPageOut = vacb->PageOut;

    Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
    ASSERT(!(Refs == 0 && VacbDirty));
    if (VacbTrace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, VacbDirty, VacbPageOut);
    }

    if (Refs == 0)
    {
        CcRosInternalFreeVacb(vacb);
    }

    return Refs;
}

ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
#endif

/* FUNCTIONS *****************************************************************/

VOID
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        oldirql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("    VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu, BaseAddress %p, FileOffset %I64d\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut, current->BaseAddress, current->FileOffset.QuadPart);
        }

        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldirql);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}

NTSTATUS
CcRosFlushVacb (
    _In_ PROS_VACB Vacb,
    _Out_opt_ PIO_STATUS_BLOCK Iosb)
{
    NTSTATUS Status;
    BOOLEAN HaveLock = FALSE;
    PROS_SHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;

    CcRosUnmarkDirtyVacb(Vacb, TRUE);

    /* Lock for flush, if we are not already the top-level */
    if (IoGetTopLevelIrp() != (PIRP)FSRTL_CACHE_TOP_LEVEL_IRP)
    {
        Status = FsRtlAcquireFileForCcFlushEx(Vacb->SharedCacheMap->FileObject);
        if (!NT_SUCCESS(Status))
            goto quit;
        HaveLock = TRUE;
    }

    Status = MmFlushSegment(SharedCacheMap->FileObject->SectionObjectPointer,
                            &Vacb->FileOffset,
                            VACB_MAPPING_GRANULARITY,
                            Iosb);

    if (HaveLock)
    {
        FsRtlReleaseFileForCcFlush(Vacb->SharedCacheMap->FileObject);
    }

quit:
    if (!NT_SUCCESS(Status))
        CcRosMarkDirtyVacb(Vacb);
    else
    {
        /* Update VDL */
        if (SharedCacheMap->ValidDataLength.QuadPart < (Vacb->FileOffset.QuadPart + VACB_MAPPING_GRANULARITY))
        {
            SharedCacheMap->ValidDataLength.QuadPart = Vacb->FileOffset.QuadPart + VACB_MAPPING_GRANULARITY;
        }
    }

    return Status;
}

static
NTSTATUS
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PKIRQL OldIrql)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 */
{
    PLIST_ENTRY current_entry;

    ASSERT(SharedCacheMap);
    ASSERT(SharedCacheMap == FileObject->SectionObjectPointer->SharedCacheMap);
    ASSERT(SharedCacheMap->OpenCount == 0);

    /* Remove all VACBs from the global lists */
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        PROS_VACB Vacb = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);

        RemoveEntryList(&Vacb->VacbLruListEntry);
        InitializeListHead(&Vacb->VacbLruListEntry);

        if (Vacb->Dirty)
        {
            CcRosUnmarkDirtyVacb(Vacb, FALSE);
            /* Mark it as dirty again so we know that we have to flush before freeing it */
            Vacb->Dirty = TRUE;
        }

        current_entry = current_entry->Flink;
    }

    /* Make sure there is no trace anymore of this map */
    FileObject->SectionObjectPointer->SharedCacheMap = NULL;
    RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);

    /* Now that we're out of the locks, free everything for real */
    while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
    {
        PROS_VACB Vacb = CONTAINING_RECORD(RemoveHeadList(&SharedCacheMap->CacheMapVacbListHead), ROS_VACB, CacheMapVacbListEntry);
        ULONG RefCount;

        InitializeListHead(&Vacb->CacheMapVacbListEntry);

        /* Flush to disk, if needed */
        if (Vacb->Dirty)
        {
            IO_STATUS_BLOCK Iosb;
            NTSTATUS Status;

            Status = MmFlushSegment(FileObject->SectionObjectPointer, &Vacb->FileOffset, VACB_MAPPING_GRANULARITY, &Iosb);
            if (!NT_SUCCESS(Status))
            {
                /* Complain. There's not much we can do */
                DPRINT1("Failed to flush VACB to disk while deleting the cache entry. Status: 0x%08x\n", Status);
            }
            Vacb->Dirty = FALSE;
        }

        RefCount = CcRosVacbDecRefCount(Vacb);
#if DBG // CORE-14578
        if (RefCount != 0)
        {
            DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", Vacb, FileObject, Vacb->FileOffset.QuadPart);
            DPRINT1("There are: %d references left\n", RefCount);
            DPRINT1("Map: %d\n", Vacb->MappedCount);
            DPRINT1("Dirty: %d\n", Vacb->Dirty);
            if (FileObject->FileName.Length != 0)
            {
                DPRINT1("File was: %wZ\n", &FileObject->FileName);
            }
            else
            {
                DPRINT1("No name for the file\n");
            }
        }
#else
        (void)RefCount;
#endif
    }

    /* Release the references we own */
    if (SharedCacheMap->Section)
        ObDereferenceObject(SharedCacheMap->Section);
    ObDereferenceObject(SharedCacheMap->FileObject);

    ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);

    /* Acquire the lock again for our caller */
    *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    return STATUS_SUCCESS;
}

NTSTATUS
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL OldIrql;
    BOOLEAN FlushAll = (Target == MAXULONG);

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;

    KeEnterCriticalRegion();
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while (((current_entry != &DirtyVacbListHead) && (Target > 0)) || FlushAll)
    {
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PROS_VACB current;
        BOOLEAN Locked;

        if (current_entry == &DirtyVacbListHead)
        {
            ASSERT(FlushAll);
            if (IsListEmpty(&DirtyVacbListHead))
                break;
            current_entry = DirtyVacbListHead.Flink;
        }

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        CcRosVacbIncRefCount(current);

        SharedCacheMap = current->SharedCacheMap;

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Don't attempt to lazy write the files that asked not to */
        if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->Flags, WRITEBEHIND_DISABLED))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* Do not lazy-write the same file concurrently. Fastfat ASSERTS on that */
        if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_LAZYWRITE)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        SharedCacheMap->Flags |= SHARED_CACHE_MAP_IN_LAZYWRITE;

        /* Keep a ref on the shared cache map */
        SharedCacheMap->OpenCount++;

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        Locked = SharedCacheMap->Callbacks->AcquireForLazyWrite(SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            DPRINT("Not locked!");
            ASSERT(!Wait);
            CcRosVacbDecRefCount(current);
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
            SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE;

            if (--SharedCacheMap->OpenCount == 0)
                CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);

            continue;
        }

        IO_STATUS_BLOCK Iosb;
        Status = CcRosFlushVacb(current, &Iosb);

        SharedCacheMap->Callbacks->ReleaseFromLazyWrite(SharedCacheMap->LazyWriteContext);

        /* We release the VACB before acquiring the lock again, because
         * CcRosVacbDecRefCount might free the VACB, as CcRosFlushVacb dropped a
         * Refcount. Freeing must be done outside of the lock.
         * The refcount is decremented atomically. So this is OK. */
        CcRosVacbDecRefCount(current);
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE;

        if (--SharedCacheMap->OpenCount == 0)
            CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = Iosb.Information / PAGE_SIZE;
            (*Count) += PagesFreed;

            if (!Wait)
            {
                /* Make sure we don't overflow target! */
                if (Target < PagesFreed)
                {
                    /* If we would have, jump to zero directly */
                    Target = 0;
                }
                else
                {
                    Target -= PagesFreed;
                }
            }
        }

        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}

VOID
CcRosTrimCache(
    _In_ ULONG Target,
    _Out_ PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        ULONG Refs;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);

        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty)
        {
            /* This code is never executed. It is left for reference only. */
#if 1
            DPRINT1("MmPageOutPhysicalAddress unexpectedly called\n");
            ASSERT(FALSE);
#else
            ULONG i;
            PFN_NUMBER Page;

            /* We have to break these locks to call MmPageOutPhysicalAddress */
            KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
            KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
#endif
        }

        /* Only keep iterating through the loop while the lock is held */
        current_entry = current_entry->Flink;

        /* Dereference the VACB */
        Refs = CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    while (!IsListEmpty(&FreeList))
    {
        ULONG Refs;

        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        InitializeListHead(&current->CacheMapVacbListEntry);
        Refs = CcRosVacbDecRefCount(current);
        ASSERT(Refs == 0);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));
}

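/*
 * Releases a reference on a VACB previously obtained through CcRosGetVacb or
 * CcRosRequestVacb. If Dirty is set and the view is not yet dirty, the VACB is
 * put on the dirty list; if Mapped is set, the first mapping takes an extra
 * reference so the view stays alive while it is mapped.
 */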
NTSTATUS
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    ULONG Refs;
    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p)\n", SharedCacheMap, Vacb);

    if (Dirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    if (Mapped)
    {
        if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
        {
            CcRosVacbIncRefCount(Vacb);
        }
    }

    Refs = CcRosVacbDecRefCount(Vacb);
    ASSERT(Refs > 0);

    return STATUS_SUCCESS;
}

/* Returns with VACB Lock Held! */
PROS_VACB
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
            return current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    return NULL;
}

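/*
 * Puts a VACB on the dirty list: updates the global and per-map dirty page
 * accounting, takes an extra reference so the view cannot go away while dirty,
 * moves it to the tail of the LRU list and kicks the lazy writer if no scan is
 * currently active. Expects the VACB not to be dirty yet.
 */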
VOID
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    /* FIXME: There is no reason to account for the whole VACB. */
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

    /* Schedule a lazy writer run now that we have a dirty VACB */
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}

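/*
 * Reverse of CcRosMarkDirtyVacb: removes the VACB from the dirty list, fixes
 * up the dirty page accounting and drops the extra reference taken when it was
 * marked dirty. When LockViews is FALSE the caller already holds the master
 * lock and the cache map lock.
 */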
VOID
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    InitializeListHead(&Vacb->DirtyVacbListEntry);

    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;

    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
    }
}

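/*
 * Scans the LRU list for a VACB that is neither referenced, dirty nor mapped,
 * unlinks it from its shared cache map and drops its last reference so it gets
 * freed. Returns TRUE if a VACB was reclaimed, FALSE if every VACB is in use.
 */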
BOOLEAN
CcRosFreeOneUnusedVacb(
    VOID)
{
    KIRQL oldIrql;
    PLIST_ENTRY current_entry;
    PROS_VACB to_free = NULL;

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Browse all the available VACBs */
    current_entry = VacbLruListHead.Flink;
    while ((current_entry != &VacbLruListHead) && (to_free == NULL))
    {
        ULONG Refs;
        PROS_VACB current;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);

        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);

        /* Only deal with unused VACBs; those are the ones we will free */
        Refs = CcRosVacbGetRefCount(current);
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            /* Unlink and reset it, this is the one we want to free */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            InitializeListHead(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);

            to_free = current;
        }

        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);

        current_entry = current_entry->Flink;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    /* And now, free the VACB that we found, if any. */
    if (to_free == NULL)
    {
        return FALSE;
    }

    /* This must be its last ref */
    NT_VERIFY(CcRosVacbDecRefCount(to_free) == 0);

    return TRUE;
}

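/*
 * Allocates and initializes a new VACB for the given file offset and maps it
 * into system space, pruning one unused VACB and retrying if the mapping runs
 * out of address space. If another thread created a VACB for the same range in
 * the meantime, the new one is discarded and the existing one is returned.
 */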
static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;
    ULONG Refs;
    SIZE_T ViewSize = VACB_MAPPING_GRANULARITY;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    if (!current)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }
    current->BaseAddress = NULL;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
    current->MappedCount = 0;
    current->ReferenceCount = 0;
    InitializeListHead(&current->CacheMapVacbListEntry);
    InitializeListHead(&current->DirtyVacbListEntry);
    InitializeListHead(&current->VacbLruListEntry);

    CcRosVacbIncRefCount(current);

    while (TRUE)
    {
        /* Map VACB in system space */
        Status = MmMapViewInSystemSpaceEx(SharedCacheMap->Section, &current->BaseAddress, &ViewSize, &current->FileOffset, 0);
        if (NT_SUCCESS(Status))
        {
            break;
        }

        /*
         * If no space is left, try to prune one unused VACB to recover space for our mapping.
         * If that succeeds, retry the mapping, otherwise just fail.
         */
        if (!CcRosFreeOneUnusedVacb())
        {
            ExFreeToNPagedLookasideList(&VacbLookasideList, current);
            return Status;
        }
    }

#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p, file offset %I64d, BaseAddress %p\n",
                SharedCacheMap, current, current->FileOffset.QuadPart, current->BaseAddress);
    }
#endif

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    *Vacb = current;
    /* There is a window between the call to CcRosLookupVacb
     * and CcRosCreateVacb. We must check whether a VACB for the
     * file offset already exists. If there is one, we release
     * our newly created VACB and return the existing one.
     */
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

            Refs = CcRosVacbDecRefCount(*Vacb);
            ASSERT(Refs == 0);

            *Vacb = current;
            return STATUS_SUCCESS;
        }
        if (current->FileOffset.QuadPart < FileOffset)
        {
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. */
    current = *Vacb;
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    /* Reference it to allow release */
    CcRosVacbIncRefCount(current);

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    return Status;
}

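/*
 * Makes sure the pages backing the given range of the VACB are resident.
 * Returns FALSE immediately if they are not and Wait is FALSE; otherwise,
 * unless NoRead is set, it faults the data in through Mm and raises the
 * failure status if the read fails.
 */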
BOOLEAN
CcRosEnsureVacbResident(
    _In_ PROS_VACB Vacb,
    _In_ BOOLEAN Wait,
    _In_ BOOLEAN NoRead,
    _In_ ULONG Offset,
    _In_ ULONG Length
)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;

    ASSERT((Offset + Length) <= VACB_MAPPING_GRANULARITY);

#if 0
    if ((Vacb->FileOffset.QuadPart + Offset) > SharedCacheMap->SectionSize.QuadPart)
    {
        DPRINT1("Vacb read beyond the file size!\n");
        return FALSE;
    }
#endif

    /* Check if the pages are resident */
    if (!MmIsDataSectionResident(SharedCacheMap->FileObject->SectionObjectPointer,
                                 Vacb->FileOffset.QuadPart + Offset,
                                 Length))
    {
        if (!Wait)
        {
            return FALSE;
        }

        if (!NoRead)
        {
            NTSTATUS Status = MmMakeDataSectionResident(SharedCacheMap->FileObject->SectionObjectPointer,
                                                        Vacb->FileOffset.QuadPart + Offset,
                                                        Length,
                                                        &SharedCacheMap->ValidDataLength);
            if (!NT_SUCCESS(Status))
                ExRaiseStatus(Status);
        }
    }

    return TRUE;
}

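/*
 * Returns a referenced VACB mapping the given file offset, creating and
 * mapping a new one if none exists yet, and moves it to the tail of the LRU
 * list so it is considered recently used.
 */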
NTSTATUS
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;
    ULONG Refs;
    KIRQL OldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    Refs = CcRosVacbGetRefCount(current);

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /*
     * Return the VACB to the caller.
     */
    *Vacb = current;

    ASSERT(Refs > 1);

    return STATUS_SUCCESS;
}

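/*
 * Illustrative sketch of how a caller is expected to use the request/release
 * pair below (hypothetical caller code, not an actual call site in this file):
 *
 *     PROS_VACB Vacb;
 *     NTSTATUS Status;
 *
 *     Status = CcRosRequestVacb(SharedCacheMap,
 *                               ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY),
 *                               &Vacb);
 *     if (NT_SUCCESS(Status))
 *     {
 *         // ... access the view through Vacb->BaseAddress ...
 *         CcRosReleaseVacb(SharedCacheMap, Vacb, Dirty, Mapped);
 *     }
 */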
NTSTATUS
CcRosRequestVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
/*
 * FUNCTION: Request a page mapping for a shared cache map
 */
{
    ASSERT(SharedCacheMap);

    if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
    {
        DPRINT1("Bad fileoffset %I64x should be multiple of %x",
                FileOffset, VACB_MAPPING_GRANULARITY);
        KeBugCheck(CACHE_MANAGER);
    }

    return CcRosGetVacb(SharedCacheMap,
                        FileOffset,
                        Vacb);
}

NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 */
{
    NTSTATUS Status;

    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    if (Vacb->ReferenceCount != 0)
    {
        DPRINT1("Invalid free: %ld\n", Vacb->ReferenceCount);
        if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
        {
            DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
        }
    }

    ASSERT(Vacb->ReferenceCount == 0);
    ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));

    /* Delete the mapping */
    Status = MmUnmapViewInSystemSpace(Vacb->BaseAddress);
    if (!NT_SUCCESS(Status))
    {
        DPRINT1("Failed to unmap VACB from System address space! Status 0x%08X\n", Status);
        ASSERT(FALSE);
        /* Proceed with the deletion anyway */
    }

    RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}

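/*
 * Flushes cached data for the given section back to disk. With a NULL
 * FileOffset the whole file (up to FileSize) is flushed; otherwise only the
 * VACB-sized chunks covering FileOffset..FileOffset+Length are considered.
 * IoStatus, if provided, receives the final status and the number of bytes
 * written.
 */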
/*
 * @implemented
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LONGLONG FlushStart, FlushEnd;
    NTSTATUS Status;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=0x%I64X Length=%lu\n",
        SectionObjectPointers, FileOffset ? FileOffset->QuadPart : 0LL, Length);

    if (!SectionObjectPointers)
    {
        Status = STATUS_INVALID_PARAMETER;
        goto quit;
    }

    if (!SectionObjectPointers->SharedCacheMap)
    {
        /* Forward this to Mm */
        MmFlushSegment(SectionObjectPointers, FileOffset, Length, IoStatus);
        return;
    }

    SharedCacheMap = SectionObjectPointers->SharedCacheMap;
    ASSERT(SharedCacheMap);
    if (FileOffset)
    {
        FlushStart = FileOffset->QuadPart;
        Status = RtlLongLongAdd(FlushStart, Length, &FlushEnd);
        if (!NT_SUCCESS(Status))
            goto quit;
    }
    else
    {
        FlushStart = 0;
        FlushEnd = SharedCacheMap->FileSize.QuadPart;
    }

    Status = STATUS_SUCCESS;
    if (IoStatus)
    {
        IoStatus->Information = 0;
    }

    KeAcquireGuardedMutex(&SharedCacheMap->FlushCacheLock);

    /*
     * We flush the VACBs that we find here.
     * If there is no (dirty) VACB, it doesn't mean that there is no data to flush, so we call Mm to be sure.
     * This is suboptimal, but it is due to the lack of granularity with which we track dirty cache data.
     */
    while (FlushStart < FlushEnd)
    {
        BOOLEAN DirtyVacb = FALSE;
        PROS_VACB vacb = CcRosLookupVacb(SharedCacheMap, FlushStart);

        if (vacb != NULL)
        {
            if (vacb->Dirty)
            {
                IO_STATUS_BLOCK VacbIosb = { 0 };
                Status = CcRosFlushVacb(vacb, &VacbIosb);
                if (!NT_SUCCESS(Status))
                {
                    CcRosReleaseVacb(SharedCacheMap, vacb, FALSE, FALSE);
                    break;
                }
                DirtyVacb = TRUE;

                if (IoStatus)
                    IoStatus->Information += VacbIosb.Information;
            }

            CcRosReleaseVacb(SharedCacheMap, vacb, FALSE, FALSE);
        }

        if (!DirtyVacb)
        {
            IO_STATUS_BLOCK MmIosb;
            LARGE_INTEGER MmOffset;

            MmOffset.QuadPart = FlushStart;

            if (FlushEnd - (FlushEnd % VACB_MAPPING_GRANULARITY) <= FlushStart)
            {
                /* The whole range fits within a VACB chunk. */
                Status = MmFlushSegment(SectionObjectPointers, &MmOffset, FlushEnd - FlushStart, &MmIosb);
            }
            else
            {
                ULONG MmLength = VACB_MAPPING_GRANULARITY - (FlushStart % VACB_MAPPING_GRANULARITY);
                Status = MmFlushSegment(SectionObjectPointers, &MmOffset, MmLength, &MmIosb);
            }

            if (!NT_SUCCESS(Status))
                break;

            if (IoStatus)
                IoStatus->Information += MmIosb.Information;

            /* Update VDL */
            if (SharedCacheMap->ValidDataLength.QuadPart < FlushEnd)
                SharedCacheMap->ValidDataLength.QuadPart = FlushEnd;
        }

        if (!NT_SUCCESS(RtlLongLongAdd(FlushStart, VACB_MAPPING_GRANULARITY, &FlushStart)))
        {
            /* We're at the end of the file! */
            break;
        }

        /* Round down to the next VACB start now */
        FlushStart -= FlushStart % VACB_MAPPING_GRANULARITY;
    }

    KeReleaseGuardedMutex(&SharedCacheMap->FlushCacheLock);

quit:
    if (IoStatus)
    {
        IoStatus->Status = Status;
    }
}

NTSTATUS
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map.
         * Before you even try to remove it from the file object, always
         * lock the master lock, to be sure not to race
         * with an ongoing read-ahead!
         */
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;

        if (PrivateMap != NULL)
        {
            /* Remove it from the file */
            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

            /* And free it. */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            ASSERT(SharedCacheMap->OpenCount > 0);

            SharedCacheMap->OpenCount--;
            if (SharedCacheMap->OpenCount == 0)
            {
                CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
            }
        }
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    return STATUS_SUCCESS;
}

NTSTATUS
|
|
|
|
CcRosInitializeFileCache (
|
|
|
|
PFILE_OBJECT FileObject,
|
2014-08-17 21:47:43 +00:00
|
|
|
PCC_FILE_SIZES FileSizes,
|
2016-05-26 11:50:42 +00:00
|
|
|
BOOLEAN PinAccess,
|
2012-12-19 11:11:34 +00:00
|
|
|
PCACHE_MANAGER_CALLBACKS CallBacks,
|
|
|
|
PVOID LazyWriterContext)
|
2000-12-10 23:42:01 +00:00
|
|
|
/*
|
2014-04-12 10:59:48 +00:00
|
|
|
* FUNCTION: Initializes a shared cache map for a file object
|
2000-12-10 23:42:01 +00:00
|
|
|
*/
|
2002-08-17 15:14:26 +00:00
|
|
|
{
|
2018-02-07 22:18:54 +00:00
|
|
|
KIRQL OldIrql;
|
|
|
|
BOOLEAN Allocated;
|
2014-04-12 10:59:48 +00:00
|
|
|
PROS_SHARED_CACHE_MAP SharedCacheMap;
|
2004-08-10 01:49:37 +00:00
|
|
|
|
2018-05-23 06:44:43 +00:00
|
|
|
DPRINT("CcRosInitializeFileCache(FileObject 0x%p)\n", FileObject);
|
2002-10-02 19:20:51 +00:00
|
|
|
|
2020-11-06 08:39:31 +00:00
|
|
|
OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
|
|
|
|
|
2018-02-07 22:18:54 +00:00
|
|
|
Allocated = FALSE;
|
2018-05-23 06:37:50 +00:00
|
|
|
SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
|
2014-04-12 10:59:48 +00:00
|
|
|
if (SharedCacheMap == NULL)
|
2012-12-19 11:11:34 +00:00
|
|
|
{
|
2014-04-12 10:59:48 +00:00
|
|
|
SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
|
|
|
|
if (SharedCacheMap == NULL)
|
2012-12-19 11:11:34 +00:00
|
|
|
{
|
2024-11-25 16:35:48 +00:00
|
|
|
KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
|
2015-06-21 09:20:01 +00:00
|
|
|
return STATUS_INSUFFICIENT_RESOURCES;
|
2012-12-19 11:11:34 +00:00
|
|
|
}
|
2014-04-12 10:59:48 +00:00
|
|
|
RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
|
2018-02-08 10:02:07 +00:00
|
|
|
SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
|
|
|
|
SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
|
2014-04-12 10:59:48 +00:00
|
|
|
SharedCacheMap->FileObject = FileObject;
|
|
|
|
SharedCacheMap->Callbacks = CallBacks;
|
|
|
|
SharedCacheMap->LazyWriteContext = LazyWriterContext;
|
2014-08-17 21:47:43 +00:00
|
|
|
SharedCacheMap->SectionSize = FileSizes->AllocationSize;
|
|
|
|
SharedCacheMap->FileSize = FileSizes->FileSize;
|
2021-01-05 17:39:55 +00:00
|
|
|
SharedCacheMap->ValidDataLength = FileSizes->ValidDataLength;
|
2016-05-26 11:50:42 +00:00
|
|
|
SharedCacheMap->PinAccess = PinAccess;
|
2018-01-23 22:23:32 +00:00
|
|
|
SharedCacheMap->DirtyPageThreshold = 0;
|
2018-01-26 09:40:05 +00:00
|
|
|
SharedCacheMap->DirtyPages = 0;
|
2018-02-07 22:18:54 +00:00
|
|
|
InitializeListHead(&SharedCacheMap->PrivateList);
|
2014-04-12 10:59:48 +00:00
|
|
|
KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
|
|
|
|
InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
|
2018-09-02 12:37:47 +00:00
|
|
|
InitializeListHead(&SharedCacheMap->BcbList);
|
2024-07-25 16:09:17 +00:00
|
|
|
KeInitializeGuardedMutex(&SharedCacheMap->FlushCacheLock);
|
2018-01-24 20:24:05 +00:00
|
|
|
|
2020-11-06 08:39:31 +00:00
|
|
|
SharedCacheMap->Flags = SHARED_CACHE_MAP_IN_CREATION;
|
|
|
|
|
|
|
|
ObReferenceObjectByPointer(FileObject,
|
|
|
|
FILE_ALL_ACCESS,
|
|
|
|
NULL,
|
|
|
|
KernelMode);
|
|
|
|
|
2024-11-25 16:35:48 +00:00
|
|
|
Allocated = TRUE;
|
2020-11-06 08:39:31 +00:00
|
|
|
FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
|
|
|
|
|
2021-01-05 09:45:39 +00:00
|
|
|
//CcRosTraceCacheMap(SharedCacheMap, TRUE);
|
2020-11-06 08:39:31 +00:00
|
|
|
}
|
|
|
|
else if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_CREATION)
|
2018-06-04 12:36:07 +00:00
|
|
|
{
|
2020-11-06 08:39:31 +00:00
|
|
|
/* The shared cache map is being created somewhere else. Wait for that to happen */
|
|
|
|
KEVENT Waiter;
|
|
|
|
PKEVENT PreviousWaiter = SharedCacheMap->CreateEvent;
|
2018-06-04 12:36:07 +00:00
|
|
|
|
2020-11-06 08:39:31 +00:00
|
|
|
        KeInitializeEvent(&Waiter, NotificationEvent, FALSE);
        SharedCacheMap->CreateEvent = &Waiter;

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        KeWaitForSingleObject(&Waiter, Executive, KernelMode, FALSE, NULL);

        if (PreviousWaiter)
            KeSetEvent(PreviousWaiter, IO_NO_INCREMENT, FALSE);

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    }

    if (FileObject->PrivateCacheMap == NULL)
    {
        PPRIVATE_CACHE_MAP PrivateMap;

        /* Allocate the private cache map for this handle */
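        /* The very first mapping of the stream uses the PRIVATE_CACHE_MAP that
         * is embedded in the shared cache map (its NodeTypeCode is still 0);
         * any further file object for the same stream gets a separate
         * non-paged pool allocation. */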
        if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
        {
            PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
        }
        else
        {
            PrivateMap = &SharedCacheMap->PrivateCacheMap;
        }

        if (PrivateMap == NULL)
        {
            /* If we also allocated the shared cache map for this file, kill it */
            if (Allocated)
            {
                /* The freshly allocated shared cache map has not been inserted
                 * into CcCleanSharedCacheMapList yet (that only happens below,
                 * once the backing section has been created), so there is
                 * nothing to unlink here: just undo the allocation. */
                FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                ObDereferenceObject(FileObject);
                ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
            }

            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Initialize it */
        RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
        PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
        PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
        PrivateMap->FileObject = FileObject;
        KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);

        /* Link it to the file */
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
        InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

        FileObject->PrivateCacheMap = PrivateMap;
        SharedCacheMap->OpenCount++;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* Create the section */
    if (Allocated)
    {
        NTSTATUS Status;

        ASSERT(SharedCacheMap->Section == NULL);

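        /* Back the cache with a SEC_RESERVE data section on the file; the
         * VACBs later map views of this section (VACB_MAPPING_GRANULARITY
         * bytes each) into system space as the cached data is actually
         * accessed. */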
        Status = MmCreateSection(
            &SharedCacheMap->Section,
            SECTION_ALL_ACCESS,
            NULL,
            &SharedCacheMap->SectionSize,
            PAGE_READWRITE,
            SEC_RESERVE,
            NULL,
            FileObject);

        ASSERT(NT_SUCCESS(Status));

        if (!NT_SUCCESS(Status))
        {
            CcRosReleaseFileCache(FileObject);
            return Status;
        }

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_CREATION;

        if (SharedCacheMap->CreateEvent)
        {
            KeSetEvent(SharedCacheMap->CreateEvent, IO_NO_INCREMENT, FALSE);
            SharedCacheMap->CreateEvent = NULL;
        }

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }

    return STATUS_SUCCESS;
}
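
/*
 * Usage sketch (not part of this file): a filesystem read path typically
 * brings caching online through the public CcInitializeCacheMap API (which,
 * in this implementation, ends up in CcRosInitializeFileCache above) before
 * copying data out of the cache with CcCopyRead. 'Fcb', 'DriverCallbacks' and
 * 'CanWait' are hypothetical FSD-side names used only for illustration:
 *
 *     if (FileObject->PrivateCacheMap == NULL)
 *     {
 *         CC_FILE_SIZES FileSizes;
 *
 *         FileSizes.AllocationSize = Fcb->AllocationSize;
 *         FileSizes.FileSize = Fcb->FileSize;
 *         FileSizes.ValidDataLength = Fcb->ValidDataLength;
 *
 *         CcInitializeCacheMap(FileObject, &FileSizes, FALSE,
 *                              &DriverCallbacks, Fcb);
 *     }
 *
 *     if (!CcCopyRead(FileObject, &ByteOffset, Length, CanWait, Buffer, &IoStatus))
 *     {
 *         // CanWait was FALSE and the data was not resident: queue the IRP
 *         // to a worker thread and retry there with CanWait == TRUE.
 *     }
 */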

/*
 * @implemented
 */
PFILE_OBJECT
NTAPI
CcGetFileObjectFromSectionPtrs (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        return SharedCacheMap->FileObject;
    }
    return NULL;
}
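
/*
 * Note: this is typically used to get back to the file object that caching
 * was initialized with when a caller only holds the SECTION_OBJECT_POINTERS
 * stored in a filesystem's FCB.
 */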

CODE_SEG("INIT")
VOID
NTAPI
CcInitView (
    VOID)
{
    DPRINT("CcInitView()\n");

    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    InitializeListHead(&CcDeferredWrites);
    InitializeListHead(&CcCleanSharedCacheMapList);
    KeInitializeSpinLock(&CcDeferredWriteSpinLock);

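    /* Lookaside lists backing the cache manager control structures:
     * internal BCBs, shared cache maps and VACBs, all from non-paged pool. */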
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    CcInitCacheZeroPage();
}

#if DBG && defined(KDBG)

#include <kdbg/kdb.h>

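/* KDBG extension helpers: dump one summary line per shared cache map on the
 * clean list, and the dirty page / write throttling counters. */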
BOOLEAN
ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
{
    PLIST_ENTRY ListEntry;
    UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");

    KdbpPrint(" Usage Summary (in kb)\n");
    KdbpPrint("Shared\t\tMapped\tDirty\tName\n");
    /* No need to lock the spin lock here, we're in DBG */
    for (ListEntry = CcCleanSharedCacheMapList.Flink;
         ListEntry != &CcCleanSharedCacheMapList;
         ListEntry = ListEntry->Flink)
    {
        PLIST_ENTRY Vacbs;
        ULONG Mapped = 0, Dirty = 0;
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PUNICODE_STRING FileName;
        PWSTR Extra = L"";

        SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);

        /* Dirty size */
        Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;

        /* First, count for all the associated VACB */
        for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
             Vacbs != &SharedCacheMap->CacheMapVacbListHead;
             Vacbs = Vacbs->Flink)
        {
            Mapped += VACB_MAPPING_GRANULARITY / 1024;
        }

        /* Setup name */
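        /* If the file object carries no name of its own, duck-type a FastFAT
         * FCB by its hard-coded node type (0x0502) and size (0x1F8) and use
         * the UNICODE_STRING that FastFAT keeps at offset 0x100 of the FCB,
         * so a name can still be printed for such streams. */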
        if (SharedCacheMap->FileObject != NULL &&
            SharedCacheMap->FileObject->FileName.Length != 0)
        {
            FileName = &SharedCacheMap->FileObject->FileName;
        }
        else if (SharedCacheMap->FileObject != NULL &&
                 SharedCacheMap->FileObject->FsContext != NULL &&
                 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeTypeCode == 0x0502 &&
                 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeByteSize == 0x1F8 &&
                 ((PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100))->Length != 0)
        {
            FileName = (PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100);
            Extra = L" (FastFAT)";
        }
        else
        {
            FileName = &NoName;
        }

        /* And print */
        KdbpPrint("%p\t%d\t%d\t%wZ%S\n", SharedCacheMap, Mapped, Dirty, FileName, Extra);
    }

    return TRUE;
}

BOOLEAN
ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
{
    KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
              (CcTotalDirtyPages * PAGE_SIZE) / 1024);
    KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
              (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
    KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
              (MmAvailablePages * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
              (MmThrottleTop * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
              (MmThrottleBottom * PAGE_SIZE) / 1024);
    KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
              (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);

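    /* Give a quick verdict on whether the deferred-write path would currently
     * throttle writers; 64 pages is the largest charge a single write can ask
     * for, hence the middle case. */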
    if (CcTotalDirtyPages >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
    }
    else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
    }
    else
    {
        KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
    }

    return TRUE;
}

#endif // DBG && defined(KDBG)

/* EOF */