/*
 * COPYRIGHT: See COPYING in the top level directory
 * PROJECT: ReactOS kernel
 * FILE: ntoskrnl/cc/view.c
 * PURPOSE: Cache manager
 *
 * PROGRAMMERS: David Welch (welch@mcmail.com)
 */

/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows:
 *
 * (1) If caching for the FCB hasn't been initiated, then do so by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written, obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If the page is being read, or is being written only partially, and it
 * is not up to date, then read its data from the underlying medium. If the
 * read fails, call CcReleaseCachePage with VALID set to FALSE and return an
 * error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page.
 */
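
/*
 * The sketch below (compiled out) illustrates the five steps above in a
 * hypothetical filesystem read dispatch routine. The exact prototypes and
 * out-parameter shapes of CcInitializeFileCache, CcRequestCachePage and
 * CcReleaseCachePage are assumed here for illustration only, and MYFSD_FCB,
 * its fields and MyFsdReadFromDisk are made-up placeholders, not real
 * declarations.
 */
#if 0
static NTSTATUS
MyFsdReadRegion(
    PMYFSD_FCB Fcb,
    ULONG FileOffset,
    PVOID Buffer)
{
    NTSTATUS Status;
    PVOID BaseAddress;
    BOOLEAN UpToDate;
    PVOID CachePage;

    /* (1) Initiate caching for the FCB if that hasn't been done yet. */
    if (!Fcb->CacheInitialized)
    {
        Status = CcInitializeFileCache(Fcb->FileObject, &Fcb->Bcb);
        if (!NT_SUCCESS(Status))
            return Status;
        Fcb->CacheInitialized = TRUE;
    }

    /* (2) Obtain the cache page covering this 4k region. */
    Status = CcRequestCachePage(Fcb->Bcb,
                                FileOffset,
                                &BaseAddress,
                                &UpToDate,
                                &CachePage);
    if (!NT_SUCCESS(Status))
        return Status;

    /* (3) The page is being read, so if it isn't up to date it must be
       filled from the underlying medium first. */
    if (!UpToDate)
    {
        Status = MyFsdReadFromDisk(Fcb, FileOffset, BaseAddress);
        if (!NT_SUCCESS(Status))
        {
            /* The read failed: release the page with VALID as FALSE. */
            CcReleaseCachePage(Fcb->Bcb, CachePage, FALSE);
            return Status;
        }
    }

    /* (4) Copy the data out of the page. */
    RtlCopyMemory(Buffer, BaseAddress, PAGE_SIZE);

    /* (5) Release the cache page, marking it valid. */
    return CcReleaseCachePage(Fcb->Bcb, CachePage, TRUE);
}
#endif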

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#if defined (ALLOC_PRAGMA)
#pragma alloc_text(INIT, CcInitView)
#endif

/* GLOBALS *******************************************************************/

static LIST_ENTRY DirtyVacbListHead;
static LIST_ENTRY VacbLruListHead;
ULONG DirtyPageCount = 0;

KGUARDED_MUTEX ViewLock;

NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;

#if DBG
/* VACB reference counting helpers: traced in DBG builds, raw in release builds. */
static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
{
    ++vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
{
    --vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
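
/*
 * Usage note: callers use CcRosVacbIncRefCount/CcRosVacbDecRefCount instead
 * of touching ReferenceCount directly; in DBG builds, once tracing has been
 * enabled on a shared cache map (see CcRosTraceCacheMap below), every
 * reference change on its VACBs is logged with the caller's file and line.
 */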

NTSTATUS
CcRosInternalFreeVacb(PROS_VACB Vacb);


/* FUNCTIONS *****************************************************************/

VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
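
/*
 * A possible debugging pattern (illustrative, not taken from real callers):
 *
 *     CcRosTraceCacheMap(SharedCacheMap, TRUE);
 *     ... exercise the cached file ...
 *     CcRosTraceCacheMap(SharedCacheMap, FALSE);
 *
 * While enabled, the refcount helpers above report every reference change
 * on the map's VACBs.
 */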

NTSTATUS
NTAPI
CcRosFlushVacb (
    PROS_VACB Vacb)
{
    NTSTATUS Status;
    KIRQL oldIrql;

    /* Write the data backed by this VACB out to the medium. */
    Status = CcWriteVirtualAddress(Vacb);
    if (NT_SUCCESS(Status))
    {
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);

        /* The VACB is clean now: pull it off the dirty list and drop the
           reference the dirty list held on it. */
        Vacb->Dirty = FALSE;
        RemoveEntryList(&Vacb->DirtyVacbListEntry);
        DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        CcRosVacbDecRefCount(Vacb);

        KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }

    return Status;
}
|
|
|
|
|
|
|
|
NTSTATUS
|
2005-09-14 01:44:19 +00:00
|
|
|
NTAPI
|
2012-12-19 11:11:34 +00:00
|
|
|
CcRosFlushDirtyPages (
|
|
|
|
ULONG Target,
|
|
|
|
PULONG Count,
|
|
|
|
BOOLEAN Wait)
|
2002-08-14 20:58:39 +00:00
|
|
|
{
|
- Okay so...listen up. First off: When you acquire a lock such as a fast mutex, you should never acquire it recursively.
For example, when you handle a page fault in a section, then page fault while handling that page fault (which is perfectly okay),
you shouldn't be trying to re-acquire the address space lock that you're already holding. After this fix, this scenario works
and countless others. Apps like QTInfo now work and load, and PictureViewer doesn't BSOD the system anymore. I've fixed this by changing
the lock to a pushlock. It not only increases speed inside the memory manager significantly (such as during page fault handling), but
does allow recursive acquisition without any problems.
- Now if that wasn't bad enough, here's a couple more tips. Fast Mutexes actually require APC_LEVEL to be effective. If you're going
to be using a Fast Mutex and calling it with the "Unsafe" version, then don't expect anything to work. Also, using functions like
"CcTryToAcquireBrokenMutex" where correct code is duplicated then hacked to work isn't a big help either. And that's not all. Fast Mutex
disables kernel APCs by setting the KernelApcDisable flag on, and it's expected that the count inside the fast mutex will match the count
inside the thread. In other words, LOCK ACQUISITION AND RELEASE MUST BE ORDERED. You can't acquire LOCK A and B, and then release lock A
and B, because that leads to deadlocks and other issues. So of course, the Cache Manager acquired a view lock, then acquired a segment lock,
then released the view lock, then released the segment lock, then re-acquired the view lock. Uh, no, that won't work. You know what else
doesn't work so well? Disabling APCs about 6-9 times to acquire a single lock, and using spinlocks in the same code path as well. Just how
paranoid are you about thread safety, but still manage to get it wrong? Okay, so we've got recursion, out-of-order lock acquision and
release, made-up "broken" acquire functions, and using a lock that depends on APC_LEVEL at PASSIVE_LEVEL. The best part is when Cc builds
an array of cache segments, and locks each of them... then during release, the list gets parsed head-first, so the first acquired locks
get released first. So locks a, b, c, d get acquired, then a, b, c, d get released. Great! Sounds about right for ReactOS's Cache Manager
design. I've changed the view lock to a guarded mutex -- which actually properly disables APCs and works at PASSIVE_LEVEL, and changed the
segment locks to be push locks. First it'll be 10 times faster then acquiring a bazillion fast mutexes, especially since APCs have already
been disabled at this point, and it also allows you to do most of the stupid things the Cache Manager does. Out-of-order release is still
not going to work well, so eventually on a multi-processor machine the code will completely die -- but at least it'll work on UP for now.
In the end, this makes things like the Inkscape installer and Quicktime Installer to work, and probably countless other things that generated
ASSERTS in the fast mutex code.
-- Alex Ionescu
svn path=/trunk/; revision=30401
2007-11-12 19:00:26 +00:00
|
|
|
PLIST_ENTRY current_entry;
|
2014-04-12 09:31:07 +00:00
|
|
|
PROS_VACB current;
|
- Okay so...listen up. First off: When you acquire a lock such as a fast mutex, you should never acquire it recursively.
For example, when you handle a page fault in a section, then page fault while handling that page fault (which is perfectly okay),
you shouldn't be trying to re-acquire the address space lock that you're already holding. After this fix, this scenario works
and countless others. Apps like QTInfo now work and load, and PictureViewer doesn't BSOD the system anymore. I've fixed this by changing
the lock to a pushlock. It not only increases speed inside the memory manager significantly (such as during page fault handling), but
does allow recursive acquisition without any problems.
- Now if that wasn't bad enough, here's a couple more tips. Fast Mutexes actually require APC_LEVEL to be effective. If you're going
to be using a Fast Mutex and calling it with the "Unsafe" version, then don't expect anything to work. Also, using functions like
"CcTryToAcquireBrokenMutex" where correct code is duplicated then hacked to work isn't a big help either. And that's not all. Fast Mutex
disables kernel APCs by setting the KernelApcDisable flag on, and it's expected that the count inside the fast mutex will match the count
inside the thread. In other words, LOCK ACQUISITION AND RELEASE MUST BE ORDERED. You can't acquire LOCK A and B, and then release lock A
and B, because that leads to deadlocks and other issues. So of course, the Cache Manager acquired a view lock, then acquired a segment lock,
then released the view lock, then released the segment lock, then re-acquired the view lock. Uh, no, that won't work. You know what else
doesn't work so well? Disabling APCs about 6-9 times to acquire a single lock, and using spinlocks in the same code path as well. Just how
paranoid are you about thread safety, but still manage to get it wrong? Okay, so we've got recursion, out-of-order lock acquision and
release, made-up "broken" acquire functions, and using a lock that depends on APC_LEVEL at PASSIVE_LEVEL. The best part is when Cc builds
an array of cache segments, and locks each of them... then during release, the list gets parsed head-first, so the first acquired locks
get released first. So locks a, b, c, d get acquired, then a, b, c, d get released. Great! Sounds about right for ReactOS's Cache Manager
design. I've changed the view lock to a guarded mutex -- which actually properly disables APCs and works at PASSIVE_LEVEL, and changed the
segment locks to be push locks. First it'll be 10 times faster then acquiring a bazillion fast mutexes, especially since APCs have already
been disabled at this point, and it also allows you to do most of the stupid things the Cache Manager does. Out-of-order release is still
not going to work well, so eventually on a multi-processor machine the code will completely die -- but at least it'll work on UP for now.
In the end, this makes things like the Inkscape installer and Quicktime Installer to work, and probably countless other things that generated
ASSERTS in the fast mutex code.
-- Alex Ionescu
svn path=/trunk/; revision=30401
2007-11-12 19:00:26 +00:00
|
|
|
BOOLEAN Locked;
|
|
|
|
NTSTATUS Status;
|
2011-12-21 19:46:07 +00:00
|
|
|
LARGE_INTEGER ZeroTimeout;
|
2012-12-19 11:11:34 +00:00
|
|
|
|
2013-02-16 17:37:17 +00:00
|
|
|
DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
|
2012-12-19 11:11:34 +00:00
|
|
|
|
- Okay so...listen up. First off: When you acquire a lock such as a fast mutex, you should never acquire it recursively.
For example, when you handle a page fault in a section, then page fault while handling that page fault (which is perfectly okay),
you shouldn't be trying to re-acquire the address space lock that you're already holding. After this fix, this scenario works
and countless others. Apps like QTInfo now work and load, and PictureViewer doesn't BSOD the system anymore. I've fixed this by changing
the lock to a pushlock. It not only increases speed inside the memory manager significantly (such as during page fault handling), but
does allow recursive acquisition without any problems.
- Now if that wasn't bad enough, here's a couple more tips. Fast Mutexes actually require APC_LEVEL to be effective. If you're going
to be using a Fast Mutex and calling it with the "Unsafe" version, then don't expect anything to work. Also, using functions like
"CcTryToAcquireBrokenMutex" where correct code is duplicated then hacked to work isn't a big help either. And that's not all. Fast Mutex
disables kernel APCs by setting the KernelApcDisable flag on, and it's expected that the count inside the fast mutex will match the count
inside the thread. In other words, LOCK ACQUISITION AND RELEASE MUST BE ORDERED. You can't acquire LOCK A and B, and then release lock A
and B, because that leads to deadlocks and other issues. So of course, the Cache Manager acquired a view lock, then acquired a segment lock,
then released the view lock, then released the segment lock, then re-acquired the view lock. Uh, no, that won't work. You know what else
doesn't work so well? Disabling APCs about 6-9 times to acquire a single lock, and using spinlocks in the same code path as well. Just how
paranoid are you about thread safety, but still manage to get it wrong? Okay, so we've got recursion, out-of-order lock acquision and
release, made-up "broken" acquire functions, and using a lock that depends on APC_LEVEL at PASSIVE_LEVEL. The best part is when Cc builds
an array of cache segments, and locks each of them... then during release, the list gets parsed head-first, so the first acquired locks
get released first. So locks a, b, c, d get acquired, then a, b, c, d get released. Great! Sounds about right for ReactOS's Cache Manager
design. I've changed the view lock to a guarded mutex -- which actually properly disables APCs and works at PASSIVE_LEVEL, and changed the
segment locks to be push locks. First it'll be 10 times faster then acquiring a bazillion fast mutexes, especially since APCs have already
been disabled at this point, and it also allows you to do most of the stupid things the Cache Manager does. Out-of-order release is still
not going to work well, so eventually on a multi-processor machine the code will completely die -- but at least it'll work on UP for now.
In the end, this makes things like the Inkscape installer and Quicktime Installer to work, and probably countless other things that generated
ASSERTS in the fast mutex code.
-- Alex Ionescu
svn path=/trunk/; revision=30401
2007-11-12 19:00:26 +00:00
|
|
|
(*Count) = 0;
|
2011-12-21 19:46:07 +00:00
|
|
|
ZeroTimeout.QuadPart = 0;
|
2012-12-19 11:11:34 +00:00
|
|
|
|
2010-10-05 15:52:00 +00:00
|
|
|
KeEnterCriticalRegion();
|
- Okay so...listen up. First off: When you acquire a lock such as a fast mutex, you should never acquire it recursively.
For example, when you handle a page fault in a section, then page fault while handling that page fault (which is perfectly okay),
you shouldn't be trying to re-acquire the address space lock that you're already holding. After this fix, this scenario works
and countless others. Apps like QTInfo now work and load, and PictureViewer doesn't BSOD the system anymore. I've fixed this by changing
the lock to a pushlock. It not only increases speed inside the memory manager significantly (such as during page fault handling), but
does allow recursive acquisition without any problems.
- Now if that wasn't bad enough, here's a couple more tips. Fast Mutexes actually require APC_LEVEL to be effective. If you're going
to be using a Fast Mutex and calling it with the "Unsafe" version, then don't expect anything to work. Also, using functions like
"CcTryToAcquireBrokenMutex" where correct code is duplicated then hacked to work isn't a big help either. And that's not all. Fast Mutex
disables kernel APCs by setting the KernelApcDisable flag on, and it's expected that the count inside the fast mutex will match the count
inside the thread. In other words, LOCK ACQUISITION AND RELEASE MUST BE ORDERED. You can't acquire LOCK A and B, and then release lock A
and B, because that leads to deadlocks and other issues. So of course, the Cache Manager acquired a view lock, then acquired a segment lock,
then released the view lock, then released the segment lock, then re-acquired the view lock. Uh, no, that won't work. You know what else
doesn't work so well? Disabling APCs about 6-9 times to acquire a single lock, and using spinlocks in the same code path as well. Just how
paranoid are you about thread safety, but still manage to get it wrong? Okay, so we've got recursion, out-of-order lock acquision and
release, made-up "broken" acquire functions, and using a lock that depends on APC_LEVEL at PASSIVE_LEVEL. The best part is when Cc builds
an array of cache segments, and locks each of them... then during release, the list gets parsed head-first, so the first acquired locks
get released first. So locks a, b, c, d get acquired, then a, b, c, d get released. Great! Sounds about right for ReactOS's Cache Manager
design. I've changed the view lock to a guarded mutex -- which actually properly disables APCs and works at PASSIVE_LEVEL, and changed the
segment locks to be push locks. First it'll be 10 times faster then acquiring a bazillion fast mutexes, especially since APCs have already
been disabled at this point, and it also allows you to do most of the stupid things the Cache Manager does. Out-of-order release is still
not going to work well, so eventually on a multi-processor machine the code will completely die -- but at least it'll work on UP for now.
In the end, this makes things like the Inkscape installer and Quicktime Installer to work, and probably countless other things that generated
ASSERTS in the fast mutex code.
-- Alex Ionescu
svn path=/trunk/; revision=30401
2007-11-12 19:00:26 +00:00
|
|
|
KeAcquireGuardedMutex(&ViewLock);
|
2012-12-19 11:11:34 +00:00
|
|
|
|
2014-04-12 09:31:07 +00:00
|
|
|
current_entry = DirtyVacbListHead.Flink;
|
|
|
|
if (current_entry == &DirtyVacbListHead)
|
- Okay so...listen up. First off: When you acquire a lock such as a fast mutex, you should never acquire it recursively.
For example, when you handle a page fault in a section, then page fault while handling that page fault (which is perfectly okay),
you shouldn't be trying to re-acquire the address space lock that you're already holding. After this fix, this scenario works
and countless others. Apps like QTInfo now work and load, and PictureViewer doesn't BSOD the system anymore. I've fixed this by changing
the lock to a pushlock. It not only increases speed inside the memory manager significantly (such as during page fault handling), but
does allow recursive acquisition without any problems.
- Now if that wasn't bad enough, here's a couple more tips. Fast Mutexes actually require APC_LEVEL to be effective. If you're going
to be using a Fast Mutex and calling it with the "Unsafe" version, then don't expect anything to work. Also, using functions like
"CcTryToAcquireBrokenMutex" where correct code is duplicated then hacked to work isn't a big help either. And that's not all. Fast Mutex
disables kernel APCs by setting the KernelApcDisable flag on, and it's expected that the count inside the fast mutex will match the count
inside the thread. In other words, LOCK ACQUISITION AND RELEASE MUST BE ORDERED. You can't acquire LOCK A and B, and then release lock A
and B, because that leads to deadlocks and other issues. So of course, the Cache Manager acquired a view lock, then acquired a segment lock,
then released the view lock, then released the segment lock, then re-acquired the view lock. Uh, no, that won't work. You know what else
doesn't work so well? Disabling APCs about 6-9 times to acquire a single lock, and using spinlocks in the same code path as well. Just how
paranoid are you about thread safety, but still manage to get it wrong? Okay, so we've got recursion, out-of-order lock acquision and
release, made-up "broken" acquire functions, and using a lock that depends on APC_LEVEL at PASSIVE_LEVEL. The best part is when Cc builds
an array of cache segments, and locks each of them... then during release, the list gets parsed head-first, so the first acquired locks
get released first. So locks a, b, c, d get acquired, then a, b, c, d get released. Great! Sounds about right for ReactOS's Cache Manager
design. I've changed the view lock to a guarded mutex -- which actually properly disables APCs and works at PASSIVE_LEVEL, and changed the
segment locks to be push locks. First it'll be 10 times faster then acquiring a bazillion fast mutexes, especially since APCs have already
been disabled at this point, and it also allows you to do most of the stupid things the Cache Manager does. Out-of-order release is still
not going to work well, so eventually on a multi-processor machine the code will completely die -- but at least it'll work on UP for now.
In the end, this makes things like the Inkscape installer and Quicktime Installer to work, and probably countless other things that generated
ASSERTS in the fast mutex code.
-- Alex Ionescu
svn path=/trunk/; revision=30401
2007-11-12 19:00:26 +00:00
|
|
|
{
|
|
|
|
DPRINT("No Dirty pages\n");
|
|
|
|
}
|
2012-12-19 11:11:34 +00:00
|
|
|
|
2014-04-12 09:31:07 +00:00
|
|
|
while ((current_entry != &DirtyVacbListHead) && (Target > 0))
|
- Okay so...listen up. First off: When you acquire a lock such as a fast mutex, you should never acquire it recursively.
For example, when you handle a page fault in a section, then page fault while handling that page fault (which is perfectly okay),
you shouldn't be trying to re-acquire the address space lock that you're already holding. After this fix, this scenario works
and countless others. Apps like QTInfo now work and load, and PictureViewer doesn't BSOD the system anymore. I've fixed this by changing
the lock to a pushlock. It not only increases speed inside the memory manager significantly (such as during page fault handling), but
does allow recursive acquisition without any problems.
- Now if that wasn't bad enough, here's a couple more tips. Fast Mutexes actually require APC_LEVEL to be effective. If you're going
to be using a Fast Mutex and calling it with the "Unsafe" version, then don't expect anything to work. Also, using functions like
"CcTryToAcquireBrokenMutex" where correct code is duplicated then hacked to work isn't a big help either. And that's not all. Fast Mutex
disables kernel APCs by setting the KernelApcDisable flag on, and it's expected that the count inside the fast mutex will match the count
inside the thread. In other words, LOCK ACQUISITION AND RELEASE MUST BE ORDERED. You can't acquire LOCK A and B, and then release lock A
and B, because that leads to deadlocks and other issues. So of course, the Cache Manager acquired a view lock, then acquired a segment lock,
then released the view lock, then released the segment lock, then re-acquired the view lock. Uh, no, that won't work. You know what else
doesn't work so well? Disabling APCs about 6-9 times to acquire a single lock, and using spinlocks in the same code path as well. Just how
paranoid are you about thread safety, but still manage to get it wrong? Okay, so we've got recursion, out-of-order lock acquision and
release, made-up "broken" acquire functions, and using a lock that depends on APC_LEVEL at PASSIVE_LEVEL. The best part is when Cc builds
an array of cache segments, and locks each of them... then during release, the list gets parsed head-first, so the first acquired locks
get released first. So locks a, b, c, d get acquired, then a, b, c, d get released. Great! Sounds about right for ReactOS's Cache Manager
design. I've changed the view lock to a guarded mutex -- which actually properly disables APCs and works at PASSIVE_LEVEL, and changed the
segment locks to be push locks. First it'll be 10 times faster then acquiring a bazillion fast mutexes, especially since APCs have already
been disabled at this point, and it also allows you to do most of the stupid things the Cache Manager does. Out-of-order release is still
not going to work well, so eventually on a multi-processor machine the code will completely die -- but at least it'll work on UP for now.
In the end, this makes things like the Inkscape installer and Quicktime Installer to work, and probably countless other things that generated
ASSERTS in the fast mutex code.
-- Alex Ionescu
svn path=/trunk/; revision=30401
2007-11-12 19:00:26 +00:00
|
|
|
{
|
2014-04-12 09:31:07 +00:00
|
|
|
current = CONTAINING_RECORD(current_entry,
|
|
|
|
ROS_VACB,
|
|
|
|
DirtyVacbListEntry);
|
- Okay so...listen up. First off: When you acquire a lock such as a fast mutex, you should never acquire it recursively.
For example, when you handle a page fault in a section, then page fault while handling that page fault (which is perfectly okay),
you shouldn't be trying to re-acquire the address space lock that you're already holding. After this fix, this scenario works
and countless others. Apps like QTInfo now work and load, and PictureViewer doesn't BSOD the system anymore. I've fixed this by changing
the lock to a pushlock. It not only increases speed inside the memory manager significantly (such as during page fault handling), but
does allow recursive acquisition without any problems.
- Now if that wasn't bad enough, here's a couple more tips. Fast Mutexes actually require APC_LEVEL to be effective. If you're going
to be using a Fast Mutex and calling it with the "Unsafe" version, then don't expect anything to work. Also, using functions like
"CcTryToAcquireBrokenMutex" where correct code is duplicated then hacked to work isn't a big help either. And that's not all. Fast Mutex
disables kernel APCs by setting the KernelApcDisable flag on, and it's expected that the count inside the fast mutex will match the count
inside the thread. In other words, LOCK ACQUISITION AND RELEASE MUST BE ORDERED. You can't acquire LOCK A and B, and then release lock A
and B, because that leads to deadlocks and other issues. So of course, the Cache Manager acquired a view lock, then acquired a segment lock,
then released the view lock, then released the segment lock, then re-acquired the view lock. Uh, no, that won't work. You know what else
doesn't work so well? Disabling APCs about 6-9 times to acquire a single lock, and using spinlocks in the same code path as well. Just how
paranoid are you about thread safety, but still manage to get it wrong? Okay, so we've got recursion, out-of-order lock acquision and
release, made-up "broken" acquire functions, and using a lock that depends on APC_LEVEL at PASSIVE_LEVEL. The best part is when Cc builds
an array of cache segments, and locks each of them... then during release, the list gets parsed head-first, so the first acquired locks
get released first. So locks a, b, c, d get acquired, then a, b, c, d get released. Great! Sounds about right for ReactOS's Cache Manager
design. I've changed the view lock to a guarded mutex -- which actually properly disables APCs and works at PASSIVE_LEVEL, and changed the
segment locks to be push locks. First it'll be 10 times faster then acquiring a bazillion fast mutexes, especially since APCs have already
been disabled at this point, and it also allows you to do most of the stupid things the Cache Manager does. Out-of-order release is still
not going to work well, so eventually on a multi-processor machine the code will completely die -- but at least it'll work on UP for now.
In the end, this makes things like the Inkscape installer and Quicktime Installer to work, and probably countless other things that generated
ASSERTS in the fast mutex code.
-- Alex Ionescu
svn path=/trunk/; revision=30401
2007-11-12 19:00:26 +00:00
|
|
|
current_entry = current_entry->Flink;
|
2007-11-18 23:11:20 +00:00
|
|
|
|
2014-04-12 09:31:07 +00:00
|
|
|
CcRosVacbIncRefCount(current);
|
2011-12-23 23:20:09 +00:00
|
|
|
|
2014-04-12 10:59:48 +00:00
|
|
|
Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
|
|
|
|
current->SharedCacheMap->LazyWriteContext, Wait);
|
- Okay so...listen up. First off: When you acquire a lock such as a fast mutex, you should never acquire it recursively.
For example, when you handle a page fault in a section, then page fault while handling that page fault (which is perfectly okay),
you shouldn't be trying to re-acquire the address space lock that you're already holding. After this fix, this scenario works
and countless others. Apps like QTInfo now work and load, and PictureViewer doesn't BSOD the system anymore. I've fixed this by changing
the lock to a pushlock. It not only increases speed inside the memory manager significantly (such as during page fault handling), but
does allow recursive acquisition without any problems.
- Now if that wasn't bad enough, here's a couple more tips. Fast Mutexes actually require APC_LEVEL to be effective. If you're going
to be using a Fast Mutex and calling it with the "Unsafe" version, then don't expect anything to work. Also, using functions like
"CcTryToAcquireBrokenMutex" where correct code is duplicated then hacked to work isn't a big help either. And that's not all. Fast Mutex
disables kernel APCs by setting the KernelApcDisable flag on, and it's expected that the count inside the fast mutex will match the count
inside the thread. In other words, LOCK ACQUISITION AND RELEASE MUST BE ORDERED. You can't acquire LOCK A and B, and then release lock A
and B, because that leads to deadlocks and other issues. So of course, the Cache Manager acquired a view lock, then acquired a segment lock,
then released the view lock, then released the segment lock, then re-acquired the view lock. Uh, no, that won't work. You know what else
doesn't work so well? Disabling APCs about 6-9 times to acquire a single lock, and using spinlocks in the same code path as well. Just how
paranoid are you about thread safety, but still manage to get it wrong? Okay, so we've got recursion, out-of-order lock acquision and
release, made-up "broken" acquire functions, and using a lock that depends on APC_LEVEL at PASSIVE_LEVEL. The best part is when Cc builds
an array of cache segments, and locks each of them... then during release, the list gets parsed head-first, so the first acquired locks
get released first. So locks a, b, c, d get acquired, then a, b, c, d get released. Great! Sounds about right for ReactOS's Cache Manager
design. I've changed the view lock to a guarded mutex -- which actually properly disables APCs and works at PASSIVE_LEVEL, and changed the
segment locks to be push locks. First it'll be 10 times faster then acquiring a bazillion fast mutexes, especially since APCs have already
been disabled at this point, and it also allows you to do most of the stupid things the Cache Manager does. Out-of-order release is still
not going to work well, so eventually on a multi-processor machine the code will completely die -- but at least it'll work on UP for now.
In the end, this makes things like the Inkscape installer and Quicktime Installer to work, and probably countless other things that generated
ASSERTS in the fast mutex code.
-- Alex Ionescu
svn path=/trunk/; revision=30401
2007-11-12 19:00:26 +00:00
|
|
|
if (!Locked)
|
|
|
|
{
|
2014-04-12 09:31:07 +00:00
|
|
|
CcRosVacbDecRefCount(current);
|
- Okay so...listen up. First off: When you acquire a lock such as a fast mutex, you should never acquire it recursively.
For example, when you handle a page fault in a section, then page fault while handling that page fault (which is perfectly okay),
you shouldn't be trying to re-acquire the address space lock that you're already holding. After this fix, this scenario works
and countless others. Apps like QTInfo now work and load, and PictureViewer doesn't BSOD the system anymore. I've fixed this by changing
the lock to a pushlock. It not only increases speed inside the memory manager significantly (such as during page fault handling), but
does allow recursive acquisition without any problems.
- Now if that wasn't bad enough, here's a couple more tips. Fast Mutexes actually require APC_LEVEL to be effective. If you're going
to be using a Fast Mutex and calling it with the "Unsafe" version, then don't expect anything to work. Also, using functions like
"CcTryToAcquireBrokenMutex" where correct code is duplicated then hacked to work isn't a big help either. And that's not all. Fast Mutex
disables kernel APCs by setting the KernelApcDisable flag on, and it's expected that the count inside the fast mutex will match the count
inside the thread. In other words, LOCK ACQUISITION AND RELEASE MUST BE ORDERED. You can't acquire LOCK A and B, and then release lock A
and B, because that leads to deadlocks and other issues. So of course, the Cache Manager acquired a view lock, then acquired a segment lock,
then released the view lock, then released the segment lock, then re-acquired the view lock. Uh, no, that won't work. You know what else
doesn't work so well? Disabling APCs about 6-9 times to acquire a single lock, and using spinlocks in the same code path as well. Just how
paranoid are you about thread safety, but still manage to get it wrong? Okay, so we've got recursion, out-of-order lock acquision and
release, made-up "broken" acquire functions, and using a lock that depends on APC_LEVEL at PASSIVE_LEVEL. The best part is when Cc builds
an array of cache segments, and locks each of them... then during release, the list gets parsed head-first, so the first acquired locks
get released first. So locks a, b, c, d get acquired, then a, b, c, d get released. Great! Sounds about right for ReactOS's Cache Manager
design. I've changed the view lock to a guarded mutex -- which actually properly disables APCs and works at PASSIVE_LEVEL, and changed the
segment locks to be push locks. First it'll be 10 times faster then acquiring a bazillion fast mutexes, especially since APCs have already
been disabled at this point, and it also allows you to do most of the stupid things the Cache Manager does. Out-of-order release is still
not going to work well, so eventually on a multi-processor machine the code will completely die -- but at least it'll work on UP for now.
In the end, this makes things like the Inkscape installer and Quicktime Installer to work, and probably countless other things that generated
ASSERTS in the fast mutex code.
-- Alex Ionescu
svn path=/trunk/; revision=30401
2007-11-12 19:00:26 +00:00
|
|
|
continue;
|
|
|
|
}
|
2011-12-21 19:08:59 +00:00
|
|
|
|
2011-12-21 19:46:07 +00:00
|
|
|
Status = KeWaitForSingleObject(¤t->Mutex,
|
|
|
|
Executive,
|
|
|
|
KernelMode,
|
|
|
|
FALSE,
|
|
|
|
Wait ? NULL : &ZeroTimeout);
|
|
|
|
if (Status != STATUS_SUCCESS)
|
|
|
|
{
|
2014-04-12 10:59:48 +00:00
|
|
|
current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
|
|
|
|
current->SharedCacheMap->LazyWriteContext);
|
2014-04-12 09:31:07 +00:00
|
|
|
CcRosVacbDecRefCount(current);
|
2011-12-21 19:46:07 +00:00
|
|
|
continue;
|
|
|
|
}

        ASSERT(current->Dirty);

        /* One reference is added above */
        if (current->ReferenceCount > 2)
        {
            KeReleaseMutex(&current->Mutex, FALSE);
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        KeReleaseMutex(&current->Mutex, FALSE);
        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            (*Count) += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            Target -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        }

        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
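
/*
 * Usage sketch, added for illustration and not taken from this file:
 * a periodic writer pass might drive the flusher like this ("Freed"
 * is a hypothetical local; using DirtyPageCount as the target simply
 * requests everything currently dirty). Wait == TRUE makes the
 * per-VACB mutex acquisition above blocking; Wait == FALSE turns it
 * into a try-acquire through the zero timeout.
 *
 *     ULONG Freed;
 *     CcRosFlushDirtyPages(DirtyPageCount, &Freed, TRUE);
 *     DPRINT("Flushed %lu dirty pages\n", Freed);
 */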

NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        CcRosInternalFreeVacb(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
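
/*
 * Usage sketch, added for illustration and not part of the original
 * file: a caller under memory pressure (for example a balance-set
 * sweep; the scenario is assumed, and "Freed" is a hypothetical local)
 * could ask the cache to return up to 64 pages like this:
 *
 *     ULONG Freed;
 *     CcRosTrimCache(64, 0, &Freed);
 *     DPRINT("Cc returned %lu of 64 requested pages\n", Freed);
 *
 * The function first evicts unreferenced views from the LRU list and
 * only falls back to CcRosFlushDirtyPages, plus a single retry, when
 * the target was not met.
 */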

NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    Vacb->Valid = Valid;

    WasDirty = Vacb->Dirty;
    Vacb->Dirty = Vacb->Dirty || Dirty;

    if (!WasDirty && Vacb->Dirty)
    {
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    if (Mapped)
    {
        Vacb->MappedCount++;
    }
    CcRosVacbDecRefCount(Vacb);
    if (Mapped && (Vacb->MappedCount == 1))
    {
        CcRosVacbIncRefCount(Vacb);
    }
    if (!WasDirty && Vacb->Dirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&Vacb->Mutex, FALSE);

    return STATUS_SUCCESS;
}
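
/*
 * Reference accounting in the release path above, worked through
 * (commentary added for clarity): the release itself drops one
 * reference, while the first mapping of a view (MappedCount reaching 1)
 * and a clean-to-dirty transition each pin one extra reference. A VACB
 * released as both newly mapped and newly dirty therefore nets
 * -1 + 1 + 1 = +1, keeping it alive until it is unmapped and written
 * back.
 */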

/* Returns with VACB Lock Held! */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            KeWaitForSingleObject(&current->Mutex,
                                  Executive,
                                  KernelMode,
                                  FALSE,
                                  NULL);
            return current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    return NULL;
}
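
/*
 * Contract sketch for the lookup above (illustrative; FileOffset is a
 * hypothetical offset within the cached range): a hit comes back
 * referenced and with its mutex held, so it must be paired with
 * CcRosReleaseVacb, which drops both.
 *
 *     PROS_VACB Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
 *     if (Vacb != NULL)
 *     {
 *         ... copy data through Vacb->BaseAddress ...
 *         CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, FALSE, FALSE);
 *     }
 */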

NTSTATUS
NTAPI
CcRosMarkDirtyVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PROS_VACB Vacb;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        KeBugCheck(CACHE_MANAGER);
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    if (!Vacb->Dirty)
    {
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }
    else
    {
        CcRosVacbDecRefCount(Vacb);
    }
|
2002-08-14 20:58:39 +00:00
|
|
|
|
2012-12-19 11:11:34 +00:00
|
|
|
/* Move to the tail of the LRU list */
|
2014-04-12 09:31:07 +00:00
|
|
|
RemoveEntryList(&Vacb->VacbLruListEntry);
|
|
|
|
InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
|
2011-12-07 09:49:49 +00:00
|
|
|
|
2014-04-12 09:31:07 +00:00
|
|
|
Vacb->Dirty = TRUE;
|
2011-12-24 03:57:10 +00:00
|
|
|
|
2014-04-12 10:59:48 +00:00
|
|
|
KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
|
2012-12-19 11:11:34 +00:00
|
|
|
KeReleaseGuardedMutex(&ViewLock);
|
2014-04-12 09:31:07 +00:00
|
|
|
KeReleaseMutex(&Vacb->Mutex, FALSE);
|
2002-10-02 19:20:51 +00:00
|
|
|
|
2013-09-30 20:06:18 +00:00
|
|
|
return STATUS_SUCCESS;
|
2001-12-31 01:53:46 +00:00
|
|
|
}
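
/*
 * NB: dirty accounting is kept in whole views rather than in the pages
 * actually written: each newly dirtied VACB adds
 * VACB_MAPPING_GRANULARITY / PAGE_SIZE pages to DirtyPageCount (64 pages
 * per view, assuming the usual 256 kB view and 4 kB page constants).
 */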

NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    BOOLEAN NowDirty)
{
    PROS_VACB Vacb;
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    WasDirty = Vacb->Dirty;
    Vacb->Dirty = Vacb->Dirty || NowDirty;

    Vacb->MappedCount--;

    if (!WasDirty && NowDirty)
    {
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    CcRosVacbDecRefCount(Vacb);
    if (!WasDirty && NowDirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }
    if (Vacb->MappedCount == 0)
    {
        CcRosVacbDecRefCount(Vacb);
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&Vacb->Mutex, FALSE);

    return STATUS_SUCCESS;
}
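
/*
 * Reference bookkeeping above, spelled out: the reference taken by
 * CcRosLookupVacb is always dropped; one reference is retained when the
 * VACB has just been put on the dirty list, and one is dropped once the
 * last mapping of the view goes away.
 */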

static
NTSTATUS
CcRosMapVacb(
    PROS_VACB Vacb)
{
    ULONG i;
    NTSTATUS Status;
    ULONG_PTR NumberOfPages;

    /* Create a memory area. */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
                                0, // nothing checks for VACB mareas, so set to 0
                                &Vacb->BaseAddress,
                                VACB_MAPPING_GRANULARITY,
                                PAGE_READWRITE,
                                (PMEMORY_AREA*)&Vacb->MemoryArea,
                                0,
                                PAGE_SIZE);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());
    if (!NT_SUCCESS(Status))
    {
        KeBugCheck(CACHE_MANAGER);
    }

    ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
    ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);

    /* Create a virtual mapping for this memory area */
    NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
    for (i = 0; i < NumberOfPages; i++)
    {
        PFN_NUMBER PageFrameNumber;

        Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
        if (PageFrameNumber == 0)
        {
            DPRINT1("Unable to allocate page\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }

        Status = MmCreateVirtualMapping(NULL,
                                        (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
                                        PAGE_READWRITE,
                                        &PageFrameNumber,
                                        1);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Unable to create virtual mapping\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }

    return STATUS_SUCCESS;
}
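
/*
 * Design note on CcRosMapVacb: the view is materialized in two steps -- a
 * kernel memory area is reserved for the whole VACB_MAPPING_GRANULARITY
 * range, then physical pages are requested from the MC_CACHE memory
 * consumer and mapped one page at a time, so the balancer can account for
 * cache pages individually and reclaim them through CcRosTrimCache.
 */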

static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    if (FileOffset >= SharedCacheMap->FileSize.QuadPart)
    {
        *Vacb = NULL;
        return STATUS_INVALID_PARAMETER;
    }

    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    if (current == NULL)
    {
        *Vacb = NULL;
        return STATUS_INSUFFICIENT_RESOURCES;
    }
    current->BaseAddress = NULL;
    current->Valid = FALSE;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
    }
#endif
    current->MappedCount = 0;
    current->DirtyVacbListEntry.Flink = NULL;
    current->DirtyVacbListEntry.Blink = NULL;
    current->ReferenceCount = 1;
    KeInitializeMutex(&current->Mutex, 0);
    KeWaitForSingleObject(&current->Mutex,
                          Executive,
                          KernelMode,
                          FALSE,
                          NULL);
    KeAcquireGuardedMutex(&ViewLock);

    *Vacb = current;
    /* There is a window between the call to CcRosLookupVacb
     * and CcRosCreateVacb. We must check if a VACB for the
     * file offset already exists. If there is one, we release
     * our newly created VACB and return the existing one.
     */
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            KeReleaseMutex(&(*Vacb)->Mutex, FALSE);
            KeReleaseGuardedMutex(&ViewLock);
            ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
            *Vacb = current;
            KeWaitForSingleObject(&current->Mutex,
                                  Executive,
                                  KernelMode,
                                  FALSE,
                                  NULL);
            return STATUS_SUCCESS;
        }
        if (current->FileOffset.QuadPart < FileOffset)
        {
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. */
    current = *Vacb;
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
    KeReleaseGuardedMutex(&ViewLock);

    MI_SET_USAGE(MI_USAGE_CACHE);
#if MI_TRACE_PFNS
    if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
    {
        PWCHAR pos;
        ULONG len;
        pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, L'\\');
        if (pos)
        {
            /* Only record the final path component, and only when a
             * separator was actually found. */
            len = wcslen(pos) * sizeof(WCHAR);
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
        }
    }
#endif

    Status = CcRosMapVacb(current);

    return Status;
}

NTSTATUS
NTAPI
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PLONGLONG BaseOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /*
     * Return information about the VACB to the caller.
     */
    *UptoDate = current->Valid;
    *BaseAddress = current->BaseAddress;
    DPRINT("*BaseAddress %p\n", *BaseAddress);
    *Vacb = current;
    *BaseOffset = current->FileOffset.QuadPart;
    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
CcRosRequestVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
/*
 * FUNCTION: Request a page mapping for a shared cache map
 */
{
    LONGLONG BaseOffset;

    ASSERT(SharedCacheMap);

    if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
    {
        DPRINT1("Bad fileoffset %I64x should be multiple of %x\n",
                FileOffset, VACB_MAPPING_GRANULARITY);
        KeBugCheck(CACHE_MANAGER);
    }

    return CcRosGetVacb(SharedCacheMap,
                        FileOffset,
                        &BaseOffset,
                        BaseAddress,
                        UptoDate,
                        Vacb);
}
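
#if 0
/* Illustrative sketch only, not compiled: roughly how a filesystem read path
 * is expected to drive CcRosRequestVacb. ExampleCachedRead and
 * ExampleDiskRead are hypothetical names; the release call is sketched with
 * the CcRosReleaseVacb name used elsewhere in this cache manager (an
 * assumption here). */
static NTSTATUS
ExampleCachedRead(
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG ReadOffset)
{
    PROS_VACB Vacb;
    PVOID BaseAddress;
    BOOLEAN UptoDate;
    NTSTATUS Status;

    /* Offsets passed to CcRosRequestVacb must be view-aligned,
     * or the function bugchecks. */
    Status = CcRosRequestVacb(SharedCacheMap,
                              ROUND_DOWN(ReadOffset, VACB_MAPPING_GRANULARITY),
                              &BaseAddress,
                              &UptoDate,
                              &Vacb);
    if (!NT_SUCCESS(Status))
    {
        return Status;
    }

    if (!UptoDate)
    {
        /* The view does not hold valid data yet: fill it from the
         * underlying medium, e.g. Status = ExampleDiskRead(BaseAddress); */
    }

    /* ... copy data out of BaseAddress, then release the view,
     * e.g. CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE); */
    return Status;
}
#endif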

static
VOID
CcFreeCachePage (
    PVOID Context,
    MEMORY_AREA* MemoryArea,
    PVOID Address,
    PFN_NUMBER Page,
    SWAPENTRY SwapEntry,
    BOOLEAN Dirty)
{
    ASSERT(SwapEntry == 0);
    if (Page != 0)
    {
        ASSERT(MmGetReferenceCountPage(Page) == 1);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }
}

NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 */
{
    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     Vacb->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}

/*
 * @implemented
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;
    KIRQL oldIrql;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
        SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        while (RemainingLength > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }
                KeReleaseMutex(&current->Mutex, FALSE);

                KeAcquireGuardedMutex(&ViewLock);
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosVacbDecRefCount(current);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                KeReleaseGuardedMutex(&ViewLock);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
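
/*
 * Note: CcFlushCache walks the requested range in whole-view
 * (VACB_MAPPING_GRANULARITY) strides, and when several views fail to flush
 * only the last failure status is left in IoStatus.
 */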

NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    SharedCacheMap->RefCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->RefCount--;
    if (SharedCacheMap->RefCount == 0)
    {
        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                RemoveEntryList(&current->DirtyVacbListEntry);
                DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
                DPRINT1("Freeing dirty VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosInternalFreeVacb(current);
        }
        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
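
/*
 * NB: CcRosDeleteFileCache is entered with ViewLock held; it temporarily
 * drops the lock around CcFlushCache and returns with it held again, which
 * is why the callers below take ViewLock before calling it.
 */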

VOID
NTAPI
CcRosReferenceCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    ASSERT(SharedCacheMap);
    ASSERT(SharedCacheMap->RefCount != 0);
    SharedCacheMap->RefCount++;
    KeReleaseGuardedMutex(&ViewLock);
}

VOID
NTAPI
CcRosRemoveIfClosed (
    PSECTION_OBJECT_POINTERS SectionObjectPointer)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    DPRINT("CcRosRemoveIfClosed()\n");
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap && SharedCacheMap->RefCount == 0)
    {
        CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
    }
    KeReleaseGuardedMutex(&ViewLock);
}

VOID
NTAPI
CcRosDereferenceCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    ASSERT(SharedCacheMap);
    if (SharedCacheMap->RefCount > 0)
    {
        SharedCacheMap->RefCount--;
        if (SharedCacheMap->RefCount == 0)
        {
            MmFreeSectionSegments(SharedCacheMap->FileObject);
            CcRosDeleteFileCache(FileObject, SharedCacheMap);
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
}

NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
        if (FileObject->PrivateCacheMap != NULL)
        {
            FileObject->PrivateCacheMap = NULL;
            if (SharedCacheMap->RefCount > 0)
            {
                SharedCacheMap->RefCount--;
                if (SharedCacheMap->RefCount == 0)
                {
                    MmFreeSectionSegments(SharedCacheMap->FileObject);
                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
                }
            }
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
CcTryToInitializeFileCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    NTSTATUS Status;

    KeAcquireGuardedMutex(&ViewLock);

    ASSERT(FileObject->SectionObjectPointer);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap == NULL)
    {
        Status = STATUS_UNSUCCESSFUL;
    }
    else
    {
        if (FileObject->PrivateCacheMap == NULL)
        {
            FileObject->PrivateCacheMap = SharedCacheMap;
            SharedCacheMap->RefCount++;
        }
        Status = STATUS_SUCCESS;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return Status;
}

NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
           FileObject, SharedCacheMap);

    KeAcquireGuardedMutex(&ViewLock);
    if (SharedCacheMap == NULL)
    {
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
    }
    if (FileObject->PrivateCacheMap == NULL)
    {
        FileObject->PrivateCacheMap = SharedCacheMap;
        SharedCacheMap->RefCount++;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return STATUS_SUCCESS;
}
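
#if 0
/* Illustrative sketch only, not compiled: how a filesystem would typically
 * set up caching for a stream before its first cached I/O. The function and
 * parameter names are hypothetical. */
static NTSTATUS
ExampleEnsureCaching(
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    PCACHE_MANAGER_CALLBACKS Callbacks,
    PVOID LazyWriterContext)
{
    /* Fast path: attach this file object to an existing shared cache map. */
    if (NT_SUCCESS(CcTryToInitializeFileCache(FileObject)))
    {
        return STATUS_SUCCESS;
    }

    /* Slow path: create (or join) the shared cache map for the stream. */
    return CcRosInitializeFileCache(FileObject,
                                    FileSizes,
                                    Callbacks,
                                    LazyWriterContext);
}
#endif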

/*
 * @implemented
 */
PFILE_OBJECT
NTAPI
CcGetFileObjectFromSectionPtrs (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        return SharedCacheMap->FileObject;
    }
    return NULL;
}

VOID
INIT_FUNCTION
NTAPI
CcInitView (
    VOID)
{
    DPRINT("CcInitView()\n");

    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    KeInitializeGuardedMutex(&ViewLock);
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);

    CcInitCacheZeroPage();
}

/* EOF */