Mirror of https://github.com/reactos/reactos.git
[NTOSKRNL]
- BCB::CacheSegmentSize has no reason to vary at runtime. Replace it with the constant VACB_MAPPING_GRANULARITY.

svn path=/trunk/; revision=60481
Parent: 66e5c9bbf7
Commit: 1066f92fd3

5 changed files with 62 additions and 62 deletions
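The change is a plain field-to-constant refactoring: a value that every BCB stored but that never varied is hoisted into a compile-time constant, which removes a pointer dereference on each use and lets expressions such as the pages-per-view division fold at compile time. Below is a minimal standalone sketch of the before/after shape; the 256 KB value for VACB_MAPPING_GRANULARITY and the trimmed-down BCB layout are assumptions made for this illustration only, not the actual ntoskrnl headers.

/*
 * Sketch only -- simplified types, not the real ReactOS cache-manager code.
 * Assumed: one cache view (VACB mapping) covers 256 KB.
 */
#include <stdio.h>

#define PAGE_SIZE                4096u
#define VACB_MAPPING_GRANULARITY (256u * 1024u)  /* assumed value for the example */

typedef struct _BCB
{
    unsigned long long AllocationSize;
    /* Before the change each BCB also carried
     *     ULONG CacheSegmentSize;
     * but it was always initialized to the same value, so every read of
     * Bcb->CacheSegmentSize can become the constant above. */
} BCB, *PBCB;

static unsigned long min_ul(unsigned long a, unsigned long b)
{
    return (a < b) ? a : b;
}

int main(void)
{
    unsigned long Length = 300000;

    /* Old form:  TempLength = min(Bcb->CacheSegmentSize, Length);
     * New form:  the constant takes part in compile-time arithmetic. */
    unsigned long TempLength = min_ul(VACB_MAPPING_GRANULARITY, Length);

    printf("TempLength = %lu, pages per view = %lu\n",
           TempLength, (unsigned long)(VACB_MAPPING_GRANULARITY / PAGE_SIZE));
    return 0;
}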
@@ -87,7 +87,7 @@ ReadCacheSegmentChain (
 */
 if (current->Valid)
 {
-TempLength = min(Bcb->CacheSegmentSize, Length);
+TempLength = min(VACB_MAPPING_GRANULARITY, Length);
 memcpy(Buffer, current->BaseAddress, TempLength);

 Buffer = (PVOID)((ULONG_PTR)Buffer + TempLength);
@@ -116,7 +116,7 @@ ReadCacheSegmentChain (
 while ((current2 != NULL) && !current2->Valid && (current_size < MAX_RW_LENGTH))
 {
 current2 = current2->NextInChain;
-current_size += Bcb->CacheSegmentSize;
+current_size += VACB_MAPPING_GRANULARITY;
 }

 /*
@@ -130,12 +130,12 @@ ReadCacheSegmentChain (
 while ((current2 != NULL) && !current2->Valid && (current_size < MAX_RW_LENGTH))
 {
 PVOID address = current2->BaseAddress;
-for (i = 0; i < (Bcb->CacheSegmentSize / PAGE_SIZE); i++, address = RVA(address, PAGE_SIZE))
+for (i = 0; i < (VACB_MAPPING_GRANULARITY / PAGE_SIZE); i++, address = RVA(address, PAGE_SIZE))
 {
 *MdlPages++ = MmGetPfnForProcess(NULL, address);
 }
 current2 = current2->NextInChain;
-current_size += Bcb->CacheSegmentSize;
+current_size += VACB_MAPPING_GRANULARITY;
 }

 /*
@@ -172,14 +172,14 @@ ReadCacheSegmentChain (
 {
 previous = current;
 current = current->NextInChain;
-TempLength = min(Bcb->CacheSegmentSize, Length);
+TempLength = min(VACB_MAPPING_GRANULARITY, Length);
 memcpy(Buffer, previous->BaseAddress, TempLength);

 Buffer = (PVOID)((ULONG_PTR)Buffer + TempLength);

 Length = Length - TempLength;
 CcRosReleaseCacheSegment(Bcb, previous, TRUE, FALSE, FALSE);
-current_size += Bcb->CacheSegmentSize;
+current_size += VACB_MAPPING_GRANULARITY;
 }
 }
 }
@@ -200,9 +200,9 @@ ReadCacheSegment (

 SegOffset.QuadPart = CacheSeg->FileOffset;
 Size = (ULONG)(CacheSeg->Bcb->AllocationSize.QuadPart - CacheSeg->FileOffset);
-if (Size > CacheSeg->Bcb->CacheSegmentSize)
+if (Size > VACB_MAPPING_GRANULARITY)
 {
-Size = CacheSeg->Bcb->CacheSegmentSize;
+Size = VACB_MAPPING_GRANULARITY;
 }

 Mdl = IoAllocateMdl(CacheSeg->BaseAddress, Size, FALSE, FALSE, NULL);
@@ -229,10 +229,10 @@ ReadCacheSegment (
 return Status;
 }

-if (CacheSeg->Bcb->CacheSegmentSize > Size)
+if (VACB_MAPPING_GRANULARITY > Size)
 {
 RtlZeroMemory((char*)CacheSeg->BaseAddress + Size,
-CacheSeg->Bcb->CacheSegmentSize - Size);
+VACB_MAPPING_GRANULARITY - Size);
 }

 return STATUS_SUCCESS;
@@ -253,9 +253,9 @@ WriteCacheSegment (
 CacheSeg->Dirty = FALSE;
 SegOffset.QuadPart = CacheSeg->FileOffset;
 Size = (ULONG)(CacheSeg->Bcb->AllocationSize.QuadPart - CacheSeg->FileOffset);
-if (Size > CacheSeg->Bcb->CacheSegmentSize)
+if (Size > VACB_MAPPING_GRANULARITY)
 {
-Size = CacheSeg->Bcb->CacheSegmentSize;
+Size = VACB_MAPPING_GRANULARITY;
 }
 //
 // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
@@ -363,7 +363,7 @@ CcCopyRead (
 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
 BcbSegmentListEntry);
 if (!current->Valid &&
-DoSegmentsIntersect(current->FileOffset, Bcb->CacheSegmentSize,
+DoSegmentsIntersect(current->FileOffset, VACB_MAPPING_GRANULARITY,
 ReadOffset, Length))
 {
 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
@@ -378,13 +378,13 @@ CcCopyRead (
 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
 }

-TempLength = ReadOffset % Bcb->CacheSegmentSize;
+TempLength = ReadOffset % VACB_MAPPING_GRANULARITY;
 if (TempLength != 0)
 {
-TempLength = min (Length, Bcb->CacheSegmentSize - TempLength);
+TempLength = min (Length, VACB_MAPPING_GRANULARITY - TempLength);
 Status = CcRosRequestCacheSegment(Bcb,
 ROUND_DOWN(ReadOffset,
-Bcb->CacheSegmentSize),
+VACB_MAPPING_GRANULARITY),
 &BaseAddress, &Valid, &CacheSeg);
 if (!NT_SUCCESS(Status))
 {
@@ -404,7 +404,7 @@ CcCopyRead (
 return FALSE;
 }
 }
-memcpy (Buffer, (char*)BaseAddress + ReadOffset % Bcb->CacheSegmentSize,
+memcpy (Buffer, (char*)BaseAddress + ReadOffset % VACB_MAPPING_GRANULARITY,
 TempLength);
 CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, FALSE, FALSE);
 ReadLength += TempLength;
@@ -415,7 +415,7 @@ CcCopyRead (

 while (Length > 0)
 {
-TempLength = min(max(Bcb->CacheSegmentSize, MAX_RW_LENGTH), Length);
+TempLength = min(max(VACB_MAPPING_GRANULARITY, MAX_RW_LENGTH), Length);
 Status = ReadCacheSegmentChain(Bcb, ReadOffset, TempLength, Buffer);
 if (!NT_SUCCESS(Status))
 {
@@ -479,7 +479,7 @@ CcCopyWrite (
 CacheSeg = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
 BcbSegmentListEntry);
 if (!CacheSeg->Valid &&
-DoSegmentsIntersect(CacheSeg->FileOffset, Bcb->CacheSegmentSize,
+DoSegmentsIntersect(CacheSeg->FileOffset, VACB_MAPPING_GRANULARITY,
 WriteOffset, Length))
 {
 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
@@ -493,12 +493,12 @@ CcCopyWrite (
 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
 }

-TempLength = WriteOffset % Bcb->CacheSegmentSize;
+TempLength = WriteOffset % VACB_MAPPING_GRANULARITY;
 if (TempLength != 0)
 {
 ULONG ROffset;
-ROffset = ROUND_DOWN(WriteOffset, Bcb->CacheSegmentSize);
-TempLength = min (Length, Bcb->CacheSegmentSize - TempLength);
+ROffset = ROUND_DOWN(WriteOffset, VACB_MAPPING_GRANULARITY);
+TempLength = min (Length, VACB_MAPPING_GRANULARITY - TempLength);
 Status = CcRosRequestCacheSegment(Bcb, ROffset,
 &BaseAddress, &Valid, &CacheSeg);
 if (!NT_SUCCESS(Status))
@@ -512,7 +512,7 @@ CcCopyWrite (
 return FALSE;
 }
 }
-memcpy ((char*)BaseAddress + WriteOffset % Bcb->CacheSegmentSize,
+memcpy ((char*)BaseAddress + WriteOffset % VACB_MAPPING_GRANULARITY,
 Buffer, TempLength);
 CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, TRUE, FALSE);

@@ -524,7 +524,7 @@ CcCopyWrite (

 while (Length > 0)
 {
-TempLength = min (Bcb->CacheSegmentSize, Length);
+TempLength = min (VACB_MAPPING_GRANULARITY, Length);
 Status = CcRosRequestCacheSegment(Bcb,
 WriteOffset,
 &BaseAddress,
@@ -534,7 +534,7 @@ CcCopyWrite (
 {
 return FALSE;
 }
-if (!Valid && TempLength < Bcb->CacheSegmentSize)
+if (!Valid && TempLength < VACB_MAPPING_GRANULARITY)
 {
 if (!NT_SUCCESS(ReadCacheSegment(CacheSeg)))
 {
@@ -699,7 +699,7 @@ CcZeroData (
 CacheSeg = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
 BcbSegmentListEntry);
 if (!CacheSeg->Valid &&
-DoSegmentsIntersect(CacheSeg->FileOffset, Bcb->CacheSegmentSize,
+DoSegmentsIntersect(CacheSeg->FileOffset, VACB_MAPPING_GRANULARITY,
 WriteOffset.u.LowPart, Length))
 {
 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
@@ -716,7 +716,7 @@ CcZeroData (
 while (Length > 0)
 {
 ULONG Offset;
-Offset = WriteOffset.u.LowPart % Bcb->CacheSegmentSize;
+Offset = WriteOffset.u.LowPart % VACB_MAPPING_GRANULARITY;
 if (Length + Offset > MAX_ZERO_LENGTH)
 {
 CurrentLength = MAX_ZERO_LENGTH - Offset;
@@ -735,9 +735,9 @@ CcZeroData (

 while (current != NULL)
 {
-Offset = WriteOffset.u.LowPart % Bcb->CacheSegmentSize;
+Offset = WriteOffset.u.LowPart % VACB_MAPPING_GRANULARITY;
 if ((Offset != 0) ||
-(Offset + CurrentLength < Bcb->CacheSegmentSize))
+(Offset + CurrentLength < VACB_MAPPING_GRANULARITY))
 {
 if (!current->Valid)
 {
@@ -749,11 +749,11 @@ CcZeroData (
 Status);
 }
 }
-TempLength = min (CurrentLength, Bcb->CacheSegmentSize - Offset);
+TempLength = min (CurrentLength, VACB_MAPPING_GRANULARITY - Offset);
 }
 else
 {
-TempLength = Bcb->CacheSegmentSize;
+TempLength = VACB_MAPPING_GRANULARITY;
 }
 memset ((PUCHAR)current->BaseAddress + Offset, 0, TempLength);

@@ -173,7 +173,7 @@ CcSetFileSizes (
 if (current->Dirty)
 {
 RemoveEntryList(&current->DirtySegmentListEntry);
-DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
+DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
 }
 InsertHeadList(&FreeListHead, &current->BcbSegmentListEntry);
 }
@@ -57,12 +57,12 @@ CcMapData (
 Bcb->AllocationSize.QuadPart,
 Bcb->FileSize.QuadPart);

-if (ReadOffset % Bcb->CacheSegmentSize + Length > Bcb->CacheSegmentSize)
+if (ReadOffset % VACB_MAPPING_GRANULARITY + Length > VACB_MAPPING_GRANULARITY)
 {
 return FALSE;
 }

-ROffset = ROUND_DOWN(ReadOffset, Bcb->CacheSegmentSize);
+ROffset = ROUND_DOWN(ReadOffset, VACB_MAPPING_GRANULARITY);
 Status = CcRosRequestCacheSegment(Bcb,
 ROffset,
 pBuffer,
@@ -88,7 +88,7 @@ CcMapData (
 }
 }

-*pBuffer = (PVOID)((ULONG_PTR)(*pBuffer) + (ReadOffset % Bcb->CacheSegmentSize));
+*pBuffer = (PVOID)((ULONG_PTR)(*pBuffer) + (ReadOffset % VACB_MAPPING_GRANULARITY));
 iBcb = ExAllocateFromNPagedLookasideList(&iBcbLookasideList);
 if (iBcb == NULL)
 {
@@ -164,7 +164,7 @@ CcRosFlushCacheSegment (

 CacheSegment->Dirty = FALSE;
 RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
-DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
+DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
 CcRosCacheSegmentDecRefCount(CacheSegment);

 KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
@@ -243,7 +243,7 @@ CcRosFlushDirtyPages (
 continue;
 }

-PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
+PagesPerSegment = VACB_MAPPING_GRANULARITY / PAGE_SIZE;

 KeReleaseGuardedMutex(&ViewLock);

@@ -329,7 +329,7 @@ retry:
 KeReleaseGuardedMutex(&ViewLock);

 /* Page out the segment */
-for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
+for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
 {
 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

@@ -356,7 +356,7 @@ retry:
 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);

 /* Calculate how many pages we freed for Mm */
-PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
+PagesPerSegment = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
 PagesFreed = min(PagesPerSegment, Target);
 Target -= PagesFreed;
 (*NrFreed) += PagesFreed;
@@ -427,7 +427,7 @@ CcRosReleaseCacheSegment (
 if (!WasDirty && CacheSeg->Dirty)
 {
 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
-DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
+DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
 }

 if (Mapped)
@@ -474,7 +474,7 @@ CcRosLookupCacheSegment (
 {
 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
 BcbSegmentListEntry);
-if (IsPointInSegment(current->FileOffset, Bcb->CacheSegmentSize,
+if (IsPointInSegment(current->FileOffset, VACB_MAPPING_GRANULARITY,
 FileOffset))
 {
 CcRosCacheSegmentIncRefCount(current);
@@ -523,7 +523,7 @@ CcRosMarkDirtyCacheSegment (
 if (!CacheSeg->Dirty)
 {
 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
-DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
+DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
 }
 else
 {
@@ -576,7 +576,7 @@ CcRosUnmapCacheSegment (
 if (!WasDirty && NowDirty)
 {
 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
-DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
+DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
 }

 CcRosCacheSegmentDecRefCount(CacheSeg);
@@ -628,7 +628,7 @@ CcRosCreateCacheSegment (
 current->Valid = FALSE;
 current->Dirty = FALSE;
 current->PageOut = FALSE;
-current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
+current->FileOffset = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
 current->Bcb = Bcb;
 #if DBG
 if ( Bcb->Trace )
@@ -661,7 +661,7 @@ CcRosCreateCacheSegment (
 {
 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
 BcbSegmentListEntry);
-if (IsPointInSegment(current->FileOffset, Bcb->CacheSegmentSize,
+if (IsPointInSegment(current->FileOffset, VACB_MAPPING_GRANULARITY,
 FileOffset))
 {
 CcRosCacheSegmentIncRefCount(current);
@@ -713,7 +713,7 @@ CcRosCreateCacheSegment (
 #ifdef CACHE_BITMAP
 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);

-StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
+StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, VACB_MAPPING_GRANULARITY / PAGE_SIZE, CiCacheSegMappingRegionHint);

 if (StartingOffset == 0xffffffff)
 {
@@ -725,7 +725,7 @@ CcRosCreateCacheSegment (

 if (CiCacheSegMappingRegionHint == StartingOffset)
 {
-CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
+CiCacheSegMappingRegionHint += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
 }

 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
@@ -735,7 +735,7 @@ CcRosCreateCacheSegment (
 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
 0, // nothing checks for cache_segment mareas, so set to 0
 &current->BaseAddress,
-Bcb->CacheSegmentSize,
+VACB_MAPPING_GRANULARITY,
 PAGE_READWRITE,
 (PMEMORY_AREA*)&current->MemoryArea,
 FALSE,
@@ -761,7 +761,7 @@ CcRosCreateCacheSegment (
 }
 #endif

-MmMapMemoryArea(current->BaseAddress, Bcb->CacheSegmentSize,
+MmMapMemoryArea(current->BaseAddress, VACB_MAPPING_GRANULARITY,
 MC_CACHE, PAGE_READWRITE);

 return(STATUS_SUCCESS);
@@ -784,17 +784,17 @@ CcRosGetCacheSegmentChain (

 DPRINT("CcRosGetCacheSegmentChain()\n");

-Length = ROUND_UP(Length, Bcb->CacheSegmentSize);
+Length = ROUND_UP(Length, VACB_MAPPING_GRANULARITY);

 CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
-(Length / Bcb->CacheSegmentSize));
+(Length / VACB_MAPPING_GRANULARITY));

 /*
 * Look for a cache segment already mapping the same data.
 */
-for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
+for (i = 0; i < (Length / VACB_MAPPING_GRANULARITY); i++)
 {
-ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
+ULONG CurrentOffset = FileOffset + (i * VACB_MAPPING_GRANULARITY);
 current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
 if (current != NULL)
 {
@@ -815,7 +815,7 @@ CcRosGetCacheSegmentChain (
 }
 }

-for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
+for (i = 0; i < (Length / VACB_MAPPING_GRANULARITY); i++)
 {
 if (i == 0)
 {
@@ -902,10 +902,10 @@ CcRosRequestCacheSegment (

 ASSERT(Bcb);

-if ((FileOffset % Bcb->CacheSegmentSize) != 0)
+if ((FileOffset % VACB_MAPPING_GRANULARITY) != 0)
 {
 DPRINT1("Bad fileoffset %x should be multiple of %x",
-FileOffset, Bcb->CacheSegmentSize);
+FileOffset, VACB_MAPPING_GRANULARITY);
 KeBugCheck(CACHE_MANAGER);
 }

@@ -958,7 +958,7 @@ CcRosInternalFreeCacheSegment (
 }
 #endif
 #ifdef CACHE_BITMAP
-RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;
+RegionSize = VACB_MAPPING_GRANULARITY / PAGE_SIZE;

 /* Unmap all the pages. */
 for (i = 0; i < RegionSize; i++)
@@ -1054,10 +1054,10 @@ CcFlushCache (
 KeReleaseGuardedMutex(&ViewLock);
 }

-Offset.QuadPart += Bcb->CacheSegmentSize;
-if (Length > Bcb->CacheSegmentSize)
+Offset.QuadPart += VACB_MAPPING_GRANULARITY;
+if (Length > VACB_MAPPING_GRANULARITY)
 {
-Length -= Bcb->CacheSegmentSize;
+Length -= VACB_MAPPING_GRANULARITY;
 }
 else
 {
@@ -1121,7 +1121,7 @@ CcRosDeleteFileCache (
 if (current->Dirty)
 {
 RemoveEntryList(&current->DirtySegmentListEntry);
-DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
+DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
 DPRINT1("Freeing dirty segment\n");
 }
 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
@@ -1177,7 +1177,7 @@ MiReadPage(PMEMORY_AREA MemoryArea,

 Process = PsGetCurrentProcess();
 PageAddr = MiMapPageInHyperSpace(Process, *Page, &Irql);
-CacheSegOffset = (ULONG_PTR)(BaseOffset + CacheSeg->Bcb->CacheSegmentSize - FileOffset);
+CacheSegOffset = (ULONG_PTR)(BaseOffset + VACB_MAPPING_GRANULARITY - FileOffset);
 Length = RawLength - SegOffset;
 if (Length <= CacheSegOffset && Length <= PAGE_SIZE)
 {