services/fs/cdfs/.cvsignore: Added .cvsignore file

ntoskrnl/ex/fmutex.c: ExAcquireFastMutex: Added a check for recursively
acquiring a FAST_MUTEX (see the sketch below the commit metadata).
ntoskrnl/include/internal/mm.h: ADDRESS_SPACE: Changed lock from a KMUTEX
to a FAST_MUTEX.
ntoskrnl/mm/aspace.c: MmLockAddressSpace, MmUnlockAddressSpace,
MmInitializeAddressSpace: Changed the lock from a KMUTEX to a FAST_MUTEX.
ntoskrnl/mm/mdl.c: MmInitializeMdlImplementation, MmMapLockedPages,
MmUnmapLockedPages: Use a special region that doesn't need the address
space lock for mapping MDLs.
ntoskrnl/mm/mminit.c: MmInit1: Call the MDL initialization routine.
ntoskrnl/mm/section.c: MmMapViewOfSegment: Changed to a static function,
don't lock the address space since it is done by the caller.
ntoskrnl/ps/thread.c: PsDumpThreads: Fixed garbage prints in stack dump.

svn path=/trunk/; revision=2968
David Welch 2002-05-17 23:01:57 +00:00
parent aec0b9bcfc
commit daad760238
8 changed files with 169 additions and 94 deletions
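
A note on the first change: a FAST_MUTEX is not recursive. If a thread that
already owns the mutex calls ExAcquireFastMutex again, the InterlockedExchange
on Count can never succeed and the thread ends up waiting on an event that only
it can set. The assert added in fmutex.c (visible in the hunk below) turns that
silent deadlock into an immediate, diagnosable failure. A hypothetical caller
that the new check would now catch, for illustration only:

/* Illustrative only: not part of the commit. */
VOID
BuggyRoutine(PFAST_MUTEX Lock)
{
   ExAcquireFastMutex(Lock);
   /* ... work that unexpectedly re-enters this path ... */
   ExAcquireFastMutex(Lock);   /* before: the thread blocks forever waiting
                                * for itself; now: the new
                                * assert(Owner != KeGetCurrentThread()) fires */
   ExReleaseFastMutex(Lock);
   ExReleaseFastMutex(Lock);
}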

services/fs/cdfs/.cvsignore

@ -0,0 +1,6 @@
base.tmp
junk.tmp
temp.exp
cdfs.coff
cdfs.sys.unstripped
*.d

ntoskrnl/ex/fmutex.c

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: fmutex.c,v 1.12 2001/12/27 23:56:42 dwelch Exp $
/* $Id: fmutex.c,v 1.13 2002/05/17 23:01:56 dwelch Exp $
*
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/ex/fmutex.c
@ -33,12 +33,12 @@
#include <internal/debug.h>
/* FUNCTIONS *****************************************************************/
VOID FASTCALL
ExAcquireFastMutexUnsafe(PFAST_MUTEX FastMutex)
{
assert(FastMutex->Owner != KeGetCurrentThread());
InterlockedIncrement(&FastMutex->Contention);
while (InterlockedExchange(&FastMutex->Count, 0) == 0)
{

ntoskrnl/include/internal/mm.h

@ -146,7 +146,7 @@ typedef struct
typedef struct _MADDRESS_SPACE
{
LIST_ENTRY MAreaListHead;
KMUTEX Lock;
FAST_MUTEX Lock;
ULONG LowestAddress;
struct _EPROCESS* Process;
PUSHORT PageTableRefCountTable;
@ -509,5 +509,7 @@ BOOLEAN MmIsPageSwapEntry(PEPROCESS Process, PVOID Address);
VOID
MmTransferOwnershipPage(PVOID PhysicalAddress, ULONG NewConsumer);
VOID MmSetDirtyPage(PEPROCESS Process, PVOID Address);
VOID
MmInitializeMdlImplementation(VOID);
#endif

ntoskrnl/mm/aspace.c

@ -1,4 +1,4 @@
/* $Id: aspace.c,v 1.10 2002/05/14 21:19:18 dwelch Exp $
/* $Id: aspace.c,v 1.11 2002/05/17 23:01:56 dwelch Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -36,11 +36,7 @@ MmLockAddressSpace(PMADDRESS_SPACE AddressSpace)
{
return;
}
(VOID)KeWaitForMutexObject(&AddressSpace->Lock,
0,
KernelMode,
FALSE,
NULL);
ExAcquireFastMutex(&AddressSpace->Lock);
}
VOID
@ -53,7 +49,7 @@ MmUnlockAddressSpace(PMADDRESS_SPACE AddressSpace)
{
return;
}
KeReleaseMutex(&AddressSpace->Lock, FALSE);
ExReleaseFastMutex(&AddressSpace->Lock);
}
VOID
@ -77,7 +73,7 @@ MmInitializeAddressSpace(PEPROCESS Process,
PMADDRESS_SPACE AddressSpace)
{
InitializeListHead(&AddressSpace->MAreaListHead);
KeInitializeMutex(&AddressSpace->Lock, 1);
ExInitializeFastMutex(&AddressSpace->Lock);
if (Process != NULL)
{
AddressSpace->LowestAddress = MM_LOWEST_USER_ADDRESS;
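
Because ExAcquireFastMutex raises the caller to APC_LEVEL for as long as the
mutex is held, code holding the address-space lock now runs at APC_LEVEL until
MmUnlockAddressSpace, whereas the old KeWaitForMutexObject wait left the IRQL
at its previous level. A minimal sketch of the caller pattern, using
MmGetKernelAddressSpace() as elsewhere in this commit (the routine name is
hypothetical):

/* Hypothetical routine guarding a memory-area operation with the new lock. */
VOID
MiExampleKernelVaOperation(VOID)
{
   PMADDRESS_SPACE AddressSpace = MmGetKernelAddressSpace();

   /* ExAcquireFastMutex: raises to APC_LEVEL, blocks normal kernel APCs. */
   MmLockAddressSpace(AddressSpace);

   /* ... create, walk or free MEMORY_AREAs here ... */

   /* ExReleaseFastMutex: restores the previous IRQL. */
   MmUnlockAddressSpace(AddressSpace);
}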

ntoskrnl/mm/mdl.c

@ -1,4 +1,4 @@
/* $Id: mdl.c,v 1.39 2002/05/14 21:19:19 dwelch Exp $
/* $Id: mdl.c,v 1.40 2002/05/17 23:01:56 dwelch Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -23,8 +23,46 @@
#define TAG_MDL TAG('M', 'M', 'D', 'L')
#define MI_MDL_MAPPING_REGION_SIZE (256*1024*1024)
static PVOID MiMdlMappingRegionBase = NULL;
static PULONG MiMdlMappingRegionAllocMap = NULL;
static ULONG MiMdlMappingRegionHighWaterMark = 0;
static KSPIN_LOCK MiMdlMappingRegionLock;
/* FUNCTIONS *****************************************************************/
VOID
MmInitializeMdlImplementation(VOID)
{
MEMORY_AREA* Result;
NTSTATUS Status;
MiMdlMappingRegionBase = NULL;
MmLockAddressSpace(MmGetKernelAddressSpace());
Status = MmCreateMemoryArea(NULL,
MmGetKernelAddressSpace(),
MEMORY_AREA_MDL_MAPPING,
&MiMdlMappingRegionBase,
MI_MDL_MAPPING_REGION_SIZE,
0,
&Result,
FALSE);
if (!NT_SUCCESS(Status))
{
MmUnlockAddressSpace(MmGetKernelAddressSpace());
KeBugCheck(0);
}
MmUnlockAddressSpace(MmGetKernelAddressSpace());
MiMdlMappingRegionAllocMap =
ExAllocatePool(NonPagedPool,
MI_MDL_MAPPING_REGION_SIZE / (PAGESIZE * 32));
MiMdlMappingRegionHighWaterMark = 0;
KeInitializeSpinLock(&MiMdlMappingRegionLock);
}
PVOID
MmGetMdlPageAddress(PMDL Mdl, PVOID Offset)
{
@ -75,22 +113,23 @@ MmUnlockPages(PMDL Mdl)
Mdl->MdlFlags = Mdl->MdlFlags & (~MDL_PAGES_LOCKED);
}
PVOID STDCALL MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
PVOID STDCALL
MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
/*
* FUNCTION: Maps the physical pages described by a given MDL
* ARGUMENTS:
* Mdl = Points to an MDL updated by MmProbeAndLockPages
* AccessMode = Specifies the access mode in which to map the MDL
* AccessMode = Specifies the portion of the address space to map the
* pages.
* RETURNS: The base virtual address that maps the locked pages for the
* range described by the MDL
* FIXME: What does AccessMode do?
*/
{
PVOID Base;
ULONG i;
PULONG MdlPages;
MEMORY_AREA* Result;
NTSTATUS Status;
KIRQL oldIrql;
ULONG RegionSize;
DPRINT("MmMapLockedPages(Mdl %x, AccessMode %x)\n", Mdl, AccessMode);
@ -99,45 +138,49 @@ PVOID STDCALL MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
return(Mdl->MappedSystemVa);
}
MmLockAddressSpace(MmGetKernelAddressSpace());
Base = NULL;
Status = MmCreateMemoryArea(NULL,
MmGetKernelAddressSpace(),
MEMORY_AREA_MDL_MAPPING,
&Base,
Mdl->ByteCount + Mdl->ByteOffset,
0,
&Result,
FALSE);
if (!NT_SUCCESS(Status))
if (AccessMode == UserMode)
{
MmUnlockAddressSpace(MmGetKernelAddressSpace());
KeBugCheck(0);
return(STATUS_SUCCESS);
DPRINT1("MDL mapping to user-mode not yet handled.\n");
KeBugCheck(0);
}
MmUnlockAddressSpace(MmGetKernelAddressSpace());
/* Calculate the number of pages required. */
RegionSize = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGESIZE;
/* Allocate that number of pages from the mdl mapping region. */
KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
Base = MiMdlMappingRegionBase + MiMdlMappingRegionHighWaterMark * PAGESIZE;
for (i = 0; i < RegionSize; i++)
{
ULONG Offset = MiMdlMappingRegionHighWaterMark + i;
MiMdlMappingRegionAllocMap[Offset / 32] |= (1 << (Offset % 32));
}
MiMdlMappingRegionHighWaterMark += RegionSize;
KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
/* Set the virtual mappings for the MDL pages. */
MdlPages = (PULONG)(Mdl + 1);
for (i=0; i<(PAGE_ROUND_UP(Mdl->ByteCount+Mdl->ByteOffset)/PAGESIZE); i++)
for (i = 0; i < RegionSize; i++)
{
Status = MmCreateVirtualMapping(NULL,
(PVOID)((ULONG)Base+(i*PAGESIZE)),
PAGE_READWRITE,
MdlPages[i],
TRUE);
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");
KeBugCheck(0);
}
NTSTATUS Status;
Status = MmCreateVirtualMapping(NULL,
(PVOID)((ULONG)Base+(i*PAGESIZE)),
PAGE_READWRITE,
MdlPages[i],
FALSE);
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");
KeBugCheck(0);
}
}
/* Mark the MDL has having being mapped. */
Mdl->MdlFlags = Mdl->MdlFlags | MDL_MAPPED_TO_SYSTEM_VA;
Mdl->MappedSystemVa = Base + Mdl->ByteOffset;
return(Base + Mdl->ByteOffset);
}
VOID STDCALL
MmUnmapLockedPages(PVOID BaseAddress, PMDL Mdl)
/*
@ -147,26 +190,54 @@ MmUnmapLockedPages(PVOID BaseAddress, PMDL Mdl)
* MemoryDescriptorList = MDL describing the mapped pages
*/
{
DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", Mdl, BaseAddress);
KIRQL oldIrql;
ULONG i;
ULONG RegionSize;
ULONG Base;
/*
* In this case, the MDL has the same system address as the base address
* so there is no need to free it
*/
if (Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL)
{
return;
}
DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", Mdl, BaseAddress);
MmLockAddressSpace(MmGetKernelAddressSpace());
(VOID)MmFreeMemoryArea(MmGetKernelAddressSpace(),
BaseAddress - Mdl->ByteOffset,
Mdl->ByteOffset + Mdl->ByteCount,
NULL,
NULL);
Mdl->MdlFlags = Mdl->MdlFlags & ~MDL_MAPPED_TO_SYSTEM_VA;
Mdl->MappedSystemVa = NULL;
MmUnlockAddressSpace(MmGetKernelAddressSpace());
/*
* In this case, the MDL has the same system address as the base address
* so there is no need to free it
*/
if (Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL)
{
return;
}
/* Calculate the number of pages we mapped. */
RegionSize = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGESIZE;
KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
/* Deallocate all the pages used. */
Base = (ULONG)(BaseAddress - MiMdlMappingRegionBase - Mdl->ByteOffset);
Base = Base / PAGESIZE;
for (i = 0; i < RegionSize; i++)
{
ULONG Offset = Base + i;
MiMdlMappingRegionAllocMap[Offset / 32] &= ~(1 << (Offset % 32));
}
/* If all the pages below the high-water mark are free then move it down. */
if ((Base + RegionSize) == MiMdlMappingRegionHighWaterMark)
{
MiMdlMappingRegionHighWaterMark = Base;
}
KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
/* Unmap all the pages. */
for (i = 0; i < RegionSize; i++)
{
MmDeleteVirtualMapping(NULL,
BaseAddress + (i * PAGESIZE),
FALSE,
NULL,
NULL);
}
/* Reset the MDL state. */
Mdl->MdlFlags = Mdl->MdlFlags & ~MDL_MAPPED_TO_SYSTEM_VA;
Mdl->MappedSystemVa = NULL;
}
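
The point of the new MDL mapping region is that MmMapLockedPages no longer has
to create a MEMORY_AREA (and therefore take the address-space lock) for every
mapping; it carves page-aligned runs out of a 256 MB window reserved once at
startup, protected only by MiMdlMappingRegionLock. The allocator is a bitmap
plus a high-water mark. A self-contained sketch of the same scheme, with
generic names and PAGESIZE assumed to be 4096, for readers who want to see the
bookkeeping in isolation:

#include <assert.h>
#include <stdint.h>

#define PAGESIZE      4096u
#define REGION_PAGES  (256u * 1024u * 1024u / PAGESIZE)

static uint32_t AllocMap[REGION_PAGES / 32];   /* one bit per page */
static uint32_t HighWaterMark;                 /* next page index to hand out */

/* Allocate 'Pages' contiguous pages at the high-water mark and return the
 * starting page index (the kernel code turns this into a virtual address by
 * adding MiMdlMappingRegionBase). */
static uint32_t
AllocPages(uint32_t Pages)
{
   uint32_t Base = HighWaterMark;
   uint32_t i;

   assert(Base + Pages <= REGION_PAGES);
   for (i = 0; i < Pages; i++)
      AllocMap[(Base + i) / 32] |= 1u << ((Base + i) % 32);
   HighWaterMark += Pages;
   return Base;
}

/* Clear the bits for a run; the high-water mark only moves back down when the
 * topmost run is the one being freed, exactly as MmUnmapLockedPages does.
 * Runs freed lower down are not reused until the mark falls back past them,
 * a limitation this sketch shares with the committed code. */
static void
FreePages(uint32_t Base, uint32_t Pages)
{
   uint32_t i;

   for (i = 0; i < Pages; i++)
      AllocMap[(Base + i) / 32] &= ~(1u << ((Base + i) % 32));
   if (Base + Pages == HighWaterMark)
      HighWaterMark = Base;
}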

ntoskrnl/mm/mminit.c

@ -1,4 +1,4 @@
/* $Id: mminit.c,v 1.33 2002/05/14 21:19:19 dwelch Exp $
/* $Id: mminit.c,v 1.34 2002/05/17 23:01:56 dwelch Exp $
*
* COPYRIGHT: See COPYING in the top directory
* PROJECT: ReactOS kernel
@ -338,6 +338,8 @@ VOID MmInit1(ULONG FirstKrnlPhysAddr,
* Intialize memory areas
*/
MmInitVirtualMemory(LastKernelAddress, kernel_len);
MmInitializeMdlImplementation();
}
VOID MmInit2(VOID)

ntoskrnl/mm/section.c

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: section.c,v 1.82 2002/05/14 21:19:19 dwelch Exp $
/* $Id: section.c,v 1.83 2002/05/17 23:01:56 dwelch Exp $
*
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/section.c
@ -2396,7 +2396,7 @@ NtOpenSection(PHANDLE SectionHandle,
return(Status);
}
NTSTATUS
NTSTATUS STATIC
MmMapViewOfSegment(PEPROCESS Process,
PMADDRESS_SPACE AddressSpace,
PSECTION_OBJECT Section,
@ -2410,7 +2410,6 @@ MmMapViewOfSegment(PEPROCESS Process,
NTSTATUS Status;
KIRQL oldIrql;
MmLockAddressSpace(&Process->AddressSpace);
if (Protect == PAGE_NOACCESS || Protect == PAGE_GUARD)
{
DPRINT1("Mapping inaccessible region between 0x%.8X and 0x%.8X\n",
@ -2424,29 +2423,28 @@ MmMapViewOfSegment(PEPROCESS Process,
Protect,
&MArea,
FALSE);
MmUnlockAddressSpace(&Process->AddressSpace);
if (!NT_SUCCESS(Status))
{
DPRINT1("Mapping between 0x%.8X and 0x%.8X failed.\n",
(*BaseAddress), (*BaseAddress) + ViewSize);
return(Status);
}
if (!NT_SUCCESS(Status))
{
DPRINT1("Mapping between 0x%.8X and 0x%.8X failed.\n",
(*BaseAddress), (*BaseAddress) + ViewSize);
return(Status);
}
KeAcquireSpinLock(&Section->ViewListLock, &oldIrql);
InsertTailList(&Section->ViewListHead,
&MArea->Data.SectionData.ViewListEntry);
KeReleaseSpinLock(&Section->ViewListLock, oldIrql);
KeAcquireSpinLock(&Section->ViewListLock, &oldIrql);
InsertTailList(&Section->ViewListHead,
&MArea->Data.SectionData.ViewListEntry);
KeReleaseSpinLock(&Section->ViewListLock, oldIrql);
ObReferenceObjectByPointer((PVOID)Section,
SECTION_MAP_READ,
NULL,
ExGetPreviousMode());
MArea->Data.SectionData.Segment = Segment;
MArea->Data.SectionData.Section = Section;
MArea->Data.SectionData.ViewOffset = ViewOffset;
MArea->Data.SectionData.WriteCopyView = FALSE;
ObReferenceObjectByPointer((PVOID)Section,
SECTION_MAP_READ,
NULL,
ExGetPreviousMode());
MArea->Data.SectionData.Segment = Segment;
MArea->Data.SectionData.Section = Section;
MArea->Data.SectionData.ViewOffset = ViewOffset;
MArea->Data.SectionData.WriteCopyView = FALSE;
return(STATUS_SUCCESS);
return(STATUS_SUCCESS);
}
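
The section.c change moves lock ownership up one level: MmMapViewOfSegment is
now STATIC and assumes its caller already holds the address-space lock, rather
than taking it around MmCreateMemoryArea itself. Since the full parameter list
is not shown in this hunk, here is the convention sketched with hypothetical
names:

/* Hypothetical illustration of the new locking convention: the static helper
 * relies on the caller holding AddressSpace->Lock. */
static NTSTATUS
MiMapSegmentLocked(PMADDRESS_SPACE AddressSpace)
{
   /* Lock already held: safe to call MmCreateMemoryArea and to touch the
    * memory-area list; must not call MmLockAddressSpace again, since a
    * FAST_MUTEX cannot be acquired recursively. */
   return(STATUS_SUCCESS);
}

NTSTATUS
MiMapSegmentCaller(PMADDRESS_SPACE AddressSpace)
{
   NTSTATUS Status;

   MmLockAddressSpace(AddressSpace);
   Status = MiMapSegmentLocked(AddressSpace);
   MmUnlockAddressSpace(AddressSpace);
   return(Status);
}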

ntoskrnl/ps/thread.c

@ -1,4 +1,4 @@
/* $Id: thread.c,v 1.92 2002/05/14 21:19:21 dwelch Exp $
/* $Id: thread.c,v 1.93 2002/05/17 23:01:56 dwelch Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -125,7 +125,7 @@ VOID PsDumpThreads(BOOLEAN IncludeSystem)
i = 0;
while (Ebp != 0 && Ebp >= (PULONG)current->Tcb.StackLimit)
{
DbgPrint("%.8X%s", Ebp[0], Ebp[1],
DbgPrint("%.8X %.8X%s", Ebp[0], Ebp[1],
(i % 8) == 7 ? "\n" : " ");
Ebp = (PULONG)Ebp[0];
i++;
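
On the thread.c fix: the old DbgPrint format string "%.8X%s" had two
conversions but was passed three arguments, so Ebp[1] was consumed by the %s
conversion and interpreted as a string pointer, producing the garbage output.
The new string "%.8X %.8X%s" pairs every argument with a conversion. A
standalone illustration of the same mismatch, with plain printf and made-up
values:

#include <stdio.h>

int main(void)
{
   unsigned long frame[2] = { 0xC0123456UL, 0x80ABCDEFUL };

   /* Broken (kept commented out): two conversions, three arguments, so
    * frame[1] would be treated as a char pointer by %s. */
   /* printf("%.8lX%s", frame[0], frame[1], "\n"); */

   /* Fixed, mirroring the new DbgPrint call: one conversion per value. */
   printf("%.8lX %.8lX%s", frame[0], frame[1], "\n");
   return 0;
}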