- Fix IRP/Completion packet lookaside allocation. We weren't setting a lookaside depth, and I/O Completion packets were also using the wrong size (the general lookaside pattern is sketched below).

- Rewrite I/O MDL support to use lookaside lists for allocations of up to 23 pages (same as on NT). This is a major performance optimization because MDLs are constantly allocated and freed during I/O operations, and using the lookaside list reduces pool fragmentation and allocation overhead (see the MDL usage sketch below).
- Rewrite IoBuildPartialMdl. It did not work as documented in the DDK and also reproduced a bug that Microsoft documents as present in XP.
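
For reference, this is the general nonpaged lookaside pattern those lists follow. It is only a sketch with invented names (EXAMPLE_PACKET, ExamplePacketLookaside, the 'ExPk' tag), not code from this commit: the entry size and pool tag are fixed at initialization, the last parameter is the depth, and later allocations and frees are served from the per-list cache instead of going through the general pool allocator.

    #include <ntddk.h>

    /* Illustrative fixed-size packet type and tag (not from this commit) */
    typedef struct _EXAMPLE_PACKET
    {
        LIST_ENTRY Link;
        ULONG Data;
    } EXAMPLE_PACKET, *PEXAMPLE_PACKET;

    #define EXAMPLE_TAG 'kPxE'

    NPAGED_LOOKASIDE_LIST ExamplePacketLookaside;

    VOID
    ExampleInitLookaside(VOID)
    {
        /* Default allocate/free routines, fixed size and tag; the final
           argument is the depth (drivers normally pass 0 and let the kernel
           tune it, while this commit passes explicit depths for the internal
           IRP/completion/MDL lists) */
        ExInitializeNPagedLookasideList(&ExamplePacketLookaside,
                                        NULL,
                                        NULL,
                                        0,
                                        sizeof(EXAMPLE_PACKET),
                                        EXAMPLE_TAG,
                                        0);
    }

    VOID
    ExampleUseLookaside(VOID)
    {
        /* Frequent fixed-size allocations hit the per-list cache... */
        PEXAMPLE_PACKET Packet = ExAllocateFromNPagedLookasideList(&ExamplePacketLookaside);
        if (!Packet) return;

        Packet->Data = 1;

        /* ...and return to it, instead of churning the nonpaged pool */
        ExFreeToNPagedLookasideList(&ExamplePacketLookaside, Packet);
    }

At unload time such a list would be torn down with ExDeleteNPagedLookasideList.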

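A second sketch, also with invented names (ExerciseMdlPaths, the 'SmdL' tag) and a nonpaged-pool buffer so no probing or locking is needed: an IoAllocateMdl request this small is the kind the new MDL lookaside now serves, IoBuildPartialMdl describes a one-page sub-range of the master MDL, and the partial MDL is freed before its master.

    #include <ntddk.h>

    #define EXAMPLE_POOL_TAG 'LdmS'   /* illustrative tag, not from this commit */

    NTSTATUS
    ExerciseMdlPaths(VOID)
    {
        SIZE_T BufferSize = 3 * PAGE_SIZE;
        PVOID Buffer;
        PMDL FullMdl, PartialMdl;

        /* Back the MDLs with nonpaged pool so the pages are always resident */
        Buffer = ExAllocatePoolWithTag(NonPagedPool, BufferSize, EXAMPLE_POOL_TAG);
        if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;

        /* Describe the whole buffer; a 3-page MDL fits the fixed-size lookaside */
        FullMdl = IoAllocateMdl(Buffer, (ULONG)BufferSize, FALSE, FALSE, NULL);
        if (!FullMdl)
        {
            ExFreePoolWithTag(Buffer, EXAMPLE_POOL_TAG);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        MmBuildMdlForNonPagedPool(FullMdl);

        /* Describe one page in the middle of the master MDL */
        PartialMdl = IoAllocateMdl((PUCHAR)Buffer + PAGE_SIZE, PAGE_SIZE,
                                   FALSE, FALSE, NULL);
        if (PartialMdl)
        {
            IoBuildPartialMdl(FullMdl, PartialMdl,
                              (PUCHAR)Buffer + PAGE_SIZE, PAGE_SIZE);

            /* ...hand PartialMdl to a lower driver here... */

            /* Free the partial MDL before the master it was built from */
            IoFreeMdl(PartialMdl);
        }

        IoFreeMdl(FullMdl);
        ExFreePoolWithTag(Buffer, EXAMPLE_POOL_TAG);
        return STATUS_SUCCESS;
    }
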
svn path=/trunk/; revision=22717
Alex Ionescu 2006-06-30 15:59:06 +00:00
parent 77aa2de357
commit f13cb8709c
3 changed files with 162 additions and 96 deletions

View file

@@ -48,6 +48,14 @@
 //
 #define PARTITION_TBL_SIZE 4
 
+//
+// We can call the Ob Inlined API, it's the same thing
+//
+#define IopAllocateMdlFromLookaside \
+    ObpAllocateCapturedAttributes
+#define IopFreeMdlFromLookaside \
+    ObpFreeCapturedAttributes
+
 //
 // Returns the size of a CM_RESOURCE_LIST
 //
@@ -830,4 +838,4 @@ xHalIoWritePartitionTable(
 extern POBJECT_TYPE IoCompletionType;
 extern PDEVICE_NODE IopRootDeviceNode;
 extern ULONG IopTraceLevel;
+extern NPAGED_LOOKASIDE_LIST IopMdlLookasideList;

View file

@@ -41,6 +41,7 @@ extern NPAGED_LOOKASIDE_LIST IoCompletionPacketLookaside;
 extern POBJECT_TYPE IoAdapterObjectType;
 
 NPAGED_LOOKASIDE_LIST IoLargeIrpLookaside;
 NPAGED_LOOKASIDE_LIST IoSmallIrpLookaside;
+NPAGED_LOOKASIDE_LIST IopMdlLookasideList;
 
 VOID INIT_FUNCTION IopInitLookasideLists(VOID);
@@ -74,7 +75,7 @@ VOID
 INIT_FUNCTION
 IopInitLookasideLists(VOID)
 {
-    ULONG LargeIrpSize, SmallIrpSize;
+    ULONG LargeIrpSize, SmallIrpSize, MdlSize;
     LONG i;
     PKPRCB Prcb;
     PNPAGED_LOOKASIDE_LIST CurrentList = NULL;
@@ -82,6 +83,7 @@ IopInitLookasideLists(VOID)
     /* Calculate the sizes */
     LargeIrpSize = sizeof(IRP) + (8 * sizeof(IO_STACK_LOCATION));
     SmallIrpSize = sizeof(IRP) + sizeof(IO_STACK_LOCATION);
+    MdlSize = sizeof(MDL) + (23 * sizeof(PFN_NUMBER));
 
     /* Initialize the Lookaside List for Large IRPs */
     ExInitializeNPagedLookasideList(&IoLargeIrpLookaside,
@@ -90,7 +92,7 @@ IopInitLookasideLists(VOID)
                                     0,
                                     LargeIrpSize,
                                     IO_LARGEIRP,
-                                    0);
+                                    64);
 
     /* Initialize the Lookaside List for Small IRPs */
     ExInitializeNPagedLookasideList(&IoSmallIrpLookaside,
@@ -99,7 +101,7 @@ IopInitLookasideLists(VOID)
                                     0,
                                     SmallIrpSize,
                                     IO_SMALLIRP,
-                                    0);
+                                    32);
 
     /* Initialize the Lookaside List for I\O Completion */
     ExInitializeNPagedLookasideList(&IoCompletionPacketLookaside,
@@ -108,7 +110,16 @@ IopInitLookasideLists(VOID)
                                     0,
                                     sizeof(IO_COMPLETION_PACKET),
                                     IOC_TAG1,
-                                    0);
+                                    32);
+
+    /* Initialize the Lookaside List for MDLs */
+    ExInitializeNPagedLookasideList(&IopMdlLookasideList,
+                                    NULL,
+                                    NULL,
+                                    0,
+                                    MdlSize,
+                                    TAG_MDL,
+                                    128);
 
     /* Now allocate the per-processor lists */
     for (i = 0; i < KeNumberProcessors; i++)
@@ -131,7 +142,7 @@ IopInitLookasideLists(VOID)
                                             0,
                                             LargeIrpSize,
                                             IO_LARGEIRP_CPU,
-                                            0);
+                                            64);
         }
         else
         {
@@ -146,14 +157,14 @@ IopInitLookasideLists(VOID)
                                         IO_SMALLIRP_CPU);
         if (CurrentList)
        {
-            /* Initialize the Lookaside List for Large IRPs */
+            /* Initialize the Lookaside List for Small IRPs */
             ExInitializeNPagedLookasideList(CurrentList,
                                             NULL,
                                             NULL,
                                             0,
                                             SmallIrpSize,
                                             IO_SMALLIRP_CPU,
-                                            0);
+                                            32);
         }
         else
         {
@@ -173,15 +184,37 @@ IopInitLookasideLists(VOID)
                                             NULL,
                                             NULL,
                                             0,
-                                            SmallIrpSize,
-                                            IOC_CPU,
-                                            0);
+                                            sizeof(IO_COMPLETION_PACKET),
+                                            IO_SMALLIRP_CPU,
+                                            32);
         }
         else
         {
            CurrentList = &IoCompletionPacketLookaside;
         }
         Prcb->PPLookasideList[LookasideCompletionList].P = &CurrentList->L;
+
+        /* Set the MDL Completion List */
+        Prcb->PPLookasideList[LookasideMdlList].L = &IopMdlLookasideList.L;
+        CurrentList = ExAllocatePoolWithTag(NonPagedPool,
+                                            sizeof(NPAGED_LOOKASIDE_LIST),
+                                            TAG_MDL);
+        if (CurrentList)
+        {
+            /* Initialize the Lookaside List for MDLs */
+            ExInitializeNPagedLookasideList(CurrentList,
+                                            NULL,
+                                            NULL,
+                                            0,
+                                            SmallIrpSize,
+                                            TAG_MDL,
+                                            128);
+        }
+        else
+        {
+            CurrentList = &IopMdlLookasideList;
+        }
+        Prcb->PPLookasideList[LookasideMdlList].P = &CurrentList->L;
     }
 
     DPRINT("Done allocation\n");

View file

@@ -1,11 +1,9 @@
-/* $Id$
- *
- * COPYRIGHT: See COPYING in the top level directory
- * PROJECT: ReactOS kernel
- * FILE: ntoskrnl/io/mdl.c
- * PURPOSE: Io manager mdl functions
- *
- * PROGRAMMERS: David Welch (welch@mcmail.com)
+/*
+ * PROJECT: ReactOS Kernel
+ * LICENSE: GPL - See COPYING in the top level directory
+ * FILE: ntoskrnl/io/mdl.c
+ * PURPOSE: I/O Wrappers for MDL Allocation and Deallocation
+ * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
  */
 
 /* INCLUDES *****************************************************************/
@@ -20,116 +18,143 @@
  * @implemented
  */
 PMDL
-STDCALL
-IoAllocateMdl(PVOID VirtualAddress,
-              ULONG Length,
-              BOOLEAN SecondaryBuffer,
-              BOOLEAN ChargeQuota,
-              PIRP Irp)
+NTAPI
+IoAllocateMdl(IN PVOID VirtualAddress,
+              IN ULONG Length,
+              IN BOOLEAN SecondaryBuffer,
+              IN BOOLEAN ChargeQuota,
+              IN PIRP Irp)
 {
-    PMDL Mdl;
+    PMDL Mdl = NULL, p;
+    ULONG Flags = 0;
+    ULONG Size;
 
-    if (ChargeQuota)
-    {
-//      Mdl = ExAllocatePoolWithQuota(NonPagedPool,
-//                                    MmSizeOfMdl(VirtualAddress,Length));
-        Mdl = ExAllocatePoolWithTag(NonPagedPool,
-                                    MmSizeOfMdl(VirtualAddress,Length),
-                                    TAG_MDL);
-    }
-    else
-    {
-        Mdl = ExAllocatePoolWithTag(NonPagedPool,
-                                    MmSizeOfMdl(VirtualAddress,Length),
-                                    TAG_MDL);
-    }
-    MmInitializeMdl(Mdl, (char*)VirtualAddress, Length);
+    /* Fail if allocation is over 2GB */
+    if (Length & 0x80000000) return NULL;
 
-    if (Irp)
-    {
-        if (SecondaryBuffer)
+    /* Calculate the number of pages for the allocation */
+    Size = ADDRESS_AND_SIZE_TO_SPAN_PAGES(VirtualAddress, Length);
+    if (Size > 23)
+    {
+        /* This is bigger then our fixed-size MDLs. Calculate real size */
+        Size *= sizeof(PFN_NUMBER);
+        Size += sizeof(MDL);
+        if (Size > MAXUSHORT) return NULL;
+    }
+    else
+    {
+        /* Use an internal fixed MDL size */
+        Size = (23 * sizeof(PFN_NUMBER)) + sizeof(MDL);
+        Flags |= MDL_ALLOCATED_FIXED_SIZE;
+
+        /* Allocate one from the lookaside list */
+        Mdl = IopAllocateMdlFromLookaside(LookasideMdlList);
+    }
+
+    /* Check if we don't have an mdl yet */
+    if (!Mdl)
+    {
+        /* Allocate one from pool */
+        Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
+        if (!Mdl) return NULL;
+    }
+
+    /* Initialize it */
+    MmInitializeMdl(Mdl, VirtualAddress, Length);
+    Mdl->MdlFlags |= Flags;
+
+    /* Check if an IRP was given too */
+    if (Irp)
+    {
+        /* Check if it came with a secondary buffer */
+        if (SecondaryBuffer)
         {
-            ASSERT(Irp->MdlAddress);
-
-            /* FIXME: add to end of list maybe?? */
-            Mdl->Next = Irp->MdlAddress->Next;
-            Irp->MdlAddress->Next = Mdl;
+            /* Insert the MDL at the end */
+            p = Irp->MdlAddress;
+            while (p->Next) p = p->Next;
+            p->Next = Mdl;
         }
         else
        {
-            /*
-             * What if there's allready an mdl at Irp->MdlAddress?
-             * Is that bad and should we do something about it?
-             */
-            Irp->MdlAddress = Mdl;
+            /* Otherwise, insert it directly */
+            Irp->MdlAddress = Mdl;
         }
     }
 
-    return(Mdl);
+    /* Return the allocated mdl */
+    return Mdl;
 }
 
 /*
  * @implemented
- *
- * You must IoFreeMdl the slave before freeing the master.
- *
- * IoBuildPartialMdl is more similar to MmBuildMdlForNonPagedPool, the difference
- * is that the former takes the physical addresses from the master MDL, while the
- * latter - from the known location of the NPP.
  */
 VOID
-STDCALL
-IoBuildPartialMdl(PMDL SourceMdl,
-                  PMDL TargetMdl,
-                  PVOID VirtualAddress,
-                  ULONG Length)
+NTAPI
+IoBuildPartialMdl(IN PMDL SourceMdl,
+                  IN PMDL TargetMdl,
+                  IN PVOID VirtualAddress,
+                  IN ULONG Length)
 {
     PPFN_TYPE TargetPages = (PPFN_TYPE)(TargetMdl + 1);
     PPFN_TYPE SourcePages = (PPFN_TYPE)(SourceMdl + 1);
-    ULONG Count;
-    ULONG Delta;
+    ULONG Offset;
 
-    DPRINT("VirtualAddress 0x%p, SourceMdl->StartVa 0x%p, SourceMdl->MappedSystemVa 0x%p\n",
-           VirtualAddress, SourceMdl->StartVa, SourceMdl->MappedSystemVa);
+    /* Calculate the offset */
+    Offset = (ULONG)((ULONG_PTR)VirtualAddress -
                      (ULONG_PTR)SourceMdl->StartVa) -
+                     SourceMdl->ByteOffset;
 
-    TargetMdl->StartVa = (PVOID)PAGE_ROUND_DOWN(VirtualAddress);
-    TargetMdl->ByteOffset = (ULONG_PTR)VirtualAddress - (ULONG_PTR)TargetMdl->StartVa;
-    TargetMdl->ByteCount = Length;
-    TargetMdl->Process = SourceMdl->Process;
-    Delta = (ULONG_PTR)VirtualAddress - ((ULONG_PTR)SourceMdl->StartVa + SourceMdl->ByteOffset);
-    TargetMdl->MappedSystemVa = (char*)SourceMdl->MappedSystemVa + Delta;
+    /* Check if we don't have a length and calculate it */
+    if (!Length) Length = SourceMdl->ByteCount - Offset;
 
-    TargetMdl->MdlFlags = SourceMdl->MdlFlags & (MDL_IO_PAGE_READ|MDL_SOURCE_IS_NONPAGED_POOL|MDL_MAPPED_TO_SYSTEM_VA);
-    TargetMdl->MdlFlags |= MDL_PARTIAL;
+    /* Write the process, start VA and byte data */
+    TargetMdl->StartVa = (PVOID)PAGE_ROUND_DOWN(VirtualAddress);
+    TargetMdl->Process = SourceMdl->Process;
+    TargetMdl->ByteCount = Length;
+    TargetMdl->ByteOffset = BYTE_OFFSET(VirtualAddress);
 
-    Delta = ((ULONG_PTR)TargetMdl->StartVa - (ULONG_PTR)SourceMdl->StartVa) / PAGE_SIZE;
-    Count = ADDRESS_AND_SIZE_TO_SPAN_PAGES(VirtualAddress,Length);
+    /* Recalculate the length in pages */
+    Length = ADDRESS_AND_SIZE_TO_SPAN_PAGES(VirtualAddress, Length);
 
-    SourcePages += Delta;
+    /* Set the MDL Flags */
+    TargetMdl->MdlFlags = (MDL_ALLOCATED_FIXED_SIZE | MDL_ALLOCATED_MUST_SUCCEED);
+    TargetMdl->MdlFlags |= (MDL_IO_PAGE_READ |
+                            MDL_SOURCE_IS_NONPAGED_POOL |
+                            MDL_MAPPED_TO_SYSTEM_VA |
+                            MDL_IO_SPACE);
+    TargetMdl->MdlFlags |= MDL_PARTIAL;
 
-    DPRINT("Delta %d, Count %d\n", Delta, Count);
+    /* Set the mapped VA */
+    TargetMdl->MappedSystemVa = (PCHAR)SourceMdl->MappedSystemVa + Offset;
 
-    memcpy(TargetPages, SourcePages, Count * sizeof(PFN_TYPE));
+    /* Now do the copy */
+    Offset = ((ULONG_PTR)TargetMdl->StartVa - (ULONG_PTR)SourceMdl->StartVa) >>
+             PAGE_SHIFT;
+    SourcePages += Offset;
+    RtlMoveMemory(TargetPages, SourcePages, Length * sizeof(PFN_TYPE));
 }
 
 /*
  * @implemented
 */
-VOID STDCALL
+VOID
+NTAPI
 IoFreeMdl(PMDL Mdl)
 {
-    /*
-     * This unmaps partial mdl's from kernel space but also asserts that non-partial
-     * mdl's isn't still mapped into kernel space.
-     */
-    ASSERT(Mdl);
-    ASSERT_IRQL(DISPATCH_LEVEL);
+    /* Tell Mm to reuse the MDL */
+    MmPrepareMdlForReuse(Mdl);
 
-    MmPrepareMdlForReuse(Mdl);
-
-    ExFreePoolWithTag(Mdl, TAG_MDL);
+    /* Check if this was a pool allocation */
+    if (!(Mdl->MdlFlags & MDL_ALLOCATED_FIXED_SIZE))
+    {
+        /* Free it from the pool */
+        ExFreePoolWithTag(Mdl, TAG_MDL);
+    }
+    else
+    {
+        /* Free it from the lookaside */
+        IopFreeMdlFromLookaside(Mdl, LookasideMdlList);
+    }
 }
 
 /* EOF */