[0.4.14][RTL] Optimize RtlpFindAndCommitPages CORE-14588

A squashed backmerge of 3 commits authored by Thomas Faber.
They noticeably speed up RosBE 2.1.6 when run within ReactOS:
'configure' runs faster by roughly a factor of 5,
'ninja bootcd -j1' by roughly a factor of 2.
I considered back-porting them a necessity, because we had lost
considerable speed in those scenarios over the last years.
I tested the change, and it also works well on the 0.4.14 RC.
(The idea behind the optimization is sketched below, after the commit list.)

0.4.15-dev-309-g10d23614d3
Use LastEntryInSegment to speed up RtlpFindAndCommitPages. CORE-14588
--------------------------
0.4.15-dev-307-g1b7a4b3ebf
Add and populate LastEntryInSegment. CORE-14588
--------------------------
0.4.15-dev-306-g78dddd125c
Create a define for the common members of HEAP and HEAP_SEGMENT. CORE-14588

The code relies on these members matching up, so it's confusing for them
to be duplicated.

Authored by Thomas Faber on 2020-02-01 21:34:03 +01:00, committed by Joachim Henze
parent 0e72f93687
commit 4111dfa8bf
2 changed files with 100 additions and 38 deletions
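
The sketch below illustrates the idea behind these commits, using simplified,
hypothetical types (ENTRY, SEGMENT and ENTRY_LAST are stand-ins, not the real
HEAP_ENTRY/HEAP_SEGMENT/HEAP_ENTRY_LAST_ENTRY definitions): previously,
finding the last committed entry of a segment meant walking every entry from
the start of the segment, which RtlpFindAndCommitPages does whenever new
pages are committed. With the backport, the segment caches a pointer to that
entry in LastEntryInSegment, so the lookup is usually O(1) and the full walk
only remains as a fallback.

#include <assert.h>

#define ENTRY_LAST 0x10   /* stand-in for HEAP_ENTRY_LAST_ENTRY */

typedef struct ENTRY
{
    unsigned short Size;    /* size of this block, counted in ENTRY units */
    unsigned char Flags;
} ENTRY;

typedef struct SEGMENT
{
    ENTRY *FirstEntry;
    ENTRY *LastEntryInSegment;   /* cache: must always point at a valid entry;
                                    reset to FirstEntry when in doubt, e.g.
                                    after decommitting the area it points to */
} SEGMENT;

/* Old behaviour: walk every entry of the segment, O(number of entries). */
static ENTRY *FindLastEntrySlow(SEGMENT *Segment)
{
    ENTRY *Entry = Segment->FirstEntry;
    while (!(Entry->Flags & ENTRY_LAST))
    {
        assert(Entry->Size != 0);
        Entry += Entry->Size;
    }
    return Entry;
}

/* New behaviour: start from the cached pointer and fall back to the walk
 * only when the cache does not point at the last entry. */
static ENTRY *FindLastEntryFast(SEGMENT *Segment)
{
    ENTRY *Entry = Segment->LastEntryInSegment;
    if (!(Entry->Flags & ENTRY_LAST))
        Entry = FindLastEntrySlow(Segment);
    return Entry;
}

int main(void)
{
    /* Three blocks of 2, 3 and 3 units; the last one carries ENTRY_LAST. */
    ENTRY Entries[6] = { {2, 0}, {0, 0}, {3, 0}, {0, 0}, {0, 0}, {3, ENTRY_LAST} };
    SEGMENT Segment = { Entries, Entries };   /* cache starts at FirstEntry */

    assert(FindLastEntryFast(&Segment) == &Entries[5]);   /* falls back once */

    /* Whoever marks a block as the new last entry also updates the cache,
     * so subsequent lookups are O(1). */
    Segment.LastEntryInSegment = &Entries[5];
    assert(FindLastEntryFast(&Segment) == &Entries[5]);
    return 0;
}

The real RtlpFindAndCommitPages additionally checks that the cached entry
really borders the range being committed before trusting it, as the diff
below shows.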

sdk/lib/rtl/heap.c

@@ -665,16 +665,21 @@ RtlpFindAndCommitPages(PHEAP Heap,
/* Calculate first and last entries */
FirstEntry = (PHEAP_ENTRY)Address;
/* Go through the entries to find the last one */
if (PreviousUcr)
LastEntry = (PHEAP_ENTRY)((ULONG_PTR)PreviousUcr->Address + PreviousUcr->Size);
else
LastEntry = &Segment->Entry;
while (!(LastEntry->Flags & HEAP_ENTRY_LAST_ENTRY))
LastEntry = Segment->LastEntryInSegment;
if (!(LastEntry->Flags & HEAP_ENTRY_LAST_ENTRY) ||
LastEntry + LastEntry->Size != FirstEntry)
{
ASSERT(LastEntry->Size != 0);
LastEntry += LastEntry->Size;
/* Go through the entries to find the last one */
if (PreviousUcr)
LastEntry = (PHEAP_ENTRY)((ULONG_PTR)PreviousUcr->Address + PreviousUcr->Size);
else
LastEntry = &Segment->Entry;
while (!(LastEntry->Flags & HEAP_ENTRY_LAST_ENTRY))
{
ASSERT(LastEntry->Size != 0);
LastEntry += LastEntry->Size;
}
}
ASSERT((LastEntry + LastEntry->Size) == FirstEntry);
@@ -702,10 +707,12 @@ RtlpFindAndCommitPages(PHEAP Heap,
if(UcrDescriptor->Address == Segment->LastValidEntry)
{
FirstEntry->Flags = HEAP_ENTRY_LAST_ENTRY;
Segment->LastEntryInSegment = FirstEntry;
}
else
{
FirstEntry->Flags = 0;
Segment->LastEntryInSegment = Segment->FirstEntry;
/* Update field of next entry */
ASSERT((FirstEntry + FirstEntry->Size)->PreviousSize == 0);
(FirstEntry + FirstEntry->Size)->PreviousSize = FirstEntry->Size;
@@ -720,6 +727,7 @@ RtlpFindAndCommitPages(PHEAP Heap,
else
{
FirstEntry->Flags = HEAP_ENTRY_LAST_ENTRY;
Segment->LastEntryInSegment = FirstEntry;
}
/* We're done */
@@ -841,6 +849,7 @@ RtlpDeCommitFreeBlock(PHEAP Heap,
FreeEntry->Flags = HEAP_ENTRY_LAST_ENTRY;
FreeEntry->Size = (USHORT)PrecedingSize;
Heap->TotalFreeSize += PrecedingSize;
Segment->LastEntryInSegment = FreeEntry;
/* Insert it into the free list */
RtlpInsertFreeBlockHelper(Heap, FreeEntry, PrecedingSize, FALSE);
@@ -849,6 +858,13 @@ RtlpDeCommitFreeBlock(PHEAP Heap,
{
/* Adjust preceding in use entry */
PrecedingInUseEntry->Flags |= HEAP_ENTRY_LAST_ENTRY;
Segment->LastEntryInSegment = PrecedingInUseEntry;
}
else if ((ULONG_PTR)Segment->LastEntryInSegment >= DecommitBase &&
(ULONG_PTR)Segment->LastEntryInSegment < DecommitBase + DecommitSize)
{
/* Invalidate last entry */
Segment->LastEntryInSegment = Segment->FirstEntry;
}
/* Now the next one */
@@ -933,6 +949,9 @@ RtlpInitializeHeapSegment(IN OUT PHEAP Heap,
RtlpInsertFreeBlock(Heap, (PHEAP_FREE_ENTRY) HeapEntry, (SegmentCommit >> HEAP_ENTRY_SHIFT) - Segment->Entry.Size);
}
/* Always point to a valid entry */
Segment->LastEntryInSegment = Segment->FirstEntry;
/* Initialise the Heap Segment UnCommitted Range information */
Segment->NumberOfUnCommittedPages = (ULONG)((SegmentReserve - SegmentCommit) >> PAGE_SHIFT);
Segment->NumberOfUnCommittedRanges = 0;
@@ -984,6 +1003,7 @@ RtlpCoalesceFreeBlocks (PHEAP Heap,
BOOLEAN Remove)
{
PHEAP_FREE_ENTRY CurrentEntry, NextEntry;
UCHAR SegmentOffset;
/* Get the previous entry */
CurrentEntry = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)FreeEntry - FreeEntry->PreviousSize);
@@ -1022,6 +1042,12 @@ RtlpCoalesceFreeBlocks (PHEAP Heap,
{
((PHEAP_ENTRY)FreeEntry + *FreeSize)->PreviousSize = (USHORT)(*FreeSize);
}
else
{
SegmentOffset = FreeEntry->SegmentOffset;
ASSERT(SegmentOffset < HEAP_SEGMENTS);
Heap->Segments[SegmentOffset]->LastEntryInSegment = FreeEntry;
}
}
/* Check the next block if it exists */
@@ -1057,6 +1083,12 @@ RtlpCoalesceFreeBlocks (PHEAP Heap,
{
((PHEAP_ENTRY)FreeEntry + *FreeSize)->PreviousSize = (USHORT)(*FreeSize);
}
else
{
SegmentOffset = FreeEntry->SegmentOffset;
ASSERT(SegmentOffset < HEAP_SEGMENTS);
Heap->Segments[SegmentOffset]->LastEntryInSegment = FreeEntry;
}
}
}
return FreeEntry;
@@ -1637,6 +1669,7 @@ RtlpSplitEntry(PHEAP Heap,
UCHAR FreeFlags, EntryFlags = HEAP_ENTRY_BUSY;
PHEAP_ENTRY InUseEntry;
SIZE_T FreeSize;
UCHAR SegmentOffset;
/* Add extra flags in case of settable user value feature is requested,
or there is a tag (small or normal) or there is a request to
@@ -1748,6 +1781,12 @@ RtlpSplitEntry(PHEAP Heap,
/* Reset flags of the free entry */
FreeFlags = 0;
if (SplitBlock->Flags & HEAP_ENTRY_LAST_ENTRY)
{
SegmentOffset = SplitBlock->SegmentOffset;
ASSERT(SegmentOffset < HEAP_SEGMENTS);
Heap->Segments[SegmentOffset]->LastEntryInSegment = SplitBlock;
}
}
}
@@ -2311,6 +2350,7 @@ RtlpGrowBlockInPlace (IN PHEAP Heap,
PHEAP_FREE_ENTRY FreeEntry, UnusedEntry, FollowingEntry;
SIZE_T FreeSize, PrevSize, TailPart, AddedSize = 0;
PHEAP_ENTRY_EXTRA OldExtra, NewExtra;
UCHAR SegmentOffset;
/* We can't grow beyond specified threshold */
if (Index > Heap->VirtualMemoryThreshold)
@@ -2407,9 +2447,17 @@ RtlpGrowBlockInPlace (IN PHEAP Heap,
InUseEntry->Flags |= RememberFlags & HEAP_ENTRY_LAST_ENTRY;
/* Either update previous size of the next entry or mark it as a last
entry in the segment*/
entry in the segment */
if (!(RememberFlags & HEAP_ENTRY_LAST_ENTRY))
{
(InUseEntry + InUseEntry->Size)->PreviousSize = InUseEntry->Size;
}
else
{
SegmentOffset = InUseEntry->SegmentOffset;
ASSERT(SegmentOffset < HEAP_SEGMENTS);
Heap->Segments[SegmentOffset]->LastEntryInSegment = InUseEntry;
}
}
else
{
@@ -2422,6 +2470,10 @@ RtlpGrowBlockInPlace (IN PHEAP Heap,
/* Update the following block or set the last entry in the segment */
if (RememberFlags & HEAP_ENTRY_LAST_ENTRY)
{
SegmentOffset = UnusedEntry->SegmentOffset;
ASSERT(SegmentOffset < HEAP_SEGMENTS);
Heap->Segments[SegmentOffset]->LastEntryInSegment = UnusedEntry;
/* Set flags and size */
UnusedEntry->Flags = RememberFlags;
UnusedEntry->Size = (USHORT)FreeSize;
@@ -2468,7 +2520,15 @@ RtlpGrowBlockInPlace (IN PHEAP Heap,
UnusedEntry->Size = (USHORT)FreeSize;
if (!(RememberFlags & HEAP_ENTRY_LAST_ENTRY))
{
((PHEAP_ENTRY)UnusedEntry + FreeSize)->PreviousSize = (USHORT)FreeSize;
}
else
{
SegmentOffset = UnusedEntry->SegmentOffset;
ASSERT(SegmentOffset < HEAP_SEGMENTS);
Heap->Segments[SegmentOffset]->LastEntryInSegment = UnusedEntry;
}
/* Insert it back and update total size */
RtlpInsertFreeBlockHelper(Heap, UnusedEntry, FreeSize, FALSE);
@@ -2578,6 +2638,7 @@ RtlReAllocateHeap(HANDLE HeapPtr,
SIZE_T RemainderBytes, ExtraSize;
PHEAP_VIRTUAL_ALLOC_ENTRY VirtualAllocBlock;
EXCEPTION_RECORD ExceptionRecord;
UCHAR SegmentOffset;
/* Return success in case of a null pointer */
if (!Ptr)
@@ -2787,6 +2848,10 @@ RtlReAllocateHeap(HANDLE HeapPtr,
/* Is that the last entry */
if (FreeFlags & HEAP_ENTRY_LAST_ENTRY)
{
SegmentOffset = SplitBlock->SegmentOffset;
ASSERT(SegmentOffset < HEAP_SEGMENTS);
Heap->Segments[SegmentOffset]->LastEntryInSegment = SplitBlock;
/* Set its size and insert it to the list */
SplitBlock->Size = (USHORT)FreeSize;
RtlpInsertFreeBlockHelper(Heap, SplitBlock, FreeSize, FALSE);
@@ -2834,6 +2899,12 @@ RtlReAllocateHeap(HANDLE HeapPtr,
/* Update previous size of the next entry */
((PHEAP_FREE_ENTRY)((PHEAP_ENTRY)SplitBlock + FreeSize))->PreviousSize = (USHORT)FreeSize;
}
else
{
SegmentOffset = SplitBlock->SegmentOffset;
ASSERT(SegmentOffset < HEAP_SEGMENTS);
Heap->Segments[SegmentOffset]->LastEntryInSegment = SplitBlock;
}
/* Insert the new one back and update total size */
RtlpInsertFreeBlockHelper(Heap, SplitBlock, FreeSize, FALSE);

sdk/lib/rtl/heap.h

@@ -203,22 +203,26 @@ typedef struct _HEAP_LIST_LOOKUP
PLIST_ENTRY *ListHints;
} HEAP_LIST_LOOKUP, *PHEAP_LIST_LOOKUP;
#define HEAP_SEGMENT_MEMBERS \
HEAP_ENTRY Entry; \
ULONG SegmentSignature; \
ULONG SegmentFlags; \
LIST_ENTRY SegmentListEntry; \
struct _HEAP *Heap; \
PVOID BaseAddress; \
ULONG NumberOfPages; \
PHEAP_ENTRY FirstEntry; \
PHEAP_ENTRY LastValidEntry; \
ULONG NumberOfUnCommittedPages; \
ULONG NumberOfUnCommittedRanges; \
USHORT SegmentAllocatorBackTraceIndex; \
USHORT Reserved; \
LIST_ENTRY UCRSegmentList; \
PVOID LastEntryInSegment //FIXME: non-Vista
typedef struct _HEAP
{
HEAP_ENTRY Entry;
ULONG SegmentSignature;
ULONG SegmentFlags;
LIST_ENTRY SegmentListEntry;
struct _HEAP *Heap;
PVOID BaseAddress;
ULONG NumberOfPages;
PHEAP_ENTRY FirstEntry;
PHEAP_ENTRY LastValidEntry;
ULONG NumberOfUnCommittedPages;
ULONG NumberOfUnCommittedRanges;
USHORT SegmentAllocatorBackTraceIndex;
USHORT Reserved;
LIST_ENTRY UCRSegmentList;
HEAP_SEGMENT_MEMBERS;
ULONG Flags;
ULONG ForceFlags;
@@ -271,20 +275,7 @@ typedef struct _HEAP
typedef struct _HEAP_SEGMENT
{
HEAP_ENTRY Entry;
ULONG SegmentSignature;
ULONG SegmentFlags;
LIST_ENTRY SegmentListEntry;
PHEAP Heap;
PVOID BaseAddress;
ULONG NumberOfPages;
PHEAP_ENTRY FirstEntry;
PHEAP_ENTRY LastValidEntry;
ULONG NumberOfUnCommittedPages;
ULONG NumberOfUnCommittedRanges;
USHORT SegmentAllocatorBackTraceIndex;
USHORT Reserved;
LIST_ENTRY UCRSegmentList;
HEAP_SEGMENT_MEMBERS;
} HEAP_SEGMENT, *PHEAP_SEGMENT;
typedef struct _HEAP_UCR_DESCRIPTOR
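
The HEAP_SEGMENT_MEMBERS define above exists because, as the commit message
says, the code relies on HEAP and HEAP_SEGMENT having these members match up
(the heap structure is also accessed through HEAP_SEGMENT pointers). Below is
a minimal sketch of that technique with simplified, hypothetical structs and
members (not the real HEAP/HEAP_SEGMENT fields): one macro defines the shared
leading members, so both layouts stay in sync, and a C11 static_assert can
document the invariant.

#include <assert.h>
#include <stddef.h>

/* One macro holds the shared leading members (stand-ins only). */
#define COMMON_SEGMENT_MEMBERS \
    unsigned long Signature;   \
    void *BaseAddress;         \
    void *FirstEntry;          \
    void *LastEntryInSegment

typedef struct SEGMENT
{
    COMMON_SEGMENT_MEMBERS;
} SEGMENT;

typedef struct HEAP
{
    COMMON_SEGMENT_MEMBERS;   /* same prefix as SEGMENT, by construction */
    unsigned long Flags;      /* heap-only members follow */
} HEAP;

/* The offsets must line up for code that reads a HEAP through a SEGMENT
 * pointer to stay correct. */
static_assert(offsetof(HEAP, LastEntryInSegment) ==
              offsetof(SEGMENT, LastEntryInSegment),
              "common members must match up");

int main(void)
{
    HEAP Heap = { .Signature = 0x12345678 };
    SEGMENT *Segment = (SEGMENT *)&Heap;   /* view the heap as a segment */

    assert(Segment->Signature == Heap.Signature);
    return 0;
}

With the shared define, adding a member such as LastEntryInSegment only has
to happen in one place, instead of in two copies that can silently drift
apart.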