diff --git a/reactos/lib/rtl/heap.c b/reactos/lib/rtl/heap.c index 0e43ccbcf66..2c28b6a76d7 100644 --- a/reactos/lib/rtl/heap.c +++ b/reactos/lib/rtl/heap.c @@ -1,1155 +1,1340 @@ /* COPYRIGHT: See COPYING in the top level directory * PROJECT: ReactOS system libraries - * FILE: lib/rtl/image.c - * PURPOSE: Image handling functions - * PROGRAMMERS: Copyright 1996 Alexandre Julliard - * Copyright 1998 Ulrich Weigand + * FILE: lib/rtl/heap.c + * PURPOSE: RTL Heap backend allocator + * PROGRAMMERS: Copyright 2010 Aleksey Bragin */ -// -// Note: This is a slightly modified implementation of WINE's. -// -// WINE's implementation is a hack based on Windows 95's heap implementation, -// itself a hack of DOS memory management.It supports 3 out of the 18 possible -// NT Heap Flags, does not support custom allocation/deallocation routines, -// and is about 50-80x slower with fragmentation rates up to 500x higher when -// compared to NT's LFH. WINE is lucky because the advanced NT Heap features are -// used in kernel-mode usually, not in user-mode, and they are crossing their -// fingers for this being the same. Note that several high-end SQL/Database -// applications would significantly benefit from custom heap features provided -// by NT. -// -// ROS's changes include: -// - Using Zw instead of Nt calls, because this matters when in Kernel Mode -// - Not using per-process heap lists while in Kernel Mode -// - Using a macro to handle the Critical Section, because it's meaningless -// in Kernel Mode. -// - Crappy support for a custom Commit routine. -// - Crappy support for User-defined flags and the User-defined value. -// - Ripping out all the code for shared heaps, because those don't exist on NT. -// -// Be aware of these changes when you try to sync something back. -// +/* Useful references: + http://msdn.microsoft.com/en-us/library/ms810466.aspx + http://msdn.microsoft.com/en-us/library/ms810603.aspx + http://www.securitylab.ru/analytics/216376.php + http://binglongx.spaces.live.com/blog/cns!142CBF6D49079DE8!596.entry + http://www.phreedom.org/research/exploits/asn1-bitstring/ + http://illmatics.com/Understanding_the_LFH.pdf + http://www.alex-ionescu.com/?p=18 +*/ /* INCLUDES *****************************************************************/ #include -#undef LIST_FOR_EACH -#undef LIST_FOR_EACH_SAFE -#include +#include #define NDEBUG #include -#define TRACE DPRINT -#define WARN DPRINT1 -#define ERR DPRINT1 -#define DPRINTF DPRINT +HEAP_LOCK RtlpProcessHeapsListLock; -/* FUNCTIONS *****************************************************************/ +/* Bitmaps stuff */ -#define WARN_ON(x) (1) - -#ifdef NDEBUG -#define TRACE_ON(x) (0) -#else -#define TRACE_ON(x) (1) -#endif - -/* Note: the heap data structures are based on what Pietrek describes in his - * book 'Windows 95 System Programming Secrets'. The layout is not exactly - * the same, but could be easily adapted if it turns out some programs - * require it. - */ - -/* FIXME: use SIZE_T for 'size' structure members, but we need to make sure - * that there is no unaligned accesses to structure fields. 
- */ - -typedef struct tagARENA_INUSE +/* How many least significant bits are clear */ +UCHAR RtlpBitsClearLow[] = { - SIZE_T size; /* Block size; must be the first field */ - DWORD magic : 23; /* Magic number */ - DWORD has_user_data : 1; /* There is user data associated with this block */ - DWORD unused_bytes : 8; /* Number of bytes in the block not used by user data (max value is HEAP_MIN_DATA_SIZE+HEAP_MIN_SHRINK_SIZE) */ -} ARENA_INUSE; - -typedef struct tagARENA_FREE -{ - SIZE_T size; /* Block size; must be the first field */ - DWORD magic; /* Magic number */ - struct list entry; /* Entry in free list */ -} ARENA_FREE; - -#define ARENA_FLAG_FREE 0x00000001 /* flags OR'ed with arena size */ -#define ARENA_FLAG_PREV_FREE 0x00000002 -#define ARENA_INUSE_MAGIC 0x455355 /* Value for arena 'magic' field */ -#define ARENA_FREE_MAGIC 0x45455246 /* Value for arena 'magic' field */ - -#ifndef _WIN64 -#define ARENA_SIZE_MASK (~3L) -#else -#define ARENA_SIZE_MASK (~7L) -#endif - -#define ARENA_INUSE_FILLER 0x55 -#define ARENA_FREE_FILLER 0xaa - -/* everything is aligned on 8 byte boundaries (16 for Win64)*/ -#define ALIGNMENT (2*sizeof(void*)) -#define ARENA_OFFSET (ALIGNMENT - sizeof(ARENA_INUSE)) - -#define ROUND_SIZE(size) ((((size) + ALIGNMENT - 1) & ~(ALIGNMENT-1)) + ARENA_OFFSET) - - -#define QUIET 1 /* Suppress messages */ -#define NOISY 0 /* Report all errors */ - -/* minimum data size (without arenas) of an allocated block */ -#define HEAP_MIN_DATA_SIZE ROUND_SIZE(2 * sizeof(struct list)) -/* minimum size that must remain to shrink an allocated block */ -#define HEAP_MIN_SHRINK_SIZE (HEAP_MIN_DATA_SIZE+sizeof(ARENA_FREE)) - -#define HEAP_NB_FREE_LISTS 5 /* Number of free lists */ - -/* Max size of the blocks on the free lists */ -static const DWORD HEAP_freeListSizes[HEAP_NB_FREE_LISTS] = -{ - 0x10, 0x20, 0x80, 0x200, MAXULONG + 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 }; -typedef union +UCHAR FORCEINLINE +RtlpFindLeastSetBit(ULONG Bits) { - ARENA_FREE arena; - void *aligment[4]; -} FREE_LIST_ENTRY; - -struct tagHEAP; - -typedef struct tagSUBHEAP -{ - SIZE_T size; /* Size of the whole sub-heap */ - SIZE_T commitSize; /* Committed size of the sub-heap */ - DWORD headerSize; /* Size of the heap header */ - struct tagSUBHEAP *next; /* Next sub-heap */ - struct tagHEAP *heap; /* Main heap structure */ - DWORD magic; /* Magic number */ -} SUBHEAP; - -#define SUBHEAP_MAGIC ((DWORD)('S' | ('U'<<8) | ('B'<<16) | ('H'<<24))) - -typedef struct tagHEAP_USER_DATA -{ - LIST_ENTRY ListEntry; - PVOID BaseAddress; - ULONG UserFlags; - PVOID UserValue; -} HEAP_USER_DATA, *PHEAP_USER_DATA; - -typedef struct tagHEAP -{ - SUBHEAP subheap; /* First sub-heap */ - struct list entry; /* Entry in process heap list */ - HEAP_LOCK lock; /* Critical section for serialization */ - DECLSPEC_ALIGN(8) FREE_LIST_ENTRY freeList[HEAP_NB_FREE_LISTS]; /* Free lists */ - DWORD flags; /* Heap flags */ - DWORD magic; /* Magic number */ - PRTL_HEAP_COMMIT_ROUTINE commitRoutine; - LIST_ENTRY UserDataHead; -} HEAP; - 
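For clarity, a minimal sketch of what the RtlpBitsClearLow table above encodes: entry N is the zero-based index of the least significant set bit of the byte value N, with 8 meaning no bit is set. The helper and the self-check below are illustrative only; NaiveTrailingZeros and CheckBitsClearLowTable are hypothetical names, not part of the patch.

#if 0 /* Illustrative sketch */
static UCHAR NaiveTrailingZeros(UCHAR Value)
{
    UCHAR Count = 0;

    /* No bit set: all 8 low bits are "clear" */
    if (Value == 0) return 8;

    /* Count zero bits below the least significant set bit */
    while (!(Value & 1))
    {
        Value >>= 1;
        Count++;
    }

    return Count;
}

static VOID CheckBitsClearLowTable(VOID)
{
    ULONG i;

    /* Every table entry matches the naive trailing-zero count */
    for (i = 0; i < 256; i++)
    {
        ASSERT(RtlpBitsClearLow[i] == NaiveTrailingZeros((UCHAR)i));
    }
}
#endif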
-#define HEAP_MAGIC ((DWORD)('H' | ('E'<<8) | ('A'<<16) | ('P'<<24))) - -#define HEAP_DEF_SIZE 0x110000 /* Default heap size = 1Mb + 64Kb */ -#define COMMIT_MASK 0xffff /* bitmask for commit/decommit granularity */ - -static HEAP *processHeap; /* main process heap */ - -static BOOL HEAP_IsRealArena( HEAP *heapPtr, DWORD flags, LPCVOID block, BOOL quiet ); - -/* mark a block of memory as free for debugging purposes */ -static __inline void mark_block_free( void *ptr, SIZE_T size ) -{ - if (TRACE_ON(heap)) memset( ptr, ARENA_FREE_FILLER, size ); -#ifdef VALGRIND_MAKE_NOACCESS - VALGRIND_DISCARD( VALGRIND_MAKE_NOACCESS( ptr, size )); -#endif -} - -/* mark a block of memory as initialized for debugging purposes */ -static __inline void mark_block_initialized( void *ptr, SIZE_T size ) -{ -#ifdef VALGRIND_MAKE_READABLE - VALGRIND_DISCARD( VALGRIND_MAKE_READABLE( ptr, size )); -#endif -} - -/* mark a block of memory as uninitialized for debugging purposes */ -static __inline void mark_block_uninitialized( void *ptr, SIZE_T size ) -{ -#ifdef VALGRIND_MAKE_WRITABLE - VALGRIND_DISCARD( VALGRIND_MAKE_WRITABLE( ptr, size )); -#endif - if (TRACE_ON(heap)) + if (Bits & 0xFFFF) { - memset( ptr, ARENA_INUSE_FILLER, size ); -#ifdef VALGRIND_MAKE_WRITABLE - /* make it uninitialized to valgrind again */ - VALGRIND_DISCARD( VALGRIND_MAKE_WRITABLE( ptr, size )); -#endif - } -} - -/* clear contents of a block of memory */ -static __inline void clear_block( void *ptr, SIZE_T size ) -{ - mark_block_initialized( ptr, size ); - memset( ptr, 0, size ); -} - -/* locate a free list entry of the appropriate size */ -/* size is the size of the whole block including the arena header */ -static __inline unsigned int get_freelist_index( SIZE_T size ) -{ - unsigned int i; - - size -= sizeof(ARENA_FREE); - for (i = 0; i < HEAP_NB_FREE_LISTS - 1; i++) if (size <= HEAP_freeListSizes[i]) break; - return i; -} - -/* get the memory protection type to use for a given heap */ -static inline ULONG get_protection_type( DWORD flags ) -{ - return (flags & HEAP_CREATE_ENABLE_EXECUTE) ? 
PAGE_EXECUTE_READWRITE : PAGE_READWRITE; -} - -static RTL_CRITICAL_SECTION_DEBUG process_heap_critsect_debug = -{ - 0, 0, NULL, /* will be set later */ - { &process_heap_critsect_debug.ProcessLocksList, &process_heap_critsect_debug.ProcessLocksList }, - 0, 0, 0, 0, 0 -}; - -/*********************************************************************** - * HEAP_Dump - */ -static void HEAP_Dump( HEAP *heap ) -{ - int i; - SUBHEAP *subheap; - char *ptr; - - DPRINTF( "Heap: %p\n", heap ); - DPRINTF( "Next: %p Sub-heaps: %p", - LIST_ENTRY( heap->entry.next, HEAP, entry ), &heap->subheap ); - subheap = &heap->subheap; - while (subheap->next) - { - DPRINTF( " -> %p", subheap->next ); - subheap = subheap->next; - } - - DPRINTF( "\nFree lists:\n Block Stat Size Id\n" ); - for (i = 0; i < HEAP_NB_FREE_LISTS; i++) - DPRINTF( "%p free %08lx prev=%p next=%p\n", - &heap->freeList[i].arena, HEAP_freeListSizes[i], - LIST_ENTRY( heap->freeList[i].arena.entry.prev, ARENA_FREE, entry ), - LIST_ENTRY( heap->freeList[i].arena.entry.next, ARENA_FREE, entry )); - - subheap = &heap->subheap; - while (subheap) - { - SIZE_T freeSize = 0, usedSize = 0, arenaSize = subheap->headerSize; - DPRINTF( "\n\nSub-heap %p: size=%08lx committed=%08lx\n", - subheap, subheap->size, subheap->commitSize ); - - DPRINTF( "\n Block Stat Size Id\n" ); - ptr = (char*)subheap + subheap->headerSize; - while (ptr < (char *)subheap + subheap->size) - { - if (*(DWORD *)ptr & ARENA_FLAG_FREE) - { - ARENA_FREE *pArena = (ARENA_FREE *)ptr; - DPRINTF( "%p free %08lx prev=%p next=%p\n", - pArena, pArena->size & ARENA_SIZE_MASK, - LIST_ENTRY( pArena->entry.prev, ARENA_FREE, entry ), - LIST_ENTRY( pArena->entry.next, ARENA_FREE, entry ) ); - ptr += sizeof(*pArena) + (pArena->size & ARENA_SIZE_MASK); - arenaSize += sizeof(ARENA_FREE); - freeSize += pArena->size & ARENA_SIZE_MASK; - } - else if (*(DWORD *)ptr & ARENA_FLAG_PREV_FREE) - { - ARENA_INUSE *pArena = (ARENA_INUSE *)ptr; - DPRINTF( "%p Used %08lx back=%p\n", - pArena, pArena->size & ARENA_SIZE_MASK, *((ARENA_FREE **)pArena - 1) ); - ptr += sizeof(*pArena) + (pArena->size & ARENA_SIZE_MASK); - arenaSize += sizeof(ARENA_INUSE); - usedSize += pArena->size & ARENA_SIZE_MASK; - } - else - { - ARENA_INUSE *pArena = (ARENA_INUSE *)ptr; - DPRINTF( "%p used %08lx\n", pArena, pArena->size & ARENA_SIZE_MASK ); - ptr += sizeof(*pArena) + (pArena->size & ARENA_SIZE_MASK); - arenaSize += sizeof(ARENA_INUSE); - usedSize += pArena->size & ARENA_SIZE_MASK; - } - } - DPRINTF( "\nTotal: Size=%08lx Committed=%08lx Free=%08lx Used=%08lx Arenas=%08lx (%ld%%)\n\n", - subheap->size, subheap->commitSize, freeSize, usedSize, - arenaSize, (arenaSize * 100) / subheap->size ); - subheap = subheap->next; - } -} - -#if 0 -static void HEAP_DumpEntry( LPPROCESS_HEAP_ENTRY entry ) -{ - WORD rem_flags; - TRACE( "Dumping entry %p\n", entry ); - TRACE( "lpData\t\t: %p\n", entry->lpData ); - TRACE( "cbData\t\t: %08lx\n", entry->cbData); - TRACE( "cbOverhead\t: %08x\n", entry->cbOverhead); - TRACE( "iRegionIndex\t: %08x\n", entry->iRegionIndex); - TRACE( "WFlags\t\t: "); - if (entry->wFlags & PROCESS_HEAP_REGION) - TRACE( "PROCESS_HEAP_REGION "); - if (entry->wFlags & PROCESS_HEAP_UNCOMMITTED_RANGE) - TRACE( "PROCESS_HEAP_UNCOMMITTED_RANGE "); - if (entry->wFlags & PROCESS_HEAP_ENTRY_BUSY) - TRACE( "PROCESS_HEAP_ENTRY_BUSY "); - if (entry->wFlags & PROCESS_HEAP_ENTRY_MOVEABLE) - TRACE( "PROCESS_HEAP_ENTRY_MOVEABLE "); - if (entry->wFlags & PROCESS_HEAP_ENTRY_DDESHARE) - TRACE( "PROCESS_HEAP_ENTRY_DDESHARE "); - rem_flags = 
entry->wFlags & - ~(PROCESS_HEAP_REGION | PROCESS_HEAP_UNCOMMITTED_RANGE | - PROCESS_HEAP_ENTRY_BUSY | PROCESS_HEAP_ENTRY_MOVEABLE| - PROCESS_HEAP_ENTRY_DDESHARE); - if (rem_flags) - TRACE( "Unknown %08x", rem_flags); - TRACE( "\n"); - if ((entry->wFlags & PROCESS_HEAP_ENTRY_BUSY ) - && (entry->wFlags & PROCESS_HEAP_ENTRY_MOVEABLE)) - { - /* Treat as block */ - TRACE( "BLOCK->hMem\t\t:%p\n", entry->Block.hMem); - } - if (entry->wFlags & PROCESS_HEAP_REGION) - { - TRACE( "Region.dwCommittedSize\t:%08lx\n",entry->Region.dwCommittedSize); - TRACE( "Region.dwUnCommittedSize\t:%08lx\n",entry->Region.dwUnCommittedSize); - TRACE( "Region.lpFirstBlock\t:%p\n",entry->Region.lpFirstBlock); - TRACE( "Region.lpLastBlock\t:%p\n",entry->Region.lpLastBlock); - } -} -#endif - -static PHEAP_USER_DATA HEAP_GetUserData(HEAP *heapPtr, PVOID BaseAddress) -{ - PLIST_ENTRY CurrentEntry; - PHEAP_USER_DATA udata; - - CurrentEntry = heapPtr->UserDataHead.Flink; - while (CurrentEntry != &heapPtr->UserDataHead) - { - udata = CONTAINING_RECORD(CurrentEntry, HEAP_USER_DATA, ListEntry); - if (udata->BaseAddress == BaseAddress) - return udata; - CurrentEntry = CurrentEntry->Flink; - } - return NULL; -} - -static PHEAP_USER_DATA HEAP_AllocUserData(HEAP *heapPtr, PVOID BaseAddress) -{ - /* Allocate user data entry */ - ARENA_INUSE *pInUse; - PHEAP_USER_DATA udata = RtlAllocateHeap(heapPtr, 0, sizeof(HEAP_USER_DATA)); - if (!udata) return NULL; - udata->BaseAddress = BaseAddress; - InsertTailList(&heapPtr->UserDataHead, &udata->ListEntry); - pInUse = (ARENA_INUSE *)BaseAddress - 1; - pInUse->has_user_data = 1; - return udata; -} - -/*********************************************************************** - * HEAP_GetPtr - * RETURNS - * Pointer to the heap - * NULL: Failure - */ -static HEAP *HEAP_GetPtr( - HANDLE heap /* [in] Handle to the heap */ -) { - HEAP *heapPtr = (HEAP *)heap; - if (!heapPtr || (heapPtr->magic != HEAP_MAGIC)) - { - if (heapPtr) - ERR("Invalid heap %p, magic:%.4s!\n", heap, &heapPtr->magic ); + if (Bits & 0xFF) + return RtlpBitsClearLow[Bits & 0xFF]; /* Lowest byte */ else - ERR("Invalid heap %p!\n", heap ); - //KeDumpStackFrames(NULL); - return NULL; - } - if (TRACE_ON(heap) && !HEAP_IsRealArena( heapPtr, 0, NULL, NOISY )) - { - HEAP_Dump( heapPtr ); - assert( FALSE ); - return NULL; - } - return heapPtr; -} - - -/*********************************************************************** - * HEAP_InsertFreeBlock - * - * Insert a free block into the free list. - */ -static __inline void HEAP_InsertFreeBlock( HEAP *heap, ARENA_FREE *pArena, BOOL last ) -{ - FREE_LIST_ENTRY *pEntry = heap->freeList + get_freelist_index( pArena->size + sizeof(*pArena) ); - if (last) - { - /* insert at end of free list, i.e. 
before the next free list entry */ - pEntry++; - if (pEntry == &heap->freeList[HEAP_NB_FREE_LISTS]) pEntry = heap->freeList; - list_add_before( &pEntry->arena.entry, &pArena->entry ); + return RtlpBitsClearLow[(Bits >> 8) & 0xFF] + 8; /* 2nd byte */ } else { - /* insert at head of free list */ - list_add_after( &pEntry->arena.entry, &pArena->entry ); + if ((Bits >> 16) & 0xFF) + return RtlpBitsClearLow[(Bits >> 16) & 0xFF] + 16; /* 3rd byte */ + else + return RtlpBitsClearLow[(Bits >> 24) & 0xFF] + 24; /* Highest byte */ } - pArena->size |= ARENA_FLAG_FREE; } +/* Maximum size of a tail-filling pattern used for compare operation */ +UCHAR FillPattern[HEAP_ENTRY_SIZE] = +{ + HEAP_TAIL_FILL, + HEAP_TAIL_FILL, + HEAP_TAIL_FILL, + HEAP_TAIL_FILL, + HEAP_TAIL_FILL, + HEAP_TAIL_FILL, + HEAP_TAIL_FILL, + HEAP_TAIL_FILL +}; -/*********************************************************************** - * HEAP_FindSubHeap - * Find the sub-heap containing a given address. - * - * RETURNS - * Pointer: Success - * NULL: Failure - */ -static SUBHEAP *HEAP_FindSubHeap( - const HEAP *heap, /* [in] Heap pointer */ - LPCVOID ptr /* [in] Address */ -) { - const SUBHEAP *sub = &heap->subheap; - while (sub) + +ULONG NTAPI +RtlCompareMemoryUlong(PVOID Source, ULONG Length, ULONG Value); + +/* FUNCTIONS *****************************************************************/ + +VOID NTAPI +RtlpInitializeHeap(PHEAP Heap, + PULONG HeaderSize, + ULONG Flags, + BOOLEAN AllocateLock, + PVOID Lock) +{ + PVOID NextHeapBase = Heap + 1; + PHEAP_UCR_DESCRIPTOR UcrDescriptor; + ULONG NumUCRs = 8; + ULONG i; + NTSTATUS Status; + + /* Add UCRs size */ + *HeaderSize += NumUCRs * sizeof(*UcrDescriptor); + + /* Prepare a list of UCRs */ + InitializeListHead(&Heap->UCRList); + InitializeListHead(&Heap->UCRSegments); + UcrDescriptor = NextHeapBase; + + for (i=0; i= (const char *)sub) && - ((const char *)ptr < (const char *)sub + sub->size)) return (SUBHEAP*)sub; - sub = sub->next; + InsertTailList(&Heap->UCRList, &UcrDescriptor->ListEntry); } + + NextHeapBase = UcrDescriptor; + // TODO: Add tagging + + /* Round up header size again */ + *HeaderSize = ROUND_UP(*HeaderSize, HEAP_ENTRY_SIZE); + + ASSERT(*HeaderSize <= PAGE_SIZE); + + /* Initialize heap's header */ + Heap->Entry.Size = (*HeaderSize) >> HEAP_ENTRY_SHIFT; + Heap->Entry.Flags = HEAP_ENTRY_BUSY; + + Heap->Signature = HEAP_SIGNATURE; + Heap->Flags = Flags; + Heap->ForceFlags = (Flags & (HEAP_NO_SERIALIZE | + HEAP_GENERATE_EXCEPTIONS | + HEAP_ZERO_MEMORY | + HEAP_REALLOC_IN_PLACE_ONLY | + HEAP_VALIDATE_PARAMETERS_ENABLED | + HEAP_VALIDATE_ALL_ENABLED | + HEAP_TAIL_CHECKING_ENABLED | + HEAP_CREATE_ALIGN_16 | + HEAP_FREE_CHECKING_ENABLED)); + Heap->HeaderValidateCopy = NULL; + Heap->HeaderValidateLength = ((PCHAR)NextHeapBase - (PCHAR)Heap); + + /* Initialize free lists */ + for (i=0; iFreeLists[i]); + } + + /* Initialize "big" allocations list */ + InitializeListHead(&Heap->VirtualAllocdBlocks); + + /* Initialize lock */ + if (AllocateLock) + { + Lock = NextHeapBase; + Status = RtlInitializeHeapLock((PHEAP_LOCK)Lock); + if (!NT_SUCCESS(Status)) + { + DPRINT1("Initializing the lock failed!\n"); + return /*NULL*/; // FIXME! 
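A short worked example for RtlpFindLeastSetBit above: the 32-bit input is narrowed to its lowest non-zero byte, then resolved with a single RtlpBitsClearLow lookup plus that byte's bit offset. The loop-based reference below is only for comparison; FindLeastSetBitByLoop and CheckFindLeastSetBit are illustrative names, not part of the patch.

#if 0 /* Illustrative sketch */
static UCHAR FindLeastSetBitByLoop(ULONG Bits)
{
    UCHAR Index;

    /* Scan from the least significant bit upwards */
    for (Index = 0; Index < 32; Index++)
    {
        if (Bits & (1UL << Index)) return Index;
    }

    /* Zero input: matches the table path, RtlpBitsClearLow[0] + 24 == 32 */
    return 32;
}

static VOID CheckFindLeastSetBit(VOID)
{
    /* 0x00A00000 has bits 21 and 23 set; the lowest one is found as
       RtlpBitsClearLow[0xA0] + 16 == 5 + 16 == 21 */
    ASSERT(RtlpFindLeastSetBit(0x00A00000) == 21);
    ASSERT(RtlpFindLeastSetBit(0x00A00000) == FindLeastSetBitByLoop(0x00A00000));
}
#endif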
+ } + } + + /* Set the lock variable */ + Heap->LockVariable = Lock; +} + +VOID FORCEINLINE +RtlpSetFreeListsBit(PHEAP Heap, + PHEAP_FREE_ENTRY FreeEntry) +{ + ULONG Index, Bit; + + ASSERT(FreeEntry->Size < HEAP_FREELISTS); + + /* Calculate offset in the free list bitmap */ + Index = FreeEntry->Size >> 3; /* = FreeEntry->Size / (sizeof(UCHAR) * 8)*/ + Bit = 1 << (FreeEntry->Size & 7); + + /* Assure it's not already set */ + ASSERT((Heap->u.FreeListsInUseBytes[Index] & Bit) == 0); + + /* Set it */ + Heap->u.FreeListsInUseBytes[Index] |= Bit; +} + +VOID FORCEINLINE +RtlpClearFreeListsBit(PHEAP Heap, + PHEAP_FREE_ENTRY FreeEntry) +{ + ULONG Index, Bit; + + ASSERT(FreeEntry->Size < HEAP_FREELISTS); + + /* Calculate offset in the free list bitmap */ + Index = FreeEntry->Size >> 3; /* = FreeEntry->Size / (sizeof(UCHAR) * 8)*/ + Bit = 1 << (FreeEntry->Size & 7); + + /* Assure it was set and the corresponding free list is empty */ + ASSERT(Heap->u.FreeListsInUseBytes[Index] & Bit); + ASSERT(IsListEmpty(&Heap->FreeLists[FreeEntry->Size])); + + /* Clear it */ + Heap->u.FreeListsInUseBytes[Index] ^= Bit; +} + +VOID NTAPI +RtlpInsertFreeBlockHelper(PHEAP Heap, + PHEAP_FREE_ENTRY FreeEntry, + SIZE_T BlockSize, + BOOLEAN NoFill) +{ + PLIST_ENTRY FreeListHead, Current; + PHEAP_FREE_ENTRY CurrentEntry; + + ASSERT(FreeEntry->Size == BlockSize); + + /* Fill if it's not denied */ + if (!NoFill) + { + FreeEntry->Flags &= ~(HEAP_ENTRY_FILL_PATTERN | + HEAP_ENTRY_EXTRA_PRESENT | + HEAP_ENTRY_BUSY); + + if (Heap->Flags & HEAP_FREE_CHECKING_ENABLED) + { + RtlFillMemoryUlong((PCHAR)(FreeEntry + 1), + (BlockSize << HEAP_ENTRY_SHIFT) - sizeof(*FreeEntry), + ARENA_FREE_FILLER); + + FreeEntry->Flags |= HEAP_ENTRY_FILL_PATTERN; + } + } + else + { + /* Clear out all flags except the last entry one */ + FreeEntry->Flags &= HEAP_ENTRY_LAST_ENTRY; + } + + /* Check if PreviousSize of the next entry matches ours */ + if (!(FreeEntry->Flags & HEAP_ENTRY_LAST_ENTRY)) + { + ASSERT(((PHEAP_ENTRY)FreeEntry + BlockSize)->PreviousSize = BlockSize); + } + + /* Insert it either into dedicated or non-dedicated list */ + if (BlockSize < HEAP_FREELISTS) + { + /* Dedicated list */ + FreeListHead = &Heap->FreeLists[BlockSize]; + + if (IsListEmpty(FreeListHead)) + { + RtlpSetFreeListsBit(Heap, FreeEntry); + } + } + else + { + /* Non-dedicated one */ + FreeListHead = &Heap->FreeLists[0]; + Current = FreeListHead->Flink; + + /* Find a position where to insert it to (the list must be sorted) */ + while (FreeListHead != Current) + { + CurrentEntry = CONTAINING_RECORD(Current, HEAP_FREE_ENTRY, FreeList); + + if (BlockSize <= CurrentEntry->Size) + break; + + Current = Current->Flink; + } + + FreeListHead = Current; + } + + /* Actually insert it into the list */ + InsertTailList(FreeListHead, &FreeEntry->FreeList); +} + +VOID NTAPI +RtlpInsertFreeBlock(PHEAP Heap, + PHEAP_FREE_ENTRY FreeEntry, + SIZE_T BlockSize) +{ + USHORT Size, PreviousSize; + UCHAR SegmentOffset, Flags; + PHEAP_SEGMENT Segment; + + DPRINT("RtlpInsertFreeBlock(%p %p %x)\n", Heap, FreeEntry, BlockSize); + + /* Increase the free size counter */ + Heap->TotalFreeSize += BlockSize; + + /* Remember certain values */ + Flags = FreeEntry->Flags; + PreviousSize = FreeEntry->PreviousSize; + SegmentOffset = FreeEntry->SegmentOffset; + Segment = Heap->Segments[SegmentOffset]; + + /* Process it */ + while (BlockSize) + { + /* Check for the max size */ + if (BlockSize > HEAP_MAX_BLOCK_SIZE) + { + Size = HEAP_MAX_BLOCK_SIZE; + + /* Special compensation if it goes above limit just by 1 */ + 
if (BlockSize == (HEAP_MAX_BLOCK_SIZE + 1)) + Size -= 16; + + FreeEntry->Flags = 0; + } + else + { + Size = BlockSize; + FreeEntry->Flags = Flags; + } + + /* Change its size and insert it into a free list */ + FreeEntry->Size = Size; + FreeEntry->PreviousSize = PreviousSize; + FreeEntry->SegmentOffset = SegmentOffset; + + /* Call a helper to actually insert the block */ + RtlpInsertFreeBlockHelper(Heap, FreeEntry, Size, FALSE); + + /* Update sizes */ + PreviousSize = Size; + BlockSize -= Size; + + /* Go to the next entry */ + FreeEntry = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)FreeEntry + Size); + + /* Check if that's all */ + if ((PHEAP_ENTRY)FreeEntry >= Segment->LastValidEntry) return; + } + + /* Update previous size if needed */ + if (!(Flags & HEAP_ENTRY_LAST_ENTRY)) + FreeEntry->PreviousSize = PreviousSize; +} + +VOID NTAPI +RtlpRemoveFreeBlock(PHEAP Heap, + PHEAP_FREE_ENTRY FreeEntry, + BOOLEAN Dedicated, + BOOLEAN NoFill) +{ + SIZE_T Result, RealSize; + PLIST_ENTRY OldBlink, OldFlink; + + // FIXME: Maybe use RemoveEntryList? + + /* Remove the free block */ + OldFlink = FreeEntry->FreeList.Flink; + OldBlink = FreeEntry->FreeList.Blink; + OldBlink->Flink = OldFlink; + OldFlink->Blink = OldBlink; + + /* Update the freelists bitmap */ + if ((OldFlink == OldBlink) && + (Dedicated || (!Dedicated && FreeEntry->Size < HEAP_FREELISTS))) + { + RtlpClearFreeListsBit(Heap, FreeEntry); + } + + /* Fill with pattern if necessary */ + if (!NoFill && + (FreeEntry->Flags & HEAP_ENTRY_FILL_PATTERN)) + { + RealSize = (FreeEntry->Size << HEAP_ENTRY_SHIFT) - sizeof(*FreeEntry); + + /* Deduct extra stuff from block's real size */ + if (FreeEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT && + RealSize > sizeof(HEAP_FREE_ENTRY_EXTRA)) + { + RealSize -= sizeof(HEAP_FREE_ENTRY_EXTRA); + } + + /* Check if the free filler is intact */ + Result = RtlCompareMemoryUlong((PCHAR)(FreeEntry + 1), + RealSize, + ARENA_FREE_FILLER); + + if (Result != RealSize) + { + DPRINT1("Free heap block %p modified at %p after it was freed\n", + FreeEntry, + (PCHAR)(FreeEntry + 1) + Result); + } + } +} + +SIZE_T NTAPI +RtlpGetSizeOfBigBlock(PHEAP_ENTRY HeapEntry) +{ + PHEAP_VIRTUAL_ALLOC_ENTRY VirtualEntry; + + /* Get pointer to the containing record */ + VirtualEntry = CONTAINING_RECORD(HeapEntry, HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock); + + /* Restore the real size */ + return VirtualEntry->CommitSize - HeapEntry->Size; +} + +PHEAP_UCR_DESCRIPTOR NTAPI +RtlpCreateUnCommittedRange(PHEAP_SEGMENT Segment) +{ + PLIST_ENTRY Entry; + PHEAP_UCR_DESCRIPTOR UcrDescriptor; + PHEAP_UCR_SEGMENT UcrSegment; + PHEAP Heap = Segment->Heap; + SIZE_T ReserveSize = 16 * PAGE_SIZE; + SIZE_T CommitSize = 1 * PAGE_SIZE; + NTSTATUS Status; + + DPRINT("RtlpCreateUnCommittedRange(%p)\n", Segment); + + /* Check if we have unused UCRs */ + if (IsListEmpty(&Heap->UCRList)) + { + /* Get a pointer to the first UCR segment */ + UcrSegment = CONTAINING_RECORD(&Heap->UCRSegments.Flink, HEAP_UCR_SEGMENT, ListEntry); + + /* Check the list of UCR segments */ + if (IsListEmpty(&Heap->UCRSegments) || + UcrSegment->ReservedSize == UcrSegment->CommittedSize) + { + /* We need to create a new one. 
Reserve 16 pages for it */ + UcrSegment = NULL; + Status = ZwAllocateVirtualMemory(NtCurrentProcess(), + (PVOID *)&UcrSegment, + 0, + &ReserveSize, + MEM_RESERVE, + PAGE_READWRITE); + + if (!NT_SUCCESS(Status)) return NULL; + + /* Commit one page */ + Status = ZwAllocateVirtualMemory(NtCurrentProcess(), + (PVOID *)&UcrSegment, + 0, + &CommitSize, + MEM_COMMIT, + PAGE_READWRITE); + + if (!NT_SUCCESS(Status)) + { + /* Release reserved memory */ + ZwFreeVirtualMemory(NtCurrentProcess(), + (PVOID *)&UcrDescriptor, + &ReserveSize, + MEM_RELEASE); + return NULL; + } + + /* Set it's data */ + UcrSegment->ReservedSize = ReserveSize; + UcrSegment->CommittedSize = CommitSize; + + /* Add it to the head of the list */ + InsertHeadList(&Heap->UCRSegments, &UcrSegment->ListEntry); + + /* Get a pointer to the first available UCR descriptor */ + UcrDescriptor = (PHEAP_UCR_DESCRIPTOR)(UcrSegment + 1); + } + else + { + /* It's possible to use existing UCR segment. Commit one more page */ + UcrDescriptor = (PHEAP_UCR_DESCRIPTOR)((PCHAR)UcrSegment + UcrSegment->CommittedSize); + Status = ZwAllocateVirtualMemory(NtCurrentProcess(), + (PVOID *)&UcrDescriptor, + 0, + &CommitSize, + MEM_COMMIT, + PAGE_READWRITE); + + if (!NT_SUCCESS(Status)) return NULL; + + /* Update sizes */ + UcrSegment->CommittedSize += CommitSize; + } + + /* There is a whole bunch of new UCR descriptors. Put them into the unused list */ + while ((PCHAR)UcrDescriptor < ((PCHAR)UcrSegment + UcrSegment->CommittedSize)) + { + InsertTailList(&Heap->UCRList, &UcrDescriptor->ListEntry); + UcrDescriptor++; + } + } + + /* There are unused UCRs, just get the first one */ + Entry = RemoveHeadList(&Heap->UCRList); + UcrDescriptor = CONTAINING_RECORD(Entry, HEAP_UCR_DESCRIPTOR, ListEntry); + return UcrDescriptor; +} + +VOID NTAPI +RtlpDestroyUnCommittedRange(PHEAP_SEGMENT Segment, + PHEAP_UCR_DESCRIPTOR UcrDescriptor) +{ + /* Zero it out */ + UcrDescriptor->Address = NULL; + UcrDescriptor->Size = 0; + + /* Put it into the heap's list of unused UCRs */ + InsertHeadList(&Segment->Heap->UCRList, &UcrDescriptor->ListEntry); +} + +VOID NTAPI +RtlpInsertUnCommittedPages(PHEAP_SEGMENT Segment, + ULONG_PTR Address, + SIZE_T Size) +{ + PLIST_ENTRY Current; + PHEAP_UCR_DESCRIPTOR UcrDescriptor; + + DPRINT("RtlpInsertUnCommittedPages(%p %p %x)\n", Segment, Address, Size); + + /* Go through the list of UCR descriptors, they are sorted from lowest address + to the highest */ + Current = Segment->UCRSegmentList.Flink; + while(Current != &Segment->UCRSegmentList) + { + UcrDescriptor = CONTAINING_RECORD(Current, HEAP_UCR_DESCRIPTOR, SegmentEntry); + + if ((ULONG_PTR)UcrDescriptor->Address > Address) + { + /* Check for a really lucky case */ + if ((Address + Size) == (ULONG_PTR)UcrDescriptor->Address) + { + /* Exact match */ + UcrDescriptor->Address = (PVOID)Address; + UcrDescriptor->Size += Size; + return; + } + + /* We found the block after which the new one should go */ + break; + } + else if (((ULONG_PTR)UcrDescriptor->Address + UcrDescriptor->Size) == Address) + { + /* Modify this entry */ + Address = (ULONG_PTR)UcrDescriptor->Address; + Size += UcrDescriptor->Size; + + /* Remove it from the list and destroy it */ + RemoveEntryList(Current); + RtlpDestroyUnCommittedRange(Segment, UcrDescriptor); + + Segment->NumberOfUnCommittedRanges--; + } + else + { + /* Advance to the next descriptor */ + Current = Current->Flink; + } + } + + /* Create a new UCR descriptor */ + UcrDescriptor = RtlpCreateUnCommittedRange(Segment); + if (!UcrDescriptor) return; + + 
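RtlpCreateUnCommittedRange above uses the common reserve-then-commit pattern: reserve a 16-page range of address space up front, commit a single page, and commit further pages only once the committed part is exhausted. A minimal standalone sketch of that pattern follows; ReserveThenCommit is an illustrative name and error handling is trimmed. (Note that the commit-failure path in the code above releases memory through &UcrDescriptor where &UcrSegment appears to be intended.)

#if 0 /* Illustrative sketch */
static PVOID ReserveThenCommit(VOID)
{
    PVOID Base = NULL;
    SIZE_T ReserveSize = 16 * PAGE_SIZE;
    SIZE_T CommitSize = PAGE_SIZE;
    SIZE_T Zero = 0;
    NTSTATUS Status;

    /* Reserve address space only; no physical backing yet */
    Status = ZwAllocateVirtualMemory(NtCurrentProcess(),
                                     &Base,
                                     0,
                                     &ReserveSize,
                                     MEM_RESERVE,
                                     PAGE_READWRITE);
    if (!NT_SUCCESS(Status)) return NULL;

    /* Commit just the first page of the reserved range */
    Status = ZwAllocateVirtualMemory(NtCurrentProcess(),
                                     &Base,
                                     0,
                                     &CommitSize,
                                     MEM_COMMIT,
                                     PAGE_READWRITE);
    if (!NT_SUCCESS(Status))
    {
        /* Release the whole reservation on failure */
        ZwFreeVirtualMemory(NtCurrentProcess(), &Base, &Zero, MEM_RELEASE);
        return NULL;
    }

    return Base;
}
#endif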
UcrDescriptor->Address = (PVOID)Address; + UcrDescriptor->Size = Size; + + /* "Current" is the descriptor after which our one should go */ + InsertTailList(Current, &UcrDescriptor->SegmentEntry); + + DPRINT("Added segment UCR with base %p, size 0x%x\n", Address, Size); + + /* Increase counters */ + Segment->NumberOfUnCommittedRanges++; +} + +PHEAP_FREE_ENTRY NTAPI +RtlpFindAndCommitPages(PHEAP Heap, + PHEAP_SEGMENT Segment, + PSIZE_T Size, + PVOID AddressRequested) +{ + PLIST_ENTRY Current; + ULONG_PTR Address = 0; + PHEAP_UCR_DESCRIPTOR UcrDescriptor, PreviousUcr = NULL; + PHEAP_ENTRY FirstEntry, LastEntry, PreviousLastEntry; + NTSTATUS Status; + + DPRINT("RtlpFindAndCommitPages(%p %p %x %p)\n", Heap, Segment, *Size, Address); + + /* Go through UCRs in a segment */ + Current = Segment->UCRSegmentList.Flink; + while(Current != &Segment->UCRSegmentList) + { + UcrDescriptor = CONTAINING_RECORD(Current, HEAP_UCR_DESCRIPTOR, SegmentEntry); + + /* Check if we can use that one right away */ + if (UcrDescriptor->Size >= *Size && + (UcrDescriptor->Address == AddressRequested || !AddressRequested)) + { + /* Get the address */ + Address = (ULONG_PTR)UcrDescriptor->Address; + + /* Commit it */ + if (Heap->CommitRoutine) + { + Status = Heap->CommitRoutine(Heap, (PVOID *)&Address, Size); + } + else + { + Status = ZwAllocateVirtualMemory(NtCurrentProcess(), + (PVOID *)&Address, + 0, + Size, + MEM_COMMIT, + PAGE_READWRITE); + } + + DPRINT("Committed %d bytes at base %p, UCR size is %d\n", *Size, Address, UcrDescriptor->Size); + + /* Fail in unsuccessful case */ + if (!NT_SUCCESS(Status)) + { + DPRINT1("Committing page failed with status 0x%08X\n", Status); + return NULL; + } + + /* Update tracking numbers */ + Segment->NumberOfUnCommittedPages -= *Size / PAGE_SIZE; + + /* Calculate first and last entries */ + FirstEntry = (PHEAP_ENTRY)Address; + + if ((Segment->LastEntryInSegment->Flags & HEAP_ENTRY_LAST_ENTRY) && + (ULONG_PTR)(Segment->LastEntryInSegment + Segment->LastEntryInSegment->Size) == (ULONG_PTR)UcrDescriptor->Address) + { + LastEntry = Segment->LastEntryInSegment; + } + else + { + /* Go through the entries to find the last one */ + + if (PreviousUcr) + LastEntry = (PHEAP_ENTRY)((ULONG_PTR)PreviousUcr->Address + PreviousUcr->Size); + else + LastEntry = Segment->FirstEntry; + + while (!(LastEntry->Flags & HEAP_ENTRY_LAST_ENTRY)) + { + PreviousLastEntry = LastEntry; + LastEntry += LastEntry->Size; + + if ((ULONG_PTR)LastEntry >= (ULONG_PTR)Segment->LastValidEntry || + LastEntry->Size == 0) + { + if (LastEntry == (PHEAP_ENTRY)Address) + { + /* Found it */ + LastEntry = PreviousLastEntry; + break; + } + + DPRINT1("Last entry not found in a committed range near to %p\n", PreviousLastEntry); + return NULL; + } + } + } + + /* Unmark it as a last entry */ + LastEntry->Flags &= ~HEAP_ENTRY_LAST_ENTRY; + + /* Update UCR descriptor */ + UcrDescriptor->Address = (PVOID)((ULONG_PTR)UcrDescriptor->Address + *Size); + UcrDescriptor->Size -= *Size; + + DPRINT("Updating UcrDescriptor %p, new Address %p, size %d\n", + UcrDescriptor, UcrDescriptor->Address, UcrDescriptor->Size); + + /* Check if anything left in this UCR */ + if (UcrDescriptor->Size == 0) + { + /* It's fully exhausted */ + if (UcrDescriptor->Address == Segment->LastValidEntry) + { + FirstEntry->Flags = HEAP_ENTRY_LAST_ENTRY; + Segment->LastEntryInSegment = FirstEntry; + } + else + { + FirstEntry->Flags = 0; + Segment->LastEntryInSegment = Segment->FirstEntry; + } + + /* This UCR needs to be removed because it became useless */ + 
RemoveEntryList(&UcrDescriptor->SegmentEntry); + + RtlpDestroyUnCommittedRange(Segment, UcrDescriptor); + Segment->NumberOfUnCommittedRanges--; + } + else + { + FirstEntry->Flags = HEAP_ENTRY_LAST_ENTRY; + Segment->LastEntryInSegment = FirstEntry; + } + + /* Set various first entry fields*/ + FirstEntry->SegmentOffset = LastEntry->SegmentOffset; + FirstEntry->Size = *Size >> HEAP_ENTRY_SHIFT; + FirstEntry->PreviousSize = LastEntry->Size; + + /* Update previous size */ + if (!(FirstEntry->Flags & HEAP_ENTRY_LAST_ENTRY)) + (FirstEntry + FirstEntry->Size)->PreviousSize = FirstEntry->Size; + + /* We're done */ + return (PHEAP_FREE_ENTRY)FirstEntry; + } + + /* Advance to the next descriptor */ + PreviousUcr = UcrDescriptor; + Current = Current->Flink; + } + + return NULL; +} + +VOID NTAPI +RtlpDeCommitFreeBlock(PHEAP Heap, + PHEAP_FREE_ENTRY FreeEntry, + SIZE_T Size) +{ + PHEAP_SEGMENT Segment; + PHEAP_ENTRY PrecedingInUseEntry = NULL, NextInUseEntry = NULL; + PHEAP_FREE_ENTRY NextFreeEntry; + PHEAP_UCR_DESCRIPTOR UcrDescriptor; + ULONG PrecedingSize, NextSize, DecommitSize; + ULONG_PTR DecommitBase; + NTSTATUS Status; + + DPRINT("Decommitting %p %p %x\n", Heap, FreeEntry, Size); + + /* We can't decommit if there is a commit routine! */ + if (Heap->CommitRoutine) + { + /* Just add it back the usual way */ + RtlpInsertFreeBlock(Heap, FreeEntry, Size); + return; + } + + /* Get the segment */ + Segment = Heap->Segments[FreeEntry->SegmentOffset]; + + /* Get the preceding entry */ + DecommitBase = ROUND_UP(FreeEntry, PAGE_SIZE); + PrecedingSize = (PHEAP_ENTRY)DecommitBase - (PHEAP_ENTRY)FreeEntry; + + if (PrecedingSize == 1) + { + /* Just 1 heap entry, increase the base/size */ + DecommitBase += PAGE_SIZE; + PrecedingSize += PAGE_SIZE >> HEAP_ENTRY_SHIFT; + } + else if (FreeEntry->PreviousSize && + (DecommitBase == (ULONG_PTR)FreeEntry)) + { + PrecedingInUseEntry = (PHEAP_ENTRY)FreeEntry - FreeEntry->PreviousSize; + } + + /* Get the next entry */ + NextFreeEntry = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)FreeEntry + Size); + DecommitSize = ROUND_DOWN(NextFreeEntry, PAGE_SIZE); + NextSize = (PHEAP_ENTRY)NextFreeEntry - (PHEAP_ENTRY)DecommitSize; + + if (NextSize == 1) + { + /* Just 1 heap entry, increase the size */ + DecommitSize -= PAGE_SIZE; + NextSize += PAGE_SIZE >> HEAP_ENTRY_SHIFT; + } + else if (NextSize == 0 && + !(FreeEntry->Flags & HEAP_ENTRY_LAST_ENTRY)) + { + NextInUseEntry = (PHEAP_ENTRY)NextFreeEntry; + } + + NextFreeEntry = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)NextFreeEntry - NextSize); + + /* Calculate real decommit size */ + if (DecommitSize > DecommitBase) + { + DecommitSize -= DecommitBase; + } + else + { + /* Nothing to decommit */ + RtlpInsertFreeBlock(Heap, FreeEntry, Size); + return; + } + + /* A decommit is necessary. Create a UCR descriptor */ + UcrDescriptor = RtlpCreateUnCommittedRange(Segment); + if (!UcrDescriptor) + { + DPRINT1("HEAP: Failed to create UCR descriptor\n"); + RtlpInsertFreeBlock(Heap, FreeEntry, PrecedingSize); + return; + } + + /* Decommit the memory */ + Status = ZwFreeVirtualMemory(NtCurrentProcess(), + (PVOID *)&DecommitBase, + &DecommitSize, + MEM_DECOMMIT); + + /* Delete that UCR. 
This is needed to assure there is an unused UCR entry in the list */ + RtlpDestroyUnCommittedRange(Segment, UcrDescriptor); + + if (!NT_SUCCESS(Status)) + { + RtlpInsertFreeBlock(Heap, FreeEntry, Size); + return; + } + + /* Insert uncommitted pages */ + RtlpInsertUnCommittedPages(Segment, DecommitBase, DecommitSize); + Segment->NumberOfUnCommittedPages += (DecommitSize / PAGE_SIZE); + + if (PrecedingSize) + { + /* Adjust size of this free entry and insert it */ + FreeEntry->Flags = HEAP_ENTRY_LAST_ENTRY; + FreeEntry->Size = PrecedingSize; + Heap->TotalFreeSize += PrecedingSize; + + /* Set last entry in the segment to this entry */ + Segment->LastEntryInSegment = (PHEAP_ENTRY)FreeEntry; + + /* Insert it into the free list */ + RtlpInsertFreeBlockHelper(Heap, FreeEntry, PrecedingSize, FALSE); + } + else if (PrecedingInUseEntry) + { + /* Adjust preceding in use entry */ + PrecedingInUseEntry->Flags |= HEAP_ENTRY_LAST_ENTRY; + Segment->LastEntryInSegment = PrecedingInUseEntry; + } else if ((ULONG_PTR)Segment->LastEntryInSegment >= DecommitBase && + ((PCHAR)Segment->LastEntryInSegment < ((PCHAR)DecommitBase + DecommitSize))) + { + /* Update this segment's last entry */ + Segment->LastEntryInSegment = Segment->FirstEntry; + } + + /* Now the next one */ + if (NextSize) + { + /* Adjust size of this free entry and insert it */ + NextFreeEntry->Flags = 0; + NextFreeEntry->PreviousSize = 0; + NextFreeEntry->SegmentOffset = Segment->Entry.SegmentOffset; + NextFreeEntry->Size = NextSize; + + ((PHEAP_FREE_ENTRY)((PHEAP_ENTRY)NextFreeEntry + NextSize))->PreviousSize = NextSize; + + Heap->TotalFreeSize += NextSize; + RtlpInsertFreeBlockHelper(Heap, NextFreeEntry, NextSize, FALSE); + } + else if (NextInUseEntry) + { + NextInUseEntry->PreviousSize = 0; + } +} + +BOOLEAN NTAPI +RtlpInitializeHeapSegment(PHEAP Heap, + PHEAP_SEGMENT Segment, + UCHAR SegmentIndex, + ULONG Flags, + PVOID BaseAddress, + PVOID UncommittedBase, + PVOID LimitAddress) +{ + ULONG Pages, CommitSize; + PHEAP_ENTRY HeapEntry; + USHORT PreviousSize = 0, NewSize; + NTSTATUS Status; + + Pages = ((PCHAR)LimitAddress - (PCHAR)BaseAddress) / PAGE_SIZE; + + HeapEntry = (PHEAP_ENTRY)ROUND_UP(Segment + 1, HEAP_ENTRY_SIZE); + + DPRINT("RtlpInitializeHeapSegment(%p %p %x %x %p %p %p)\n", Heap, Segment, SegmentIndex, Flags, BaseAddress, UncommittedBase, LimitAddress); + DPRINT("Pages %x, HeapEntry %p, sizeof(HEAP_SEGMENT) %x\n", Pages, HeapEntry, sizeof(HEAP_SEGMENT)); + + /* Check if it's the first segment and remember its size */ + if (Heap == BaseAddress) + PreviousSize = Heap->Entry.Size; + + NewSize = ((PCHAR)HeapEntry - (PCHAR)Segment) >> HEAP_ENTRY_SHIFT; + + if ((PVOID)(HeapEntry + 1) >= UncommittedBase) + { + /* Check if it goes beyond the limit */ + if ((PVOID)(HeapEntry + 1) >= LimitAddress) + return FALSE; + + /* Need to commit memory */ + CommitSize = (PCHAR)(HeapEntry + 1) - (PCHAR)UncommittedBase; + Status = ZwAllocateVirtualMemory(NtCurrentProcess(), + (PVOID)&UncommittedBase, + 0, + &CommitSize, + MEM_COMMIT, + PAGE_READWRITE); + if (!NT_SUCCESS(Status)) + { + DPRINT1("Committing page failed with status 0x%08X\n", Status); + return FALSE; + } + + DPRINT("Committed %d bytes at base %p\n", CommitSize, UncommittedBase); + + /* Calcule the new uncommitted base */ + UncommittedBase = (PVOID)((PCHAR)UncommittedBase + CommitSize); + } + + /* Initialize the segment entry */ + Segment->Entry.PreviousSize = PreviousSize; + Segment->Entry.Size = NewSize; + Segment->Entry.Flags = HEAP_ENTRY_BUSY; + Segment->Entry.SegmentOffset = SegmentIndex; + 
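RtlpDeCommitFreeBlock above only returns whole pages that lie entirely inside the free block to the system, which is why it rounds the block start up and the block end down to a page boundary before deciding whether a decommit is worthwhile. A small sketch of that arithmetic, assuming the same ROUND_UP/ROUND_DOWN macros; GetDecommittableRange is an illustrative name, not part of the patch.

#if 0 /* Illustrative sketch */
static SIZE_T GetDecommittableRange(PHEAP_FREE_ENTRY FreeEntry,
                                    SIZE_T BlockSize, /* in heap entries */
                                    PULONG_PTR DecommitBase)
{
    ULONG_PTR BlockStart = (ULONG_PTR)FreeEntry;
    ULONG_PTR BlockEnd = (ULONG_PTR)((PHEAP_ENTRY)FreeEntry + BlockSize);
    ULONG_PTR Base, End;

    /* First page boundary inside the block, last page boundary before its end */
    Base = ROUND_UP(BlockStart, PAGE_SIZE);
    End = ROUND_DOWN(BlockEnd, PAGE_SIZE);

    *DecommitBase = Base;

    /* No whole page fits inside the block: nothing to decommit */
    if (End <= Base) return 0;

    return End - Base;
}
#endif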
+ /* Initialize the segment itself */ + Segment->SegmentSignature = HEAP_SEGMENT_SIGNATURE; + Segment->Heap = Heap; + Segment->BaseAddress = BaseAddress; + Segment->FirstEntry = HeapEntry; + Segment->LastValidEntry = (PHEAP_ENTRY)((PCHAR)BaseAddress + Pages * PAGE_SIZE); + Segment->NumberOfPages = Pages; + Segment->NumberOfUnCommittedPages = ((PCHAR)LimitAddress - (PCHAR)UncommittedBase) / PAGE_SIZE; + InitializeListHead(&Segment->UCRSegmentList); + + /* Insert uncommitted pages into UCR (uncommitted ranges) list */ + if (Segment->NumberOfUnCommittedPages) + { + RtlpInsertUnCommittedPages(Segment, (ULONG_PTR)UncommittedBase, Segment->NumberOfUnCommittedPages * PAGE_SIZE); + } + + /* Set the segment index pointer */ + Heap->Segments[SegmentIndex] = Segment; + + /* Prepare a free heap entry */ + HeapEntry->Flags = HEAP_ENTRY_LAST_ENTRY; + HeapEntry->PreviousSize = Segment->Entry.Size; + HeapEntry->SegmentOffset = SegmentIndex; + + /* Set last entry in segment */ + Segment->LastEntryInSegment = HeapEntry; + + /* Insert it */ + RtlpInsertFreeBlock(Heap, (PHEAP_FREE_ENTRY)HeapEntry, (PHEAP_ENTRY)UncommittedBase - HeapEntry); + + return TRUE; +} + +VOID NTAPI +RtlpDestroyHeapSegment(PHEAP_SEGMENT Segment) +{ + NTSTATUS Status; + PVOID BaseAddress; + SIZE_T Size = 0; + + /* Make sure it's not user allocated */ + if (Segment->SegmentFlags & HEAP_USER_ALLOCATED) return; + + BaseAddress = Segment->BaseAddress; + DPRINT("Destroying segment %p, BA %p\n", Segment, BaseAddress); + + /* Release virtual memory */ + Status = ZwFreeVirtualMemory(NtCurrentProcess(), + &BaseAddress, + &Size, + MEM_RELEASE); + + if (!NT_SUCCESS(Status)) + { + DPRINT1("HEAP: Failed to release segment's memory with status 0x%08X\n", Status); + } +} + +/* Usermode only! */ +VOID NTAPI +RtlpAddHeapToProcessList(PHEAP Heap) +{ + PPEB Peb; + + /* Get PEB */ + Peb = RtlGetCurrentPeb(); + + /* Acquire the lock */ + RtlEnterHeapLock(&RtlpProcessHeapsListLock); + + //_SEH2_TRY { + /* Check if max number of heaps reached */ + if (Peb->NumberOfHeaps == Peb->MaximumNumberOfHeaps) + { + // TODO: Handle this case + ASSERT(FALSE); + } + + /* Add the heap to the process heaps */ + Peb->ProcessHeaps[Peb->NumberOfHeaps] = Heap; + Peb->NumberOfHeaps++; + Heap->ProcessHeapsListIndex = Peb->NumberOfHeaps; + // } _SEH2_FINALLY { + + /* Release the lock */ + RtlLeaveHeapLock(&RtlpProcessHeapsListLock); + + // } _SEH2_END +} + +/* Usermode only! */ +VOID NTAPI +RtlpRemoveHeapFromProcessList(PHEAP Heap) +{ + PPEB Peb; + PHEAP *Current, *Next; + ULONG Count; + + /* Get PEB */ + Peb = RtlGetCurrentPeb(); + + /* Acquire the lock */ + RtlEnterHeapLock(&RtlpProcessHeapsListLock); + + /* Check if we don't need anything to do */ + if ((Heap->ProcessHeapsListIndex == 0) || + (Heap->ProcessHeapsListIndex > Peb->NumberOfHeaps) || + (Peb->NumberOfHeaps == 0)) + { + /* Release the lock */ + RtlLeaveHeapLock(&RtlpProcessHeapsListLock); + + return; + } + + /* The process actually has more than one heap. 
+ Use classic, lernt from university times algorithm for removing an entry + from a static array */ + + Current = (PHEAP *)&Peb->ProcessHeaps[Heap->ProcessHeapsListIndex - 1]; + Next = Current + 1; + + /* How many items we need to shift to the left */ + Count = Peb->NumberOfHeaps - (Heap->ProcessHeapsListIndex - 1); + + /* Move them all in a loop */ + while (--Count) + { + /* Copy it and advance next pointer */ + *Current = *Next; + + /* Update its index */ + (*Current)->ProcessHeapsListIndex -= 1; + + /* Advance pointers */ + Current++; + Next++; + } + + /* Decrease total number of heaps */ + Peb->NumberOfHeaps--; + + /* Zero last unused item */ + Peb->ProcessHeaps[Peb->NumberOfHeaps] = NULL; + Heap->ProcessHeapsListIndex = 0; + + /* Release the lock */ + RtlLeaveHeapLock(&RtlpProcessHeapsListLock); +} + +PHEAP_FREE_ENTRY NTAPI +RtlpCoalesceHeap(PHEAP Heap) +{ + UNIMPLEMENTED; + return NULL; +} + +PHEAP_FREE_ENTRY NTAPI +RtlpCoalesceFreeBlocks (PHEAP Heap, + PHEAP_FREE_ENTRY FreeEntry, + PSIZE_T FreeSize, + BOOLEAN Remove) +{ + PHEAP_FREE_ENTRY CurrentEntry, NextEntry; + + /* Get the previous entry */ + CurrentEntry = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)FreeEntry - FreeEntry->PreviousSize); + + /* Check it */ + if (CurrentEntry != FreeEntry && + !(CurrentEntry->Flags & HEAP_ENTRY_BUSY) && + (*FreeSize + CurrentEntry->Size) <= HEAP_MAX_BLOCK_SIZE) + { + ASSERT(FreeEntry->PreviousSize == CurrentEntry->Size); + + /* Remove it if asked for */ + if (Remove) + { + RtlpRemoveFreeBlock(Heap, FreeEntry, FALSE, FALSE); + Heap->TotalFreeSize -= FreeEntry->Size; + + /* Remove it only once! */ + Remove = FALSE; + } + + /* Remove previous entry too */ + RtlpRemoveFreeBlock(Heap, CurrentEntry, FALSE, FALSE); + + /* Copy flags */ + CurrentEntry->Flags = FreeEntry->Flags & HEAP_ENTRY_LAST_ENTRY; + + /* Update last entry in the segment */ + if (CurrentEntry->Flags & HEAP_ENTRY_LAST_ENTRY) + Heap->Segments[CurrentEntry->SegmentOffset]->LastEntryInSegment = (PHEAP_ENTRY)CurrentEntry; + + /* Advance FreeEntry and update sizes */ + FreeEntry = CurrentEntry; + *FreeSize = *FreeSize + CurrentEntry->Size; + Heap->TotalFreeSize -= CurrentEntry->Size; + FreeEntry->Size = *FreeSize; + + /* Also update previous size if needed */ + if (!(FreeEntry->Flags & HEAP_ENTRY_LAST_ENTRY)) + { + ((PHEAP_ENTRY)FreeEntry + *FreeSize)->PreviousSize = *FreeSize; + } + } + + /* Check the next block if it exists */ + if (!(FreeEntry->Flags & HEAP_ENTRY_LAST_ENTRY)) + { + NextEntry = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)FreeEntry + *FreeSize); + + if (!(NextEntry->Flags & HEAP_ENTRY_BUSY) && + NextEntry->Size + *FreeSize <= HEAP_MAX_BLOCK_SIZE) + { + ASSERT(*FreeSize == NextEntry->PreviousSize); + + /* Remove it if asked for */ + if (Remove) + { + RtlpRemoveFreeBlock(Heap, FreeEntry, FALSE, FALSE); + Heap->TotalFreeSize -= FreeEntry->Size; + } + + /* Copy flags */ + FreeEntry->Flags = NextEntry->Flags & HEAP_ENTRY_LAST_ENTRY; + + /* Update last entry in the segment */ + if (FreeEntry->Flags & HEAP_ENTRY_LAST_ENTRY) + Heap->Segments[FreeEntry->SegmentOffset]->LastEntryInSegment = (PHEAP_ENTRY)FreeEntry; + + /* Remove next entry now */ + RtlpRemoveFreeBlock(Heap, NextEntry, FALSE, FALSE); + + /* Update sizes */ + *FreeSize = *FreeSize + NextEntry->Size; + Heap->TotalFreeSize -= NextEntry->Size; + FreeEntry->Size = *FreeSize; + + /* Also update previous size if needed */ + if (!(FreeEntry->Flags & HEAP_ENTRY_LAST_ENTRY)) + { + ((PHEAP_ENTRY)FreeEntry + *FreeSize)->PreviousSize = *FreeSize; + } + } + } + return FreeEntry; +} + +PHEAP_FREE_ENTRY 
NTAPI +RtlpExtendHeap(PHEAP Heap, + SIZE_T Size) +{ + ULONG Pages; + UCHAR Index, EmptyIndex; + SIZE_T FreeSize, CommitSize, ReserveSize; + PHEAP_SEGMENT Segment; + PHEAP_FREE_ENTRY FreeEntry; + NTSTATUS Status; + + DPRINT("RtlpExtendHeap(%p %x)\n", Heap, Size); + + /* Calculate amount in pages */ + Pages = (Size + PAGE_SIZE - 1) / PAGE_SIZE; + FreeSize = Pages * PAGE_SIZE; + DPRINT("Pages %x, FreeSize %x. Going through segments...\n", Pages, FreeSize); + + /* Find an empty segment */ + EmptyIndex = HEAP_SEGMENTS; + for (Index = 0; Index < HEAP_SEGMENTS; Index++) + { + Segment = Heap->Segments[Index]; + + if (Segment) DPRINT("Segment[%d] %p with NOUCP %x\n", Index, Segment, Segment->NumberOfUnCommittedPages); + + /* Check if its size suits us */ + if (Segment && + Pages <= Segment->NumberOfUnCommittedPages) + { + DPRINT("This segment is suitable\n"); + + /* Commit needed amount */ + FreeEntry = RtlpFindAndCommitPages(Heap, Segment, &FreeSize, NULL); + + /* Coalesce it with adjacent entries */ + if (FreeEntry) + { + FreeSize = FreeSize >> HEAP_ENTRY_SHIFT; + FreeEntry = RtlpCoalesceFreeBlocks(Heap, FreeEntry, &FreeSize, FALSE); + RtlpInsertFreeBlock(Heap, FreeEntry, FreeSize); + return FreeEntry; + } + } + else if (!Segment && + EmptyIndex == HEAP_SEGMENTS) + { + /* Remember the first unused segment index */ + EmptyIndex = Index; + } + } + + /* No luck, need to grow the heap */ + if ((Heap->Flags & HEAP_GROWABLE) && + (EmptyIndex != HEAP_SEGMENTS)) + { + Segment = NULL; + + /* Reserve the memory */ + if ((Size + PAGE_SIZE) <= Heap->SegmentReserve) + ReserveSize = Heap->SegmentReserve; + else + ReserveSize = Size + PAGE_SIZE; + + Status = ZwAllocateVirtualMemory(NtCurrentProcess(), + (PVOID)&Segment, + 0, + &ReserveSize, + MEM_RESERVE, + PAGE_READWRITE); + + /* If it failed, retry again with a half division algorithm */ + while (!NT_SUCCESS(Status) && + ReserveSize != Size + PAGE_SIZE) + { + ReserveSize /= 2; + + if (ReserveSize < (Size + PAGE_SIZE)) + ReserveSize = Size + PAGE_SIZE; + + Status = ZwAllocateVirtualMemory(NtCurrentProcess(), + (PVOID)&Segment, + 0, + &ReserveSize, + MEM_RESERVE, + PAGE_READWRITE); + } + + /* Proceed only if it's success */ + if (NT_SUCCESS(Status)) + { + Heap->SegmentReserve += ReserveSize; + + /* Now commit the memory */ + if ((Size + PAGE_SIZE) <= Heap->SegmentCommit) + CommitSize = Heap->SegmentCommit; + else + CommitSize = Size + PAGE_SIZE; + + Status = ZwAllocateVirtualMemory(NtCurrentProcess(), + (PVOID)&Segment, + 0, + &CommitSize, + MEM_COMMIT, + PAGE_READWRITE); + + DPRINT("Committed %d bytes at base %p\n", CommitSize, Segment); + + /* Initialize heap segment if commit was successful */ + if (NT_SUCCESS(Status)) + { + if (!RtlpInitializeHeapSegment(Heap, Segment, EmptyIndex, 0, Segment, + (PCHAR)Segment + CommitSize, (PCHAR)Segment + ReserveSize)) + { + Status = STATUS_NO_MEMORY; + } + } + + /* If everything worked - cool */ + if (NT_SUCCESS(Status)) return (PHEAP_FREE_ENTRY)Segment->FirstEntry; + + DPRINT1("Committing failed with status 0x%08X\n", Status); + + /* Nope, we failed. 
Free memory */ + ZwFreeVirtualMemory(NtCurrentProcess(), + (PVOID)&Segment, + &ReserveSize, + MEM_RELEASE); + } + else + { + DPRINT1("Reserving failed with status 0x%08X\n", Status); + } + } + + if (RtlpGetMode() == UserMode) + { + /* If coalescing on free is disabled in usermode, then do it here */ + if (Heap->Flags & HEAP_DISABLE_COALESCE_ON_FREE) + { + FreeEntry = RtlpCoalesceHeap(Heap); + + /* If it's a suitable one - return it */ + if (FreeEntry && + FreeEntry->Size >= Size) + { + return FreeEntry; + } + } + } + return NULL; } /*********************************************************************** - * HEAP_Commit - * - * Make sure the heap storage is committed for a given size in the specified arena. - */ -static __inline BOOL HEAP_Commit( SUBHEAP *subheap, ARENA_INUSE *pArena, SIZE_T data_size ) -{ - NTSTATUS Status; - void *ptr = (char *)(pArena + 1) + data_size + sizeof(ARENA_FREE); - SIZE_T size = (char *)ptr - (char *)subheap; - size = (size + COMMIT_MASK) & ~COMMIT_MASK; - if (size > subheap->size) size = subheap->size; - if (size <= subheap->commitSize) return TRUE; - size -= subheap->commitSize; - ptr = (char *)subheap + subheap->commitSize; - if (subheap->heap->commitRoutine != NULL) - { - Status = subheap->heap->commitRoutine(subheap->heap, &ptr, &size); - } - else - { - Status = ZwAllocateVirtualMemory( NtCurrentProcess(), &ptr, 0, - &size, MEM_COMMIT, get_protection_type(subheap->heap->flags) ); - } - if (!NT_SUCCESS(Status)) - { - WARN("Could not commit %08lx bytes at %p for heap %p\n", - size, ptr, subheap->heap ); - return FALSE; - } - subheap->commitSize += size; - return TRUE; -} - -#if 0 -/*********************************************************************** - * HEAP_Decommit - * - * If possible, decommit the heap storage from (including) 'ptr'. - */ -static inline BOOL HEAP_Decommit( SUBHEAP *subheap, void *ptr ) -{ - void *addr; - SIZE_T decommit_size; - SIZE_T size = (char *)ptr - (char *)subheap; - - /* round to next block and add one full block */ - size = ((size + COMMIT_MASK) & ~COMMIT_MASK) + COMMIT_MASK + 1; - if (size >= subheap->commitSize) return TRUE; - decommit_size = subheap->commitSize - size; - addr = (char *)subheap + size; - - if (ZwFreeVirtualMemory( NtCurrentProcess(), &addr, &decommit_size, MEM_DECOMMIT )) - { - WARN("Could not decommit %08lx bytes at %p for heap %p\n", - decommit_size, (char *)subheap + size, subheap->heap ); - return FALSE; - } - subheap->commitSize -= decommit_size; - return TRUE; -} -#endif - -/*********************************************************************** - * HEAP_CreateFreeBlock - * - * Create a free block at a specified address. 'size' is the size of the - * whole block, including the new arena. 
- */ -static void HEAP_CreateFreeBlock( SUBHEAP *subheap, void *ptr, SIZE_T size ) -{ - ARENA_FREE *pFree; - char *pEnd; - BOOL last; - - /* Create a free arena */ - mark_block_uninitialized( ptr, sizeof( ARENA_FREE ) ); - pFree = (ARENA_FREE *)ptr; - pFree->magic = ARENA_FREE_MAGIC; - - /* If debugging, erase the freed block content */ - - pEnd = (char *)ptr + size; - if (pEnd > (char *)subheap + subheap->commitSize) pEnd = (char *)subheap + subheap->commitSize; - if (pEnd > (char *)(pFree + 1)) mark_block_free( pFree + 1, pEnd - (char *)(pFree + 1) ); - - /* Check if next block is free also */ - - if (((char *)ptr + size < (char *)subheap + subheap->size) && - (*(DWORD *)((char *)ptr + size) & ARENA_FLAG_FREE)) - { - /* Remove the next arena from the free list */ - ARENA_FREE *pNext = (ARENA_FREE *)((char *)ptr + size); - list_remove( &pNext->entry ); - size += (pNext->size & ARENA_SIZE_MASK) + sizeof(*pNext); - mark_block_free( pNext, sizeof(ARENA_FREE) ); - } - - /* Set the next block PREV_FREE flag and pointer */ - - last = ((char *)ptr + size >= (char *)subheap + subheap->size); - if (!last) - { - DWORD *pNext = (DWORD *)((char *)ptr + size); - *pNext |= ARENA_FLAG_PREV_FREE; - mark_block_initialized( pNext - 1, sizeof( ARENA_FREE * ) ); - *((ARENA_FREE **)pNext - 1) = pFree; - } - - /* Last, insert the new block into the free list */ - - pFree->size = size - sizeof(*pFree); - HEAP_InsertFreeBlock( subheap->heap, pFree, last ); -} - - -/*********************************************************************** - * HEAP_MakeInUseBlockFree - * - * Turn an in-use block into a free block. Can also decommit the end of - * the heap, and possibly even free the sub-heap altogether. - */ -static void HEAP_MakeInUseBlockFree( SUBHEAP *subheap, ARENA_INUSE *pArena ) -{ - ARENA_FREE *pFree; - SIZE_T size = (pArena->size & ARENA_SIZE_MASK) + sizeof(*pArena); - PHEAP_USER_DATA udata; - - /* Find and free user data */ - if (pArena->has_user_data) - { - udata = HEAP_GetUserData(subheap->heap, pArena + 1); - if (udata) - { - RemoveEntryList(&udata->ListEntry); - RtlFreeHeap(subheap->heap, 0, udata); - } - } - - /* Check if we can merge with previous block */ - - if (pArena->size & ARENA_FLAG_PREV_FREE) - { - pFree = *((ARENA_FREE **)pArena - 1); - size += (pFree->size & ARENA_SIZE_MASK) + sizeof(ARENA_FREE); - /* Remove it from the free list */ - list_remove( &pFree->entry ); - } - else pFree = (ARENA_FREE *)pArena; - - /* Create a free block */ - - HEAP_CreateFreeBlock( subheap, pFree, size ); - size = (pFree->size & ARENA_SIZE_MASK) + sizeof(ARENA_FREE); - if ((char *)pFree + size < (char *)subheap + subheap->size) - return; /* Not the last block, so nothing more to do */ - - /* Free the whole sub-heap if it's empty and not the original one */ - - if (((char *)pFree == (char *)subheap + subheap->headerSize) && - (subheap != &subheap->heap->subheap)) - { - SIZE_T size = 0; - SUBHEAP *pPrev = &subheap->heap->subheap; - /* Remove the free block from the list */ - list_remove( &pFree->entry ); - /* Remove the subheap from the list */ - while (pPrev && (pPrev->next != subheap)) pPrev = pPrev->next; - if (pPrev) pPrev->next = subheap->next; - /* Free the memory */ - subheap->magic = 0; - ZwFreeVirtualMemory( NtCurrentProcess(), (void **)&subheap, &size, MEM_RELEASE ); - return; - } - - /* Decommit the end of the heap */ -} - -/*********************************************************************** - * HEAP_ShrinkBlock - * - * Shrink an in-use block. 
- */ -static void HEAP_ShrinkBlock(SUBHEAP *subheap, ARENA_INUSE *pArena, SIZE_T size) -{ - if ((pArena->size & ARENA_SIZE_MASK) >= size + HEAP_MIN_SHRINK_SIZE) - { - HEAP_CreateFreeBlock( subheap, (char *)(pArena + 1) + size, - (pArena->size & ARENA_SIZE_MASK) - size ); - /* assign size plus previous arena flags */ - pArena->size = size | (pArena->size & ~ARENA_SIZE_MASK); - } - else - { - /* Turn off PREV_FREE flag in next block */ - char *pNext = (char *)(pArena + 1) + (pArena->size & ARENA_SIZE_MASK); - if (pNext < (char *)subheap + subheap->size) - *(DWORD *)pNext &= ~ARENA_FLAG_PREV_FREE; - } -} - -/*********************************************************************** - * HEAP_InitSubHeap - */ -static BOOL HEAP_InitSubHeap( HEAP *heap, LPVOID address, DWORD flags, - SIZE_T commitSize, SIZE_T totalSize, - PRTL_HEAP_PARAMETERS Parameters) -{ - SUBHEAP *subheap; - FREE_LIST_ENTRY *pEntry; - int i; - NTSTATUS Status; - - if (!address && ZwAllocateVirtualMemory( NtCurrentProcess(), &address, 0, - &commitSize, MEM_COMMIT, get_protection_type(flags) )) - { - WARN("Could not commit %08lx bytes for sub-heap %p\n", commitSize, address ); - return FALSE; - } - - /* Fill the sub-heap structure */ - - subheap = (SUBHEAP *)address; - subheap->heap = heap; - subheap->size = totalSize; - subheap->commitSize = commitSize; - subheap->magic = SUBHEAP_MAGIC; - - if ( subheap != (SUBHEAP *)heap ) - { - /* If this is a secondary subheap, insert it into list */ - - subheap->headerSize = ROUND_SIZE( sizeof(SUBHEAP) ); - subheap->next = heap->subheap.next; - heap->subheap.next = subheap; - } - else - { - /* If this is a primary subheap, initialize main heap */ - - subheap->headerSize = ROUND_SIZE( sizeof(HEAP) ); - subheap->next = NULL; - heap->flags = flags; - heap->magic = HEAP_MAGIC; - if (Parameters) - heap->commitRoutine = Parameters->CommitRoutine; - else - heap->commitRoutine = NULL; - InitializeListHead(&heap->UserDataHead); - - /* Build the free lists */ - - list_init( &heap->freeList[0].arena.entry ); - for (i = 0, pEntry = heap->freeList; i < HEAP_NB_FREE_LISTS; i++, pEntry++) - { - pEntry->arena.size = 0 | ARENA_FLAG_FREE; - pEntry->arena.magic = ARENA_FREE_MAGIC; - if (i) list_add_after( &pEntry[-1].arena.entry, &pEntry->arena.entry ); - } - - /* Initialize critical section */ - - if (RtlpGetMode() == UserMode) - { - if (!processHeap) /* do it by hand to avoid memory allocations */ - { - heap->lock.CriticalSection.DebugInfo = &process_heap_critsect_debug; - heap->lock.CriticalSection.LockCount = -1; - heap->lock.CriticalSection.RecursionCount = 0; - heap->lock.CriticalSection.OwningThread = 0; - heap->lock.CriticalSection.LockSemaphore = 0; - heap->lock.CriticalSection.SpinCount = 0; - process_heap_critsect_debug.CriticalSection = &heap->lock.CriticalSection; - } - else RtlInitializeHeapLock( &heap->lock ); - } - } - - /* Commit memory */ - if (heap->commitRoutine) - { - if (subheap != (SUBHEAP *)heap) - { - Status = heap->commitRoutine(heap, &address, &commitSize); - } - else - { - /* the caller is responsible for committing the first page! 
*/ - Status = STATUS_SUCCESS; - } - } - else - { - Status = ZwAllocateVirtualMemory(NtCurrentProcess(), - &address, - 0, - &commitSize, - MEM_COMMIT, - get_protection_type(flags)); - } - if (!NT_SUCCESS(Status)) - { - DPRINT("Could not commit %08lx bytes for sub-heap %p\n", - commitSize, address); - return FALSE; - } - - /* Create the first free block */ - - HEAP_CreateFreeBlock( subheap, (LPBYTE)subheap + subheap->headerSize, - subheap->size - subheap->headerSize ); - - return TRUE; -} - -/*********************************************************************** - * HEAP_CreateSubHeap - * - * Create a sub-heap of the given size. - * If heap == NULL, creates a main heap. - */ -static SUBHEAP *HEAP_CreateSubHeap( HEAP *heap, void *base, DWORD flags, - SIZE_T commitSize, SIZE_T totalSize, - IN PRTL_HEAP_PARAMETERS Parameters) -{ - LPVOID address = base; - - /* round-up sizes on a 64K boundary */ - totalSize = (totalSize + 0xffff) & 0xffff0000; - commitSize = (commitSize + 0xffff) & 0xffff0000; - if (!commitSize) commitSize = 0x10000; - totalSize = min( totalSize, 0xffff0000 ); /* don't allow a heap larger than 4Gb */ - if (totalSize < commitSize) totalSize = commitSize; - - if (!address) - { - /* allocate the memory block */ - if (ZwAllocateVirtualMemory( NtCurrentProcess(), &address, 0, &totalSize, - MEM_RESERVE | MEM_COMMIT, get_protection_type(flags) )) - { - WARN("Could not allocate %08lx bytes\n", totalSize ); - return NULL; - } - } - - /* Initialize subheap */ - - if (!HEAP_InitSubHeap( heap ? heap : (HEAP *)address, - address, flags, commitSize, totalSize, Parameters )) - { - SIZE_T size = 0; - if (!base) ZwFreeVirtualMemory( NtCurrentProcess(), &address, &size, MEM_RELEASE ); - return NULL; - } - - return (SUBHEAP *)address; -} - - -/*********************************************************************** - * HEAP_FindFreeBlock - * - * Find a free block at least as large as the requested size, and make sure - * the requested size is committed. - */ -static ARENA_FREE *HEAP_FindFreeBlock( HEAP *heap, SIZE_T size, - SUBHEAP **ppSubHeap ) -{ - SUBHEAP *subheap; - struct list *ptr; - SIZE_T total_size; - FREE_LIST_ENTRY *pEntry = heap->freeList + get_freelist_index( size + sizeof(ARENA_INUSE) ); - - /* Find a suitable free list, and in it find a block large enough */ - - ptr = &pEntry->arena.entry; - while ((ptr = list_next( &heap->freeList[0].arena.entry, ptr ))) - { - ARENA_FREE *pArena = LIST_ENTRY( ptr, ARENA_FREE, entry ); - SIZE_T arena_size = (pArena->size & ARENA_SIZE_MASK) + - sizeof(ARENA_FREE) - sizeof(ARENA_INUSE); - if (arena_size >= size) - { - subheap = HEAP_FindSubHeap( heap, pArena ); - if (!HEAP_Commit( subheap, (ARENA_INUSE *)pArena, size )) return NULL; - *ppSubHeap = subheap; - return pArena; - } - } - - /* If no block was found, attempt to grow the heap */ - - if (!(heap->flags & HEAP_GROWABLE)) - { - ERR("Not enough space in heap %p for %08lx bytes\n", heap, size ); - return NULL; - } - /* make sure that we have a big enough size *committed* to fit another - * last free arena in ! 
- * So just one heap struct, one first free arena which will eventually - * get used, and a second free arena that might get assigned all remaining - * free space in HEAP_ShrinkBlock() */ - total_size = size + ROUND_SIZE(sizeof(SUBHEAP)) + sizeof(ARENA_INUSE) + sizeof(ARENA_FREE); - if (total_size < size) return NULL; /* overflow */ - - if (!(subheap = HEAP_CreateSubHeap( heap, NULL, heap->flags, total_size, - max( HEAP_DEF_SIZE, total_size ), NULL ))) - return NULL; - - TRACE("created new sub-heap %p of %08lx bytes for heap %p\n", - subheap, size, heap ); - - *ppSubHeap = subheap; - return (ARENA_FREE *)(subheap + 1); -} - -/*********************************************************************** - * HEAP_IsValidArenaPtr - * - * Check that the pointer is inside the range possible for arenas. - */ -static BOOL HEAP_IsValidArenaPtr( const HEAP *heap, const void *ptr ) -{ - int i; - const SUBHEAP *subheap = HEAP_FindSubHeap( heap, ptr ); - if (!subheap) return FALSE; - if ((const char *)ptr >= (const char *)subheap + subheap->headerSize) return TRUE; - if (subheap != &heap->subheap) return FALSE; - for (i = 0; i < HEAP_NB_FREE_LISTS; i++) - if (ptr == (const void *)&heap->freeList[i].arena) return TRUE; - return FALSE; -} - - -/*********************************************************************** - * HEAP_ValidateFreeArena - */ -static BOOL HEAP_ValidateFreeArena( SUBHEAP *subheap, ARENA_FREE *pArena ) -{ - ARENA_FREE *prev, *next; - char *heapEnd = (char *)subheap + subheap->size; - - /* Check for unaligned pointers */ - if ( (ULONG_PTR)pArena % ALIGNMENT != ARENA_OFFSET ) - { - ERR("Heap %p: unaligned arena pointer %p\n", subheap->heap, pArena ); - return FALSE; - } - - /* Check magic number */ - if (pArena->magic != ARENA_FREE_MAGIC) - { - ERR("Heap %p: invalid free arena magic for %p\n", subheap->heap, pArena ); - return FALSE; - } - /* Check size flags */ - if (!(pArena->size & ARENA_FLAG_FREE) || - (pArena->size & ARENA_FLAG_PREV_FREE)) - { - ERR("Heap %p: bad flags %08lx for free arena %p\n", - subheap->heap, pArena->size & ~ARENA_SIZE_MASK, pArena ); - return FALSE; - } - /* Check arena size */ - if ((char *)(pArena + 1) + (pArena->size & ARENA_SIZE_MASK) > heapEnd) - { - ERR("Heap %p: bad size %08lx for free arena %p\n", - subheap->heap, pArena->size & ARENA_SIZE_MASK, pArena ); - return FALSE; - } - /* Check that next pointer is valid */ - next = LIST_ENTRY( pArena->entry.next, ARENA_FREE, entry ); - if (!HEAP_IsValidArenaPtr( subheap->heap, next )) - { - ERR("Heap %p: bad next ptr %p for arena %p\n", - subheap->heap, next, pArena ); - return FALSE; - } - /* Check that next arena is free */ - if (!(next->size & ARENA_FLAG_FREE) || (next->magic != ARENA_FREE_MAGIC)) - { - ERR("Heap %p: next arena %p invalid for %p\n", - subheap->heap, next, pArena ); - return FALSE; - } - /* Check that prev pointer is valid */ - prev = LIST_ENTRY( pArena->entry.prev, ARENA_FREE, entry ); - if (!HEAP_IsValidArenaPtr( subheap->heap, prev )) - { - ERR("Heap %p: bad prev ptr %p for arena %p\n", - subheap->heap, prev, pArena ); - return FALSE; - } - /* Check that prev arena is free */ - if (!(prev->size & ARENA_FLAG_FREE) || (prev->magic != ARENA_FREE_MAGIC)) - { - /* this often means that the prev arena got overwritten - * by a memory write before that prev arena */ - ERR("Heap %p: prev arena %p invalid for %p\n", - subheap->heap, prev, pArena ); - return FALSE; - } - /* Check that next block has PREV_FREE flag */ - if ((char *)(pArena + 1) + (pArena->size & ARENA_SIZE_MASK) < heapEnd) - { - if 
(!(*(DWORD *)((char *)(pArena + 1) + - (pArena->size & ARENA_SIZE_MASK)) & ARENA_FLAG_PREV_FREE)) - { - ERR("Heap %p: free arena %p next block has no PREV_FREE flag\n", - subheap->heap, pArena ); - return FALSE; - } - /* Check next block back pointer */ - if (*((ARENA_FREE **)((char *)(pArena + 1) + - (pArena->size & ARENA_SIZE_MASK)) - 1) != pArena) - { - ERR("Heap %p: arena %p has wrong back ptr %p\n", - subheap->heap, pArena, - *((ARENA_FREE **)((char *)(pArena+1) + (pArena->size & ARENA_SIZE_MASK)) - 1)); - return FALSE; - } - } - return TRUE; -} - -/*********************************************************************** - * HEAP_ValidateInUseArena - */ -static BOOL HEAP_ValidateInUseArena( const SUBHEAP *subheap, const ARENA_INUSE *pArena, BOOL quiet ) -{ - const char *heapEnd = (const char *)subheap + subheap->size; - - /* Check for unaligned pointers */ - if ( (ULONG_PTR)pArena % ALIGNMENT != ARENA_OFFSET ) - { - if ( quiet == NOISY ) - { - ERR( "Heap %p: unaligned arena pointer %p\n", subheap->heap, pArena ); - if ( TRACE_ON(heap) ) - HEAP_Dump( subheap->heap ); - } - else if ( WARN_ON(heap) ) - { - WARN( "Heap %p: unaligned arena pointer %p\n", subheap->heap, pArena ); - if ( TRACE_ON(heap) ) - HEAP_Dump( subheap->heap ); - } - return FALSE; - } - - /* Check magic number */ - if (pArena->magic != ARENA_INUSE_MAGIC) - { - if (quiet == NOISY) { - ERR("Heap %p: invalid in-use arena magic for %p\n", subheap->heap, pArena ); - if (TRACE_ON(heap)) - HEAP_Dump( subheap->heap ); - } else if (WARN_ON(heap)) { - WARN("Heap %p: invalid in-use arena magic for %p\n", subheap->heap, pArena ); - if (TRACE_ON(heap)) - HEAP_Dump( subheap->heap ); - } - return FALSE; - } - /* Check size flags */ - if (pArena->size & ARENA_FLAG_FREE) - { - ERR("Heap %p: bad flags %08lx for in-use arena %p\n", - subheap->heap, pArena->size & ~ARENA_SIZE_MASK, pArena ); - return FALSE; - } - /* Check arena size */ - if ((const char *)(pArena + 1) + (pArena->size & ARENA_SIZE_MASK) > heapEnd) - { - ERR("Heap %p: bad size %08lx for in-use arena %p\n", - subheap->heap, pArena->size & ARENA_SIZE_MASK, pArena ); - return FALSE; - } - /* Check next arena PREV_FREE flag */ - if (((const char *)(pArena + 1) + (pArena->size & ARENA_SIZE_MASK) < heapEnd) && - (*(const DWORD *)((const char *)(pArena + 1) + (pArena->size & ARENA_SIZE_MASK)) & ARENA_FLAG_PREV_FREE)) - { - ERR("Heap %p: in-use arena %p next block has PREV_FREE flag\n", - subheap->heap, pArena ); - return FALSE; - } - /* Check prev free arena */ - if (pArena->size & ARENA_FLAG_PREV_FREE) - { - const ARENA_FREE *pPrev = *((const ARENA_FREE * const*)pArena - 1); - /* Check prev pointer */ - if (!HEAP_IsValidArenaPtr( subheap->heap, pPrev )) - { - ERR("Heap %p: bad back ptr %p for arena %p\n", - subheap->heap, pPrev, pArena ); - return FALSE; - } - /* Check that prev arena is free */ - if (!(pPrev->size & ARENA_FLAG_FREE) || - (pPrev->magic != ARENA_FREE_MAGIC)) - { - ERR("Heap %p: prev arena %p invalid for in-use %p\n", - subheap->heap, pPrev, pArena ); - return FALSE; - } - /* Check that prev arena is really the previous block */ - if ((const char *)(pPrev + 1) + (pPrev->size & ARENA_SIZE_MASK) != (const char *)pArena) - { - ERR("Heap %p: prev arena %p is not prev for in-use %p\n", - subheap->heap, pPrev, pArena ); - return FALSE; - } - } - return TRUE; -} - -/*********************************************************************** - * HEAP_IsRealArena [Internal] - * Validates a block is a valid arena. 
- * - * RETURNS - * TRUE: Success - * FALSE: Failure - */ -static BOOL HEAP_IsRealArena( HEAP *heapPtr, /* [in] ptr to the heap */ - DWORD flags, /* [in] Bit flags that control access during operation */ - LPCVOID block, /* [in] Optional pointer to memory block to validate */ - BOOL quiet ) /* [in] Flag - if true, HEAP_ValidateInUseArena - * does not complain */ -{ - SUBHEAP *subheap; - BOOL ret = TRUE; - - if (!heapPtr || (heapPtr->magic != HEAP_MAGIC)) - { - ERR("Invalid heap %p!\n", heapPtr ); - return FALSE; - } - - flags &= HEAP_NO_SERIALIZE; - flags |= heapPtr->flags; - /* calling HeapLock may result in infinite recursion, so do the critsect directly */ - if (!(flags & HEAP_NO_SERIALIZE)) - RtlEnterHeapLock( &heapPtr->lock ); - - if (block) - { - /* Only check this single memory block */ - - if (!(subheap = HEAP_FindSubHeap( heapPtr, block )) || - ((const char *)block < (char *)subheap + subheap->headerSize - + sizeof(ARENA_INUSE))) - { - if (quiet == NOISY) - ERR("Heap %p: block %p is not inside heap\n", heapPtr, block ); - else if (WARN_ON(heap)) - WARN("Heap %p: block %p is not inside heap\n", heapPtr, block ); - ret = FALSE; - } else - ret = HEAP_ValidateInUseArena( subheap, (const ARENA_INUSE *)block - 1, quiet ); - - if (!(flags & HEAP_NO_SERIALIZE)) - RtlLeaveHeapLock( &heapPtr->lock ); - return ret; - } - - subheap = &heapPtr->subheap; - while (subheap && ret) - { - char *ptr = (char *)subheap + subheap->headerSize; - while (ptr < (char *)subheap + subheap->size) - { - if (*(DWORD *)ptr & ARENA_FLAG_FREE) - { - if (!HEAP_ValidateFreeArena( subheap, (ARENA_FREE *)ptr )) { - ret = FALSE; - break; - } - ptr += sizeof(ARENA_FREE) + (*(DWORD *)ptr & ARENA_SIZE_MASK); - } - else - { - if (!HEAP_ValidateInUseArena( subheap, (ARENA_INUSE *)ptr, NOISY )) { - ret = FALSE; - break; - } - ptr += sizeof(ARENA_INUSE) + (*(DWORD *)ptr & ARENA_SIZE_MASK); - } - } - subheap = subheap->next; - } - - if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->lock ); - return ret; -} - - -/*********************************************************************** - * HeapCreate (KERNEL32.336) + * RtlCreateHeap * RETURNS * Handle of heap: Success * NULL: Failure @@ -1157,47 +1342,362 @@ static BOOL HEAP_IsRealArena( HEAP *heapPtr, /* [in] ptr to the heap */ * @implemented */ HANDLE NTAPI -RtlCreateHeap(ULONG flags, - PVOID addr, - SIZE_T totalSize, - SIZE_T commitSize, +RtlCreateHeap(ULONG Flags, + PVOID Addr, + SIZE_T TotalSize, + SIZE_T CommitSize, PVOID Lock, PRTL_HEAP_PARAMETERS Parameters) { - SUBHEAP *subheap; + PVOID CommittedAddress = NULL, UncommittedAddress = NULL; + PHEAP Heap = NULL; + RTL_HEAP_PARAMETERS SafeParams = {0}; + PPEB Peb; + ULONG_PTR MaximumUserModeAddress; + SYSTEM_BASIC_INFORMATION SystemInformation; + MEMORY_BASIC_INFORMATION MemoryInfo; + ULONG NtGlobalFlags = RtlGetNtGlobalFlags(); + ULONG HeapSegmentFlags = 0; + NTSTATUS Status; + ULONG MaxBlockSize, HeaderSize; + BOOLEAN AllocateLock = FALSE; - /* Allocate the heap block */ - - if (!totalSize) + /* Check for a special heap */ + if (RtlpPageHeapEnabled && !Addr && !Lock) { - totalSize = HEAP_DEF_SIZE; - flags |= HEAP_GROWABLE; - } - if (!(subheap = HEAP_CreateSubHeap( NULL, addr, flags, commitSize, totalSize, Parameters ))) return 0; + Heap = RtlpPageHeapCreate(Flags, Addr, TotalSize, CommitSize, Lock, Parameters); + if (Heap) return Heap; - if (RtlpGetMode() == UserMode) - { - /* link it into the per-process heap list */ - if (processHeap) + //ASSERT(FALSE); + DPRINT1("Enabling page heap failed\n"); + } + + /* 
Check validation flags */ + if (!(Flags & HEAP_SKIP_VALIDATION_CHECKS) && (Flags & ~HEAP_CREATE_VALID_MASK)) + { + DPRINT1("Invalid flags 0x%08x, fixing...\n", Flags); + Flags &= HEAP_CREATE_VALID_MASK; + } + + /* TODO: Capture parameters, once we decide to use SEH */ + if (!Parameters) Parameters = &SafeParams; + + /* Check global flags */ + if (NtGlobalFlags & FLG_HEAP_DISABLE_COALESCING) + Flags |= HEAP_DISABLE_COALESCE_ON_FREE; + + if (NtGlobalFlags & FLG_HEAP_ENABLE_FREE_CHECK) + Flags |= HEAP_FREE_CHECKING_ENABLED; + + if (NtGlobalFlags & FLG_HEAP_ENABLE_TAIL_CHECK) + Flags |= HEAP_TAIL_CHECKING_ENABLED; + + if (RtlpGetMode() == UserMode) + { + /* Also check these flags if in usermode */ + if (NtGlobalFlags & FLG_HEAP_VALIDATE_ALL) + Flags |= HEAP_VALIDATE_ALL_ENABLED; + + if (NtGlobalFlags & FLG_HEAP_VALIDATE_PARAMETERS) + Flags |= HEAP_VALIDATE_PARAMETERS_ENABLED; + + if (NtGlobalFlags & FLG_USER_STACK_TRACE_DB) + Flags |= HEAP_CAPTURE_STACK_BACKTRACES; + + /* Get PEB */ + Peb = RtlGetCurrentPeb(); + + /* Apply defaults for non-set parameters */ + if (!Parameters->SegmentCommit) Parameters->SegmentCommit = Peb->HeapSegmentCommit; + if (!Parameters->SegmentReserve) Parameters->SegmentReserve = Peb->HeapSegmentReserve; + if (!Parameters->DeCommitFreeBlockThreshold) Parameters->DeCommitFreeBlockThreshold = Peb->HeapDeCommitFreeBlockThreshold; + if (!Parameters->DeCommitTotalFreeThreshold) Parameters->DeCommitTotalFreeThreshold = Peb->HeapDeCommitTotalFreeThreshold; + } + else + { + /* Apply defaults for non-set parameters */ +#if 0 + if (!Parameters->SegmentCommit) Parameters->SegmentCommit = MmHeapSegmentCommit; + if (!Parameters->SegmentReserve) Parameters->SegmentReserve = MmHeapSegmentReserve; + if (!Parameters->DeCommitFreeBlockThreshold) Parameters->DeCommitFreeBlockThreshold = MmHeapDeCommitFreeBlockThreshold; + if (!Parameters->DeCommitTotalFreeThreshold) Parameters->DeCommitTotalFreeThreshold = MmHeapDeCommitTotalFreeThreshold; +#endif + } + + // FIXME: Move to memory manager + if (!Parameters->SegmentCommit) Parameters->SegmentCommit = PAGE_SIZE * 2; + if (!Parameters->SegmentReserve) Parameters->SegmentReserve = 1048576; + if (!Parameters->DeCommitFreeBlockThreshold) Parameters->DeCommitFreeBlockThreshold = PAGE_SIZE; + if (!Parameters->DeCommitTotalFreeThreshold) Parameters->DeCommitTotalFreeThreshold = 65536; + + /* Get the max um address */ + Status = ZwQuerySystemInformation(SystemBasicInformation, + &SystemInformation, + sizeof(SystemInformation), + NULL); + + if (!NT_SUCCESS(Status)) + { + DPRINT1("Getting max usermode address failed with status 0x%08x\n", Status); + return NULL; + } + + MaximumUserModeAddress = SystemInformation.MaximumUserModeAddress; + + /* Calculate max alloc size */ + if (!Parameters->MaximumAllocationSize) + Parameters->MaximumAllocationSize = MaximumUserModeAddress - (ULONG_PTR)0x10000 - PAGE_SIZE; + + MaxBlockSize = 0x80000 - PAGE_SIZE; + + if (!Parameters->VirtualMemoryThreshold || + Parameters->VirtualMemoryThreshold > MaxBlockSize) + { + Parameters->VirtualMemoryThreshold = MaxBlockSize; + } + + /* Check reserve/commit sizes and set default values */ + if (!CommitSize) + { + CommitSize = PAGE_SIZE; + if (TotalSize) + TotalSize = ROUND_UP(TotalSize, PAGE_SIZE); + else + TotalSize = 64 * PAGE_SIZE; + } + else + { + /* Round up the commit size to be at least the page size */ + CommitSize = ROUND_UP(CommitSize, PAGE_SIZE); + + if (TotalSize) + TotalSize = ROUND_UP(TotalSize, PAGE_SIZE); + else + TotalSize = ROUND_UP(CommitSize, 16 * PAGE_SIZE); 
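/* Editorial note: the two branches above normalize the caller's commit and
 * reserve sizes before the heap segment is created. A minimal standalone
 * sketch of that arithmetic, assuming PAGE_SIZE = 0x1000 and a generic
 * ROUND_UP macro; the names and constants here are illustrative only and
 * are not part of the submitted patch. */
#if 0
#include <stddef.h>

#define SKETCH_PAGE_SIZE    0x1000u                       /* assumed x86 page size */
#define SKETCH_ROUND_UP(x, y) ((((x) + (y) - 1) / (y)) * (y))

static void SketchNormalizeSizes(size_t *CommitSize, size_t *ReserveSize)
{
    if (*CommitSize == 0)
    {
        /* No commit size supplied: commit one page, reserve either the
           page-rounded caller value or the 64-page default */
        *CommitSize  = SKETCH_PAGE_SIZE;
        *ReserveSize = *ReserveSize ? SKETCH_ROUND_UP(*ReserveSize, SKETCH_PAGE_SIZE)
                                    : 64 * SKETCH_PAGE_SIZE;
    }
    else
    {
        /* Commit size supplied: page-round it, and if no reserve was given,
           derive one by rounding the commit size up to a 16-page multiple */
        *CommitSize  = SKETCH_ROUND_UP(*CommitSize, SKETCH_PAGE_SIZE);
        *ReserveSize = *ReserveSize ? SKETCH_ROUND_UP(*ReserveSize, SKETCH_PAGE_SIZE)
                                    : SKETCH_ROUND_UP(*CommitSize, 16 * SKETCH_PAGE_SIZE);
    }
}
#endif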
+ } + + /* Call special heap */ + if (RtlpHeapIsSpecial(Flags)) + return RtlDebugCreateHeap(Flags, Addr, TotalSize, CommitSize, Lock, Parameters); + + /* Calculate header size */ + HeaderSize = sizeof(HEAP); + if (!(Flags & HEAP_NO_SERIALIZE)) + { + if (Lock) { - HEAP *heapPtr = subheap->heap; - RtlEnterHeapLock( &processHeap->lock ); - list_add_head( &processHeap->entry, &heapPtr->entry ); - RtlLeaveHeapLock( &processHeap->lock ); + Flags |= HEAP_LOCK_USER_ALLOCATED; } else { - processHeap = subheap->heap; /* assume the first heap we create is the process main heap */ - list_init( &processHeap->entry ); - assert( (ULONG_PTR)processHeap->freeList % ALIGNMENT == ARENA_OFFSET ); + HeaderSize += sizeof(HEAP_LOCK); + AllocateLock = TRUE; } - } + } + else if (Lock) + { + /* Invalid parameters */ + return NULL; + } - return (HANDLE)subheap; + /* See if we are already provided with an address for the heap */ + if (Addr) + { + if (Parameters->CommitRoutine) + { + /* There is a commit routine, so no problem here, check params */ + if ((Flags & HEAP_GROWABLE) || + !Parameters->InitialCommit || + !Parameters->InitialReserve || + (Parameters->InitialCommit > Parameters->InitialReserve)) + { + /* Fail */ + return NULL; + } + + /* Calculate committed and uncommitted addresses */ + CommittedAddress = Addr; + UncommittedAddress = (PCHAR)Addr + Parameters->InitialCommit; + TotalSize = Parameters->InitialReserve; + + /* Zero the initial page ourselves */ + RtlZeroMemory(CommittedAddress, PAGE_SIZE); + } + else + { + /* Commit routine is absent, so query how much memory caller reserved */ + Status = ZwQueryVirtualMemory(NtCurrentProcess(), + Addr, + MemoryBasicInformation, + &MemoryInfo, + sizeof(MemoryInfo), + NULL); + + if (!NT_SUCCESS(Status)) + { + DPRINT1("Querying amount of user supplied memory failed with status 0x%08X\n", Status); + return NULL; + } + + /* Validate it */ + if (MemoryInfo.BaseAddress != Addr || + MemoryInfo.State == MEM_FREE) + { + return NULL; + } + + /* Validation checks passed, set committed/uncommitted addresses */ + CommittedAddress = Addr; + + /* Check if it's committed or not */ + if (MemoryInfo.State == MEM_COMMIT) + { + /* Zero it out because it's already committed */ + RtlZeroMemory(CommittedAddress, PAGE_SIZE); + + /* Calculate uncommitted address value */ + CommitSize = MemoryInfo.RegionSize; + TotalSize = CommitSize; + UncommittedAddress = (PCHAR)Addr + CommitSize; + + /* Check if uncommitted address is reserved */ + Status = ZwQueryVirtualMemory(NtCurrentProcess(), + UncommittedAddress, + MemoryBasicInformation, + &MemoryInfo, + sizeof(MemoryInfo), + NULL); + + if (NT_SUCCESS(Status) && + MemoryInfo.State == MEM_RESERVE) + { + /* It is, so add it up to the reserve size */ + TotalSize += MemoryInfo.RegionSize; + } + } + else + { + /* It's not committed, inform following code that a commit is necessary */ + CommitSize = PAGE_SIZE; + UncommittedAddress = Addr; + } + } + + /* Mark this as a user-committed mem */ + HeapSegmentFlags = HEAP_USER_ALLOCATED; + Heap = (PHEAP)Addr; + } + else + { + /* Check commit routine */ + if (Parameters->CommitRoutine) return NULL; + + /* Reserve memory */ + Status = ZwAllocateVirtualMemory(NtCurrentProcess(), + (PVOID *)&Heap, + 0, + &TotalSize, + MEM_RESERVE, + PAGE_READWRITE); + + if (!NT_SUCCESS(Status)) + { + DPRINT1("Failed to reserve memory with status 0x%08x\n", Status); + return NULL; + } + + /* Set base addresses */ + CommittedAddress = Heap; + UncommittedAddress = Heap; + } + + /* Check if we need to commit something */ + if 
(CommittedAddress == UncommittedAddress) + { + /* Commit the required size */ + Status = ZwAllocateVirtualMemory(NtCurrentProcess(), + &CommittedAddress, + 0, + &CommitSize, + MEM_COMMIT, + PAGE_READWRITE); + + DPRINT("Committed %d bytes at base %p\n", CommitSize, CommittedAddress); + + if (!NT_SUCCESS(Status)) + { + DPRINT1("Failure, Status 0x%08X\n", Status); + + /* Release memory if it was reserved */ + if (!Addr) ZwFreeVirtualMemory(NtCurrentProcess(), + (PVOID *)&Heap, + &TotalSize, + MEM_RELEASE); + + return NULL; + } + + /* Calculate new uncommitted address */ + UncommittedAddress = (PCHAR)UncommittedAddress + CommitSize; + } + + DPRINT("Created heap %p, CommitSize %x, ReserveSize %x\n", Heap, CommitSize, TotalSize); + + /* Initialize the heap */ + RtlpInitializeHeap(Heap, &HeaderSize, Flags, AllocateLock, Lock); + + /* Initialize heap's first segment */ + if (!RtlpInitializeHeapSegment(Heap, + (PHEAP_SEGMENT)((PCHAR)Heap + HeaderSize), + 0, + HeapSegmentFlags, + CommittedAddress, + UncommittedAddress, + (PCHAR)CommittedAddress + TotalSize)) + { + DPRINT1("Failed to initialize heap segment\n"); + return NULL; + } + + /* Set other data */ + Heap->ProcessHeapsListIndex = 0; + Heap->SegmentCommit = Parameters->SegmentCommit; + Heap->SegmentReserve = Parameters->SegmentReserve; + Heap->DeCommitFreeBlockThreshold = Parameters->DeCommitFreeBlockThreshold >> HEAP_ENTRY_SHIFT; + Heap->DeCommitTotalFreeThreshold = Parameters->DeCommitTotalFreeThreshold >> HEAP_ENTRY_SHIFT; + Heap->MaximumAllocationSize = Parameters->MaximumAllocationSize; + Heap->VirtualMemoryThreshold = ROUND_UP(Parameters->VirtualMemoryThreshold, HEAP_ENTRY_SIZE) >> HEAP_ENTRY_SHIFT; + Heap->CommitRoutine = Parameters->CommitRoutine; + + /* Set alignment */ + if (Flags & HEAP_CREATE_ALIGN_16) + { + Heap->AlignMask = (ULONG)~15; + Heap->AlignRound = 15 + sizeof(HEAP_ENTRY); + } + else + { + Heap->AlignMask = (ULONG)~(HEAP_ENTRY_SIZE - 1); + Heap->AlignRound = HEAP_ENTRY_SIZE - 1 + sizeof(HEAP_ENTRY); + } + + if (Heap->Flags & HEAP_TAIL_CHECKING_ENABLED) + Heap->AlignRound += HEAP_ENTRY_SIZE; + + /* Add heap to process list in case of usermode heap */ + if (RtlpGetMode() == UserMode) + { + RtlpAddHeapToProcessList(Heap); + + // FIXME: What about lookasides? + } + + DPRINT("Heap %p, flags 0x%08x\n", Heap, Heap->Flags); + return Heap; } /*********************************************************************** - * HeapDestroy (KERNEL32.337) + * RtlDestroyHeap * RETURNS * TRUE: Success * FALSE: Failure @@ -1209,39 +1709,357 @@ RtlCreateHeap(ULONG flags, * Failure: The Heap handle, if heap is the process heap. 
*/ HANDLE NTAPI -RtlDestroyHeap(HANDLE heap) /* [in] Handle of heap */ +RtlDestroyHeap(HANDLE HeapPtr) /* [in] Handle of heap */ { - HEAP *heapPtr = HEAP_GetPtr( heap ); - SUBHEAP *subheap; + PHEAP Heap = (PHEAP)HeapPtr; + PLIST_ENTRY Current; + PHEAP_UCR_SEGMENT UcrSegment; + PHEAP_VIRTUAL_ALLOC_ENTRY VirtualEntry; + PVOID BaseAddress; + SIZE_T Size; + LONG i; + PHEAP_SEGMENT Segment; - DPRINT("%p\n", heap ); - if (!heapPtr) - return heap; + if (!HeapPtr) return NULL; - if (RtlpGetMode() == UserMode) - { - if (heap == NtCurrentPeb()->ProcessHeap) - return heap; /* cannot delete the main process heap */ - - /* remove it from the per-process list */ - RtlEnterHeapLock( &processHeap->lock ); - list_remove( &heapPtr->entry ); - RtlLeaveHeapLock( &processHeap->lock ); - } - - RtlDeleteHeapLock( &heapPtr->lock ); - subheap = &heapPtr->subheap; - while (subheap) + /* Call special heap */ + if (RtlpHeapIsSpecial(Heap->Flags)) { - SUBHEAP *next = subheap->next; - SIZE_T size = 0; - void *addr = subheap; - ZwFreeVirtualMemory( NtCurrentProcess(), &addr, &size, MEM_RELEASE ); - subheap = next; + if (!RtlDebugDestroyHeap(Heap)) return HeapPtr; } - return (HANDLE)NULL; + + /* Check for a process heap */ + if (RtlpGetMode() == UserMode && + HeapPtr == NtCurrentPeb()->ProcessHeap) return HeapPtr; + + /* Free up all big allocations */ + Current = Heap->VirtualAllocdBlocks.Flink; + while (Current != &Heap->VirtualAllocdBlocks) + { + VirtualEntry = CONTAINING_RECORD(Current, HEAP_VIRTUAL_ALLOC_ENTRY, Entry); + BaseAddress = (PVOID)VirtualEntry; + Current = Current->Flink; + Size = 0; + ZwFreeVirtualMemory(NtCurrentProcess(), + &BaseAddress, + &Size, + MEM_RELEASE); + } + + /* Delete tags and remove heap from the process heaps list in user mode */ + if (RtlpGetMode() == UserMode) + { + // FIXME DestroyTags + RtlpRemoveHeapFromProcessList(Heap); + } + + /* Delete the heap lock */ + if (!(Heap->Flags & HEAP_NO_SERIALIZE)) + { + /* Delete it if it wasn't user allocated */ + if (!(Heap->Flags & HEAP_LOCK_USER_ALLOCATED)) + RtlDeleteHeapLock(Heap->LockVariable); + + /* Clear out the lock variable */ + Heap->LockVariable = NULL; + } + + /* Free UCR segments if any were created */ + Current = Heap->UCRSegments.Flink; + while(Current != &Heap->UCRSegments) + { + UcrSegment = CONTAINING_RECORD(Current, HEAP_UCR_SEGMENT, ListEntry); + + /* Advance to the next descriptor */ + Current = Current->Flink; + + BaseAddress = (PVOID)UcrSegment; + Size = 0; + + /* Release that memory */ + ZwFreeVirtualMemory(NtCurrentProcess(), + &BaseAddress, + &Size, + MEM_RELEASE); + } + + /* Go through segments and destroy them */ + for (i = HEAP_SEGMENTS - 1; i >= 0; i--) + { + Segment = Heap->Segments[i]; + if (Segment) RtlpDestroyHeapSegment(Segment); + } + + return NULL; } +PHEAP_ENTRY NTAPI +RtlpSplitEntry(PHEAP Heap, + PHEAP_FREE_ENTRY FreeBlock, + SIZE_T AllocationSize, + SIZE_T Index, + SIZE_T Size) +{ + PHEAP_FREE_ENTRY SplitBlock, SplitBlock2; + UCHAR FreeFlags; + PHEAP_ENTRY InUseEntry; + SIZE_T FreeSize; + + /* Save flags, update total free size */ + FreeFlags = FreeBlock->Flags; + Heap->TotalFreeSize -= FreeBlock->Size; + + /* Make this block an in-use one */ + InUseEntry = (PHEAP_ENTRY)FreeBlock; + InUseEntry->Flags = HEAP_ENTRY_BUSY; + InUseEntry->SmallTagIndex = 0; + + /* Calculate the extra amount */ + FreeSize = InUseEntry->Size - Index; + + /* Update it's size fields (we don't need their data anymore) */ + InUseEntry->Size = Index; + InUseEntry->UnusedBytes = AllocationSize - Size; + + /* If there is something to split 
- do the split */ + if (FreeSize != 0) + { + /* Don't split if resulting entry can't contain any payload data + (i.e. being just HEAP_ENTRY_SIZE) */ + if (FreeSize == 1) + { + /* Increase sizes of the in-use entry */ + InUseEntry->Size++; + InUseEntry->UnusedBytes += sizeof(HEAP_ENTRY); + } + else + { + /* Calculate a pointer to the new entry */ + SplitBlock = (PHEAP_FREE_ENTRY)(InUseEntry + Index); + + /* Initialize it */ + SplitBlock->Flags = FreeFlags; + SplitBlock->SegmentOffset = InUseEntry->SegmentOffset; + SplitBlock->Size = FreeSize; + SplitBlock->PreviousSize = Index; + + /* Check if it's the last entry */ + if (FreeFlags & HEAP_ENTRY_LAST_ENTRY) + { + /* Insert it to the free list if it's the last entry */ + RtlpInsertFreeBlockHelper(Heap, SplitBlock, FreeSize, FALSE); + Heap->TotalFreeSize += FreeSize; + } + else + { + /* Not so easy - need to update next's previous size too */ + SplitBlock2 = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)SplitBlock + FreeSize); + + if (SplitBlock2->Flags & HEAP_ENTRY_BUSY) + { + SplitBlock2->PreviousSize = (USHORT)FreeSize; + RtlpInsertFreeBlockHelper(Heap, SplitBlock, FreeSize, FALSE); + Heap->TotalFreeSize += FreeSize; + } + else + { + /* Even more complex - the next entry is free, so we can merge them into one! */ + SplitBlock->Flags = SplitBlock2->Flags; + + /* Remove that next entry */ + RtlpRemoveFreeBlock(Heap, SplitBlock2, FALSE, FALSE); + + /* Update sizes */ + FreeSize += SplitBlock2->Size; + Heap->TotalFreeSize -= SplitBlock2->Size; + + if (FreeSize <= HEAP_MAX_BLOCK_SIZE) + { + /* Insert it back */ + SplitBlock->Size = FreeSize; + + /* Don't forget to update previous size of the next entry! */ + if (!(SplitBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) + { + ((PHEAP_FREE_ENTRY)((PHEAP_ENTRY)SplitBlock + FreeSize))->PreviousSize = FreeSize; + } + + /* Actually insert it */ + RtlpInsertFreeBlockHelper(Heap, SplitBlock, (USHORT)FreeSize, FALSE); + + /* Update total size */ + Heap->TotalFreeSize += FreeSize; + } + else + { + /* Resulting block is quite big */ + RtlpInsertFreeBlock(Heap, SplitBlock, FreeSize); + } + } + } + + /* Reset flags of the free entry */ + FreeFlags = 0; + + /* Update last entry in segment */ + if (SplitBlock->Flags & HEAP_ENTRY_LAST_ENTRY) + { + Heap->Segments[SplitBlock->SegmentOffset]->LastEntryInSegment = (PHEAP_ENTRY)SplitBlock; + } + } + } + + /* Set last entry flag */ + if (FreeFlags & HEAP_ENTRY_LAST_ENTRY) + InUseEntry->Flags |= HEAP_ENTRY_LAST_ENTRY; + + return InUseEntry; +} + +PVOID NTAPI +RtlpAllocateNonDedicated(PHEAP Heap, + ULONG Flags, + SIZE_T Size, + SIZE_T AllocationSize, + SIZE_T Index, + BOOLEAN HeapLocked) +{ + PLIST_ENTRY FreeListHead, Next; + PHEAP_FREE_ENTRY FreeBlock; + PHEAP_ENTRY InUseEntry; + PHEAP_ENTRY_EXTRA Extra; + EXCEPTION_RECORD ExceptionRecord; + + /* Go through the zero list to find a place where to insert the new entry */ + FreeListHead = &Heap->FreeLists[0]; + + /* Start from the largest block to reduce time */ + Next = FreeListHead->Blink; + if (FreeListHead != Next) + { + FreeBlock = CONTAINING_RECORD(Next, HEAP_FREE_ENTRY, FreeList); + + if (FreeBlock->Size >= Index) + { + /* Our request is smaller than the largest entry in the zero list */ + + /* Go through the list to find insertion place */ + Next = FreeListHead->Flink; + while (FreeListHead != Next) + { + FreeBlock = CONTAINING_RECORD(Next, HEAP_FREE_ENTRY, FreeList); + + if (FreeBlock->Size >= Index) + { + /* Found minimally fitting entry. 
Proceed to either using it as it is + or splitting it to two entries */ + RemoveEntryList(&FreeBlock->FreeList); + + /* Split it */ + InUseEntry = RtlpSplitEntry(Heap, FreeBlock, AllocationSize, Index, Size); + + /* Release the lock */ + if (HeapLocked) RtlLeaveHeapLock(Heap->LockVariable); + + /* Zero memory if that was requested */ + if (Flags & HEAP_ZERO_MEMORY) + RtlZeroMemory(InUseEntry + 1, Size); + else if (Heap->Flags & HEAP_FREE_CHECKING_ENABLED) + { + /* Fill this block with a special pattern */ + RtlFillMemoryUlong(InUseEntry + 1, Size & ~0x3, ARENA_INUSE_FILLER); + } + + /* Fill tail of the block with a special pattern too if requested */ + if (Heap->Flags & HEAP_TAIL_CHECKING_ENABLED) + { + RtlFillMemory((PCHAR)(InUseEntry + 1) + Size, sizeof(HEAP_ENTRY), HEAP_TAIL_FILL); + InUseEntry->Flags |= HEAP_ENTRY_FILL_PATTERN; + } + + /* Prepare extra if it's present */ + if (InUseEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT) + { + Extra = RtlpGetExtraStuffPointer(InUseEntry); + RtlZeroMemory(Extra, sizeof(HEAP_ENTRY_EXTRA)); + + // TODO: Tagging + } + + /* Return pointer to the */ + return InUseEntry + 1; + } + + /* Advance to the next entry */ + Next = Next->Flink; + } + } + } + + /* Extend the heap, 0 list didn't have anything suitable */ + FreeBlock = RtlpExtendHeap(Heap, AllocationSize); + + /* Use the new biggest entry we've got */ + if (FreeBlock) + { + RemoveEntryList(&FreeBlock->FreeList); + + /* Split it */ + InUseEntry = RtlpSplitEntry(Heap, FreeBlock, AllocationSize, Index, Size); + + /* Release the lock */ + if (HeapLocked) RtlLeaveHeapLock(Heap->LockVariable); + + /* Zero memory if that was requested */ + if (Flags & HEAP_ZERO_MEMORY) + RtlZeroMemory(InUseEntry + 1, Size); + else if (Heap->Flags & HEAP_FREE_CHECKING_ENABLED) + { + /* Fill this block with a special pattern */ + RtlFillMemoryUlong(InUseEntry + 1, Size & ~0x3, ARENA_INUSE_FILLER); + } + + /* Fill tail of the block with a special pattern too if requested */ + if (Heap->Flags & HEAP_TAIL_CHECKING_ENABLED) + { + RtlFillMemory((PCHAR)(InUseEntry + 1) + Size, sizeof(HEAP_ENTRY), HEAP_TAIL_FILL); + InUseEntry->Flags |= HEAP_ENTRY_FILL_PATTERN; + } + + /* Prepare extra if it's present */ + if (InUseEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT) + { + Extra = RtlpGetExtraStuffPointer(InUseEntry); + RtlZeroMemory(Extra, sizeof(HEAP_ENTRY_EXTRA)); + + // TODO: Tagging + } + + /* Return pointer to the */ + return InUseEntry + 1; + } + + /* Really unfortunate, out of memory condition */ + RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_NO_MEMORY); + + /* Generate an exception */ + if (Flags & HEAP_GENERATE_EXCEPTIONS) + { + ExceptionRecord.ExceptionCode = STATUS_NO_MEMORY; + ExceptionRecord.ExceptionRecord = NULL; + ExceptionRecord.NumberParameters = 1; + ExceptionRecord.ExceptionFlags = 0; + ExceptionRecord.ExceptionInformation[0] = AllocationSize; + + RtlRaiseException(&ExceptionRecord); + } + + /* Release the lock */ + if (HeapLocked) RtlLeaveHeapLock(Heap->LockVariable); + DPRINT1("HEAP: Allocation failed!\n"); + DPRINT1("Flags %x\n", Heap->Flags); + return NULL; +} /*********************************************************************** * HeapAlloc (KERNEL32.334) @@ -1252,77 +2070,223 @@ RtlDestroyHeap(HANDLE heap) /* [in] Handle of heap */ * @implemented */ PVOID NTAPI -RtlAllocateHeap(HANDLE heap, /* [in] Handle of private heap block */ - ULONG flags, /* [in] Heap allocation control flags */ - SIZE_T size) /* [in] Number of bytes to allocate */ +RtlAllocateHeap(IN PVOID HeapPtr, + IN ULONG Flags, + IN SIZE_T Size) { - 
ARENA_FREE *pArena; - ARENA_INUSE *pInUse; - SUBHEAP *subheap; - HEAP *heapPtr = HEAP_GetPtr( heap ); - SIZE_T rounded_size; + PHEAP Heap = (PHEAP)HeapPtr; + PULONG FreeListsInUse; + ULONG FreeListsInUseUlong; + SIZE_T AllocationSize; + SIZE_T Index; + PLIST_ENTRY FreeListHead; + PHEAP_ENTRY InUseEntry; + PHEAP_FREE_ENTRY FreeBlock; + ULONG InUseIndex, i; + UCHAR FreeFlags; + EXCEPTION_RECORD ExceptionRecord; + BOOLEAN HeapLocked = FALSE; + PHEAP_VIRTUAL_ALLOC_ENTRY VirtualBlock = NULL; + PHEAP_ENTRY_EXTRA Extra; + NTSTATUS Status; - /* Validate the parameters */ + /* Force flags */ + Flags |= Heap->ForceFlags; - if (!heapPtr) - { - if (flags & HEAP_GENERATE_EXCEPTIONS) - RtlRaiseStatus( STATUS_NO_MEMORY ); - return NULL; - } - //flags &= HEAP_GENERATE_EXCEPTIONS | HEAP_NO_SERIALIZE | HEAP_ZERO_MEMORY; - flags |= heapPtr->flags; - rounded_size = ROUND_SIZE(size); - if (rounded_size < size) /* overflow */ + /* Call special heap */ + if (RtlpHeapIsSpecial(Flags)) + return RtlDebugAllocateHeap(Heap, Flags, Size); + + /* Check for the maximum size */ + if (Size >= 0x80000000) { - if (flags & HEAP_GENERATE_EXCEPTIONS) RtlRaiseStatus( STATUS_NO_MEMORY ); + RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_NO_MEMORY); + DPRINT1("HEAP: Allocation failed!\n"); return NULL; } - if (rounded_size < HEAP_MIN_DATA_SIZE) rounded_size = HEAP_MIN_DATA_SIZE; - - if (!(flags & HEAP_NO_SERIALIZE)) RtlEnterHeapLock( &heapPtr->lock ); - /* Locate a suitable free block */ - if (!(pArena = HEAP_FindFreeBlock( heapPtr, rounded_size, &subheap ))) + if (Flags & (HEAP_CREATE_ENABLE_TRACING | + HEAP_CREATE_ALIGN_16)) { - TRACE("(%p,%08lx,%08lx): returning NULL\n", - heap, flags, size ); - if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->lock ); - if (flags & HEAP_GENERATE_EXCEPTIONS) RtlRaiseStatus( STATUS_NO_MEMORY ); - return NULL; + DPRINT1("HEAP: RtlAllocateHeap is called with unsupported flags %x, ignoring\n", Flags); } - /* Remove the arena from the free list */ + //DPRINT("RtlAllocateHeap(%p %x %x)\n", Heap, Flags, Size); - list_remove( &pArena->entry ); - - /* Build the in-use arena */ - - pInUse = (ARENA_INUSE *)pArena; - - /* in-use arena is smaller than free arena, - * so we have to add the difference to the size */ - pInUse->size = (pInUse->size & ~ARENA_FLAG_FREE) + sizeof(ARENA_FREE) - sizeof(ARENA_INUSE); - pInUse->magic = ARENA_INUSE_MAGIC; - pInUse->has_user_data = 0; - - /* Shrink the block */ - - HEAP_ShrinkBlock( subheap, pInUse, rounded_size ); - pInUse->unused_bytes = (pInUse->size & ARENA_SIZE_MASK) - size; - - if (flags & HEAP_ZERO_MEMORY) - { - clear_block( pInUse + 1, size ); - mark_block_uninitialized( (char *)(pInUse + 1) + size, pInUse->unused_bytes ); - } + /* Calculate allocation size and index */ + if (Size) + AllocationSize = Size; else - mark_block_uninitialized( pInUse + 1, pInUse->size & ARENA_SIZE_MASK ); + AllocationSize = 1; + AllocationSize = (AllocationSize + Heap->AlignRound) & Heap->AlignMask; + Index = AllocationSize >> HEAP_ENTRY_SHIFT; - if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->lock ); + /* Acquire the lock if necessary */ + if (!(Flags & HEAP_NO_SERIALIZE)) + { + RtlEnterHeapLock(Heap->LockVariable); + HeapLocked = TRUE; + } - TRACE("(%p,%08lx,%08lx): returning %p\n", heap, flags, size, pInUse + 1 ); - return (LPVOID)(pInUse + 1); + /* Depending on the size, the allocation is going to be done from dedicated, + non-dedicated lists or a virtual block of memory */ + if (Index < HEAP_FREELISTS) + { + FreeListHead = &Heap->FreeLists[Index]; + 
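/* Editorial note: the allocation path chosen below (dedicated free list,
 * non-dedicated list, or direct virtual allocation) is driven purely by the
 * rounded request size. A minimal sketch of that classification for the
 * default (non HEAP_CREATE_ALIGN_16) case, assuming the 32-bit layout:
 * an 8-byte HEAP_ENTRY, HEAP_ENTRY_SHIFT == 3, and 128 dedicated free
 * lists. Names and constants are illustrative, not part of the patch. */
#if 0
#include <stddef.h>

#define SKETCH_ENTRY_SIZE   8u    /* sizeof(HEAP_ENTRY), assumed 32-bit build */
#define SKETCH_ENTRY_SHIFT  3u
#define SKETCH_FREELISTS    128u  /* assumed number of dedicated free lists */

typedef enum { PathDedicated, PathNonDedicated, PathVirtual } SketchAllocPath;

/* Mirrors the arithmetic used above: round the request up to the entry
   granularity, add one header entry, then convert to an index in entries. */
static SketchAllocPath
SketchClassify(size_t Size, size_t VirtualThresholdInEntries, size_t *IndexOut)
{
    size_t AlignRound = SKETCH_ENTRY_SIZE - 1 + SKETCH_ENTRY_SIZE; /* AlignRound */
    size_t AlignMask  = ~(size_t)(SKETCH_ENTRY_SIZE - 1);          /* AlignMask  */
    size_t AllocationSize = ((Size ? Size : 1) + AlignRound) & AlignMask;
    size_t Index = AllocationSize >> SKETCH_ENTRY_SHIFT;

    *IndexOut = Index;
    if (Index < SKETCH_FREELISTS)            return PathDedicated;    /* FreeLists[Index] */
    if (Index <= VirtualThresholdInEntries)  return PathNonDedicated; /* FreeLists[0]     */
    return PathVirtual;                      /* ZwAllocateVirtualMemory, if growable      */
}
#endif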
+ if (!IsListEmpty(FreeListHead)) + { + /* There is a free entry in this list */ + FreeBlock = CONTAINING_RECORD(FreeListHead->Blink, + HEAP_FREE_ENTRY, + FreeList); + + /* Save flags and remove the free entry */ + FreeFlags = FreeBlock->Flags; + RtlpRemoveFreeBlock(Heap, FreeBlock, TRUE, FALSE); + + /* Update the total free size of the heap */ + Heap->TotalFreeSize -= Index; + + /* Initialize this block */ + InUseEntry = (PHEAP_ENTRY)FreeBlock; + InUseEntry->Flags = HEAP_ENTRY_BUSY | (FreeFlags & HEAP_ENTRY_LAST_ENTRY); + InUseEntry->UnusedBytes = AllocationSize - Size; + InUseEntry->SmallTagIndex = 0; + } + else + { + /* Find smallest free block which this request could fit in */ + InUseIndex = Index >> 5; + FreeListsInUse = &Heap->u.FreeListsInUseUlong[InUseIndex]; + + /* This bit magic disables all sizes which are less than the requested allocation size */ + FreeListsInUseUlong = *FreeListsInUse++ & ~((1 << ((ULONG)Index & 0x1f)) - 1); + + /* If size is definitily more than our lists - go directly to the non-dedicated one */ + if (InUseIndex > 3) + return RtlpAllocateNonDedicated(Heap, Flags, Size, AllocationSize, Index, HeapLocked); + + /* Go through the list */ + for (i = InUseIndex; i < 4; i++) + { + if (FreeListsInUseUlong) + { + FreeListHead = &Heap->FreeLists[i * 32]; + break; + } + + if (i < 3) FreeListsInUseUlong = *FreeListsInUse++; + } + + /* Nothing found, search in the non-dedicated list */ + if (i == 4) + return RtlpAllocateNonDedicated(Heap, Flags, Size, AllocationSize, Index, HeapLocked); + + /* That list is found, now calculate exact block */ + FreeListHead += RtlpFindLeastSetBit(FreeListsInUseUlong); + + /* Take this entry and remove it from the list of free blocks */ + FreeBlock = CONTAINING_RECORD(FreeListHead->Blink, + HEAP_FREE_ENTRY, + FreeList); + RtlpRemoveFreeBlock(Heap, FreeBlock, TRUE, FALSE); + + /* Split it */ + InUseEntry = RtlpSplitEntry(Heap, FreeBlock, AllocationSize, Index, Size); + } + + /* Release the lock */ + if (HeapLocked) RtlLeaveHeapLock(Heap->LockVariable); + + /* Zero memory if that was requested */ + if (Flags & HEAP_ZERO_MEMORY) + RtlZeroMemory(InUseEntry + 1, Size); + else if (Heap->Flags & HEAP_FREE_CHECKING_ENABLED) + { + /* Fill this block with a special pattern */ + RtlFillMemoryUlong(InUseEntry + 1, Size & ~0x3, ARENA_INUSE_FILLER); + } + + /* Fill tail of the block with a special pattern too if requested */ + if (Heap->Flags & HEAP_TAIL_CHECKING_ENABLED) + { + RtlFillMemory((PCHAR)(InUseEntry + 1) + Size, sizeof(HEAP_ENTRY), HEAP_TAIL_FILL); + InUseEntry->Flags |= HEAP_ENTRY_FILL_PATTERN; + } + + /* Prepare extra if it's present */ + if (InUseEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT) + { + Extra = RtlpGetExtraStuffPointer(InUseEntry); + RtlZeroMemory(Extra, sizeof(HEAP_ENTRY_EXTRA)); + + // TODO: Tagging + } + + /* User data starts right after the entry's header */ + return InUseEntry + 1; + } + else if (Index <= Heap->VirtualMemoryThreshold) + { + /* The block is too large for dedicated lists, but fine for a non-dedicated one */ + return RtlpAllocateNonDedicated(Heap, Flags, Size, AllocationSize, Index, HeapLocked); + } + else if (Heap->Flags & HEAP_GROWABLE) + { + /* We've got a very big allocation request, satisfy it by directly allocating virtual memory */ + AllocationSize += sizeof(HEAP_VIRTUAL_ALLOC_ENTRY) - sizeof(HEAP_ENTRY); + + Status = ZwAllocateVirtualMemory(NtCurrentProcess(), + (PVOID *)&VirtualBlock, + 0, + &AllocationSize, + MEM_COMMIT, + PAGE_READWRITE); + + if (!NT_SUCCESS(Status)) + { + // Set STATUS! 
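/* Editorial note: the "Set STATUS!" marker above is left unresolved by this
 * patch. Presumably the intent is to record the ZwAllocateVirtualMemory
 * failure before returning NULL, mirroring the error handling used in the
 * free path of this same file, e.g.:
 *     RtlSetLastWin32ErrorAndNtStatusFromNtStatus(Status);
 * This is an assumption, not part of the submitted change; as written, the
 * code falls through and returns NULL without recording the failure code. */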
+ /* Release the lock */ + if (HeapLocked) RtlLeaveHeapLock(Heap->LockVariable); + DPRINT1("HEAP: Allocation failed!\n"); + return NULL; + } + + /* Initialize the newly allocated block */ + VirtualBlock->BusyBlock.Size = (AllocationSize - Size); + VirtualBlock->BusyBlock.Flags = HEAP_ENTRY_VIRTUAL_ALLOC | HEAP_ENTRY_EXTRA_PRESENT | HEAP_ENTRY_BUSY; + VirtualBlock->CommitSize = AllocationSize; + VirtualBlock->ReserveSize = AllocationSize; + + /* Insert it into the list of virtual allocations */ + InsertTailList(&Heap->VirtualAllocdBlocks, &VirtualBlock->Entry); + + /* Release the lock */ + if (HeapLocked) RtlLeaveHeapLock(Heap->LockVariable); + + /* Return pointer to user data */ + return VirtualBlock + 1; + } + + /* Generate an exception */ + if (Flags & HEAP_GENERATE_EXCEPTIONS) + { + ExceptionRecord.ExceptionCode = STATUS_NO_MEMORY; + ExceptionRecord.ExceptionRecord = NULL; + ExceptionRecord.NumberParameters = 1; + ExceptionRecord.ExceptionFlags = 0; + ExceptionRecord.ExceptionInformation[0] = AllocationSize; + + RtlRaiseException(&ExceptionRecord); + } + + RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_BUFFER_TOO_SMALL); + + /* Release the lock */ + if (HeapLocked) RtlLeaveHeapLock(Heap->LockVariable); + DPRINT1("HEAP: Allocation failed!\n"); + return NULL; } @@ -1335,57 +2299,386 @@ RtlAllocateHeap(HANDLE heap, /* [in] Handle of private heap block */ * @implemented */ BOOLEAN NTAPI RtlFreeHeap( - HANDLE heap, /* [in] Handle of heap */ - ULONG flags, /* [in] Heap freeing flags */ - PVOID ptr /* [in] Address of memory to free */ + HANDLE HeapPtr, /* [in] Handle of heap */ + ULONG Flags, /* [in] Heap freeing flags */ + PVOID Ptr /* [in] Address of memory to free */ ) { - ARENA_INUSE *pInUse; - SUBHEAP *subheap; - HEAP *heapPtr; + PHEAP Heap; + PHEAP_ENTRY HeapEntry; + USHORT TagIndex = 0; + SIZE_T BlockSize; + PHEAP_VIRTUAL_ALLOC_ENTRY VirtualEntry; + BOOLEAN Locked = FALSE; + NTSTATUS Status; - /* Validate the parameters */ + /* Freeing NULL pointer is a legal operation */ + if (!Ptr) return TRUE; - if (!ptr) return TRUE; /* freeing a NULL ptr isn't an error in Win2k */ + /* Get pointer to the heap and force flags */ + Heap = (PHEAP)HeapPtr; + Flags |= Heap->ForceFlags; - heapPtr = HEAP_GetPtr( heap ); - if (!heapPtr) + /* Call special heap */ + if (RtlpHeapIsSpecial(Flags)) + return RtlDebugFreeHeap(Heap, Flags, Ptr); + + /* Lock if necessary */ + if (!(Flags & HEAP_NO_SERIALIZE)) { - RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_HANDLE ); + RtlEnterHeapLock(Heap->LockVariable); + Locked = TRUE; + } + + /* Get pointer to the heap entry */ + HeapEntry = (PHEAP_ENTRY)Ptr - 1; + + /* Check this entry, fail if it's invalid */ + if (!(HeapEntry->Flags & HEAP_ENTRY_BUSY) || + (((ULONG_PTR)Ptr & 0x7) != 0) || + (HeapEntry->SegmentOffset >= HEAP_SEGMENTS)) + { + /* This is an invalid block */ + DPRINT1("HEAP: Trying to free an invalid address %p!\n", Ptr); + RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_INVALID_PARAMETER); + + /* Release the heap lock */ + if (Locked) RtlLeaveHeapLock(Heap->LockVariable); return FALSE; } - flags &= HEAP_NO_SERIALIZE; - flags |= heapPtr->flags; - if (!(flags & HEAP_NO_SERIALIZE)) RtlEnterHeapLock( &heapPtr->lock ); - if (!HEAP_IsRealArena( heapPtr, HEAP_NO_SERIALIZE, ptr, QUIET )) + if (HeapEntry->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) { - if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->lock ); - RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_PARAMETER ); - TRACE("(%p,%08lx,%p): returning FALSE\n", heap, flags, ptr ); 
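/* Editorial note: the rewritten RtlFreeHeap above replaces the old
 * HEAP_IsRealArena()-style validation with a cheap sanity check on the
 * block header before freeing. A minimal sketch of that predicate, reusing
 * the surrounding code's names for clarity (illustrative only, assuming
 * HEAP_SEGMENTS is the size of the heap's segment array):
 *
 *     BOOLEAN BlockLooksFreeable(PVOID Ptr, PHEAP_ENTRY Entry)
 *     {
 *         return (Entry->Flags & HEAP_ENTRY_BUSY) &&       // must be in use
 *                (((ULONG_PTR)Ptr & 0x7) == 0)   &&        // 8-byte aligned payload
 *                (Entry->SegmentOffset < HEAP_SEGMENTS);   // sane segment index
 *     }
 *
 * Anything failing this check is rejected with STATUS_INVALID_PARAMETER
 * instead of risking a walk over possibly corrupted metadata. */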
- return FALSE; + /* Big allocation */ + VirtualEntry = CONTAINING_RECORD(HeapEntry, HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock); + + /* Remove it from the list */ + RemoveEntryList(&VirtualEntry->Entry); + + // TODO: Tagging + + BlockSize = 0; + Status = ZwFreeVirtualMemory(NtCurrentProcess(), + (PVOID *)&VirtualEntry, + &BlockSize, + MEM_RELEASE); + + if (!NT_SUCCESS(Status)) + { + DPRINT1("HEAP: Failed releasing memory with Status 0x%08X. Heap %p, ptr %p, base address %p\n", + Status, Heap, Ptr, VirtualEntry); + RtlSetLastWin32ErrorAndNtStatusFromNtStatus(Status); + } + } + else + { + /* Normal allocation */ + BlockSize = HeapEntry->Size; + + // TODO: Tagging + + /* Coalesce in kernel mode, and in usermode if it's not disabled */ + if (RtlpGetMode() == KernelMode || + (RtlpGetMode() == UserMode && !(Heap->Flags & HEAP_DISABLE_COALESCE_ON_FREE))) + { + HeapEntry = (PHEAP_ENTRY)RtlpCoalesceFreeBlocks(Heap, + (PHEAP_FREE_ENTRY)HeapEntry, + &BlockSize, + FALSE); + } + + /* If there is no need to decommit the block - put it into a free list */ + if (BlockSize < Heap->DeCommitFreeBlockThreshold || + (Heap->TotalFreeSize + BlockSize < Heap->DeCommitTotalFreeThreshold)) + { + /* Check if it needs to go to a 0 list */ + if (BlockSize > HEAP_MAX_BLOCK_SIZE) + { + /* General-purpose 0 list */ + RtlpInsertFreeBlock(Heap, (PHEAP_FREE_ENTRY)HeapEntry, BlockSize); + } + else + { + /* Usual free list */ + RtlpInsertFreeBlockHelper(Heap, (PHEAP_FREE_ENTRY)HeapEntry, BlockSize, FALSE); + + /* Assert sizes are consistent */ + if (!(HeapEntry->Flags & HEAP_ENTRY_LAST_ENTRY)) + { + ASSERT((HeapEntry + BlockSize)->PreviousSize == BlockSize); + } + + /* Increase the free size */ + Heap->TotalFreeSize += BlockSize; + } + + + if (RtlpGetMode() == UserMode && + TagIndex != 0) + { + // FIXME: Tagging + UNIMPLEMENTED; + } + } + else + { + /* Decommit this block */ + RtlpDeCommitFreeBlock(Heap, (PHEAP_FREE_ENTRY)HeapEntry, BlockSize); + } } - /* Some sanity checks */ - pInUse = (ARENA_INUSE *)ptr - 1; - subheap = HEAP_FindSubHeap( heapPtr, pInUse ); - if ((char *)pInUse < (char *)subheap + subheap->headerSize) goto error; - if (!HEAP_ValidateInUseArena( subheap, pInUse, QUIET )) goto error; + /* Release the heap lock */ + if (Locked) RtlLeaveHeapLock(Heap->LockVariable); - /* Turn the block into a free block */ - - HEAP_MakeInUseBlockFree( subheap, pInUse ); - - if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->lock ); - - TRACE("(%p,%08lx,%p): returning TRUE\n", heap, flags, ptr ); return TRUE; +} -error: - if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->lock ); - RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_PARAMETER ); - TRACE("(%p,%08x,%p): returning FALSE\n", heap, flags, ptr ); - return FALSE; +BOOLEAN NTAPI +RtlpGrowBlockInPlace (IN PHEAP Heap, + IN ULONG Flags, + IN PHEAP_ENTRY InUseEntry, + IN SIZE_T Size, + IN SIZE_T Index) +{ + UCHAR EntryFlags, RememberFlags; + PHEAP_FREE_ENTRY FreeEntry, UnusedEntry, FollowingEntry; + SIZE_T FreeSize, PrevSize, TailPart, AddedSize = 0; + PHEAP_ENTRY_EXTRA OldExtra, NewExtra; + + /* We can't grow beyond specified threshold */ + if (Index > Heap->VirtualMemoryThreshold) + return FALSE; + + /* Get entry flags */ + EntryFlags = InUseEntry->Flags; + + /* Get the next free entry */ + FreeEntry = (PHEAP_FREE_ENTRY)(InUseEntry + InUseEntry->Size); + + if (EntryFlags & HEAP_ENTRY_LAST_ENTRY) + { + /* There is no next block, just uncommitted space. 
Calculate how much is needed */ + FreeSize = (Index - InUseEntry->Size) << HEAP_ENTRY_SHIFT; + FreeSize = ROUND_UP(FreeSize, PAGE_SIZE); + + /* Find and commit those pages */ + FreeEntry = RtlpFindAndCommitPages(Heap, + Heap->Segments[InUseEntry->SegmentOffset], + &FreeSize, + FreeEntry); + + /* Fail if it failed... */ + if (!FreeEntry) return FALSE; + + /* It was successful, perform coalescing */ + FreeSize = FreeSize >> HEAP_ENTRY_SHIFT; + FreeEntry = RtlpCoalesceFreeBlocks(Heap, FreeEntry, &FreeSize, FALSE); + + /* Check if it's enough */ + if (FreeSize + InUseEntry->Size < Index) + { + /* Still not enough */ + RtlpInsertFreeBlock(Heap, FreeEntry, FreeSize); + Heap->TotalFreeSize += FreeSize; + return FALSE; + } + + /* Remember flags of this free entry */ + RememberFlags = FreeEntry->Flags; + + /* Sum up sizes */ + FreeSize += InUseEntry->Size; + } + else + { + /* The next block indeed exists. Check if it's free or in use */ + if (FreeEntry->Flags & HEAP_ENTRY_BUSY) return FALSE; + + /* Next entry is free, check if it can fit the block we need */ + FreeSize = InUseEntry->Size + FreeEntry->Size; + if (FreeSize < Index) return FALSE; + + /* Remember flags of this free entry */ + RememberFlags = FreeEntry->Flags; + + /* Remove this block from the free list */ + RtlpRemoveFreeBlock(Heap, FreeEntry, FALSE, FALSE); + Heap->TotalFreeSize -= FreeEntry->Size; + } + + PrevSize = (InUseEntry->Size << HEAP_ENTRY_SHIFT) - InUseEntry->UnusedBytes; + FreeSize -= Index; + + /* Don't produce too small blocks */ + if (FreeSize <= 2) + { + Index += FreeSize; + FreeSize = 0; + } + + /* Process extra stuff */ + if (RememberFlags & HEAP_ENTRY_EXTRA_PRESENT) + { + /* Calculate pointers */ + OldExtra = (PHEAP_ENTRY_EXTRA)(InUseEntry + InUseEntry->Size - 1); + NewExtra = (PHEAP_ENTRY_EXTRA)(InUseEntry + Index - 1); + + /* Copy contents */ + *NewExtra = *OldExtra; + + // FIXME Tagging + } + + /* Update sizes */ + InUseEntry->Size = Index; + InUseEntry->UnusedBytes = ((Index << HEAP_ENTRY_SHIFT) - Size); + + /* Check if there is a free space remaining after merging those blocks */ + if (!FreeSize) + { + /* Update flags and sizes */ + InUseEntry->Flags |= RememberFlags & HEAP_ENTRY_LAST_ENTRY; + + /* Either update previous size of the next entry or mark it as a last + entry in the segment*/ + if (RememberFlags & HEAP_ENTRY_LAST_ENTRY) + Heap->Segments[InUseEntry->SegmentOffset]->LastEntryInSegment = InUseEntry; + else + (InUseEntry + InUseEntry->Size)->PreviousSize = InUseEntry->Size; + } + else + { + /* Complex case, we need to split the block to give unused free space + back to the heap */ + UnusedEntry = (PHEAP_FREE_ENTRY)(InUseEntry + Index); + UnusedEntry->PreviousSize = Index; + UnusedEntry->SegmentOffset = InUseEntry->SegmentOffset; + + /* Update the following block or set the last entry in the segment */ + if (RememberFlags & HEAP_ENTRY_LAST_ENTRY) + { + /* Set last entry and set flags and size */ + Heap->Segments[InUseEntry->SegmentOffset]->LastEntryInSegment = InUseEntry; + UnusedEntry->Flags = RememberFlags; + UnusedEntry->Size = FreeSize; + + /* Insert it to the heap and update total size */ + RtlpInsertFreeBlockHelper(Heap, UnusedEntry, FreeSize, FALSE); + Heap->TotalFreeSize += FreeSize; + } + else + { + /* There is a block after this one */ + FollowingEntry = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)UnusedEntry + FreeSize); + + if (FollowingEntry->Flags & HEAP_ENTRY_BUSY) + { + /* Update flags and set size of the unused space entry */ + UnusedEntry->Flags = RememberFlags & (~HEAP_ENTRY_LAST_ENTRY); + 
UnusedEntry->Size = FreeSize; + + /* Update previous size of the following entry */ + FollowingEntry->PreviousSize = FreeSize; + + /* Insert it to the heap and update total free size */ + RtlpInsertFreeBlockHelper(Heap, UnusedEntry, FreeSize, FALSE); + Heap->TotalFreeSize += FreeSize; + } + else + { + /* That following entry is also free, what a fortune! */ + RememberFlags = FollowingEntry->Flags; + + /* Remove it */ + RtlpRemoveFreeBlock(Heap, FollowingEntry, FALSE, FALSE); + Heap->TotalFreeSize -= FollowingEntry->Size; + + /* And make up a new combined block */ + FreeSize += FollowingEntry->Size; + UnusedEntry->Flags = RememberFlags; + + /* Check where to put it */ + if (FreeSize <= HEAP_MAX_BLOCK_SIZE) + { + /* Fine for a dedicated list */ + UnusedEntry->Size = FreeSize; + + if (RememberFlags & HEAP_ENTRY_LAST_ENTRY) + Heap->Segments[UnusedEntry->SegmentOffset]->LastEntryInSegment = (PHEAP_ENTRY)UnusedEntry; + else + ((PHEAP_ENTRY)UnusedEntry + FreeSize)->PreviousSize = FreeSize; + + /* Insert it back and update total size */ + RtlpInsertFreeBlockHelper(Heap, UnusedEntry, FreeSize, FALSE); + Heap->TotalFreeSize += FreeSize; + } + else + { + /* The block is very large, leave all the hassle to the insertion routine */ + RtlpInsertFreeBlock(Heap, UnusedEntry, FreeSize); + } + } + } + } + + /* Copy user settable flags */ + InUseEntry->Flags &= ~HEAP_ENTRY_SETTABLE_FLAGS; + InUseEntry->Flags |= ((Flags & HEAP_SETTABLE_USER_FLAGS) >> 4); + + /* Properly "zero out" (and fill!) the space */ + if (Flags & HEAP_ZERO_MEMORY) + { + RtlZeroMemory((PCHAR)(InUseEntry + 1) + PrevSize, Size - PrevSize); + } + else if (Heap->Flags & HEAP_FREE_CHECKING_ENABLED) + { + /* Calculate tail part which we need to fill */ + TailPart = PrevSize & (sizeof(ULONG) - 1); + + /* "Invert" it as usual */ + if (TailPart) TailPart = 4 - TailPart; + + if (Size > (PrevSize + TailPart)) + AddedSize = (Size - (PrevSize + TailPart)) & ~(sizeof(ULONG) - 1); + + if (AddedSize) + { + RtlFillMemoryUlong((PCHAR)(InUseEntry + 1) + PrevSize + TailPart, + AddedSize, + ARENA_INUSE_FILLER); + } + } + + /* Fill the new tail */ + if (Heap->Flags & HEAP_TAIL_CHECKING_ENABLED) + { + RtlFillMemory((PCHAR)(InUseEntry + 1) + Size, + HEAP_ENTRY_SIZE, + HEAP_TAIL_FILL); + } + + /* Return success */ + return TRUE; +} + +PHEAP_ENTRY_EXTRA NTAPI +RtlpGetExtraStuffPointer(PHEAP_ENTRY HeapEntry) +{ + PHEAP_VIRTUAL_ALLOC_ENTRY VirtualEntry; + + /* Check if it's a big block */ + if (HeapEntry->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) + { + VirtualEntry = CONTAINING_RECORD(HeapEntry, HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock); + + /* Return a pointer to the extra stuff*/ + return &VirtualEntry->ExtraStuff; + } + else + { + /* This is a usual entry, which means extra stuff follows this block */ + return (PHEAP_ENTRY_EXTRA)(HeapEntry + HeapEntry->Size - 1); + } } @@ -1403,129 +2696,411 @@ error: * 0x7d030f60--invalid flags in RtlHeapAllocate * @implemented */ -PVOID NTAPI RtlReAllocateHeap( - HANDLE heap, - ULONG flags, - PVOID ptr, - SIZE_T size -) +PVOID NTAPI +RtlReAllocateHeap(HANDLE HeapPtr, + ULONG Flags, + PVOID Ptr, + SIZE_T Size) { - ARENA_INUSE *pArena; - HEAP *heapPtr; - SUBHEAP *subheap; - SIZE_T oldBlockSize, oldActualSize, rounded_size; + PHEAP Heap = (PHEAP)HeapPtr; + PHEAP_ENTRY InUseEntry, NewInUseEntry; + PHEAP_ENTRY_EXTRA OldExtra, NewExtra; + SIZE_T AllocationSize, FreeSize, DecommitSize; + BOOLEAN HeapLocked = FALSE; + PVOID NewBaseAddress; + PHEAP_FREE_ENTRY SplitBlock, SplitBlock2; + SIZE_T OldSize, Index, OldIndex; + UCHAR FreeFlags; + NTSTATUS 
Status; + PVOID DecommitBase; + SIZE_T RemainderBytes, ExtraSize; + PHEAP_VIRTUAL_ALLOC_ENTRY VirtualAllocBlock; + EXCEPTION_RECORD ExceptionRecord; - if (!ptr) return NULL; - if (!(heapPtr = HEAP_GetPtr( heap ))) + /* Return success in case of a null pointer */ + if (!Ptr) { - RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_HANDLE ); + RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_SUCCESS); return NULL; } - /* Validate the parameters */ + /* Force heap flags */ + Flags |= Heap->ForceFlags; - //Flags &= HEAP_GENERATE_EXCEPTIONS | HEAP_NO_SERIALIZE | HEAP_ZERO_MEMORY | - // HEAP_REALLOC_IN_PLACE_ONLY; - flags |= heapPtr->flags; - if (!(flags & HEAP_NO_SERIALIZE)) RtlEnterHeapLock( &heapPtr->lock ); + /* Call special heap */ + if (RtlpHeapIsSpecial(Flags)) + return RtlDebugReAllocateHeap(Heap, Flags, Ptr, Size); - rounded_size = ROUND_SIZE(size); - if (rounded_size < size) goto oom; /* overflow */ - if (rounded_size < HEAP_MIN_DATA_SIZE) rounded_size = HEAP_MIN_DATA_SIZE; - - pArena = (ARENA_INUSE *)ptr - 1; - if(!(subheap = HEAP_FindSubHeap( heapPtr, pArena ))) goto done; - if ((char *)pArena < (char *)subheap + subheap->headerSize) goto error; - if (!HEAP_ValidateInUseArena( subheap, pArena, QUIET )) goto error; - - oldBlockSize = (pArena->size & ARENA_SIZE_MASK); - oldActualSize = (pArena->size & ARENA_SIZE_MASK) - pArena->unused_bytes; - - if (rounded_size > oldBlockSize) + /* Make sure size is valid */ + if (Size >= 0x80000000) { - char *pNext = (char *)(pArena + 1) + oldBlockSize; - if ((pNext < (char *)subheap + subheap->size) && - (*(DWORD *)pNext & ARENA_FLAG_FREE) && - (oldBlockSize + (*(DWORD *)pNext & ARENA_SIZE_MASK) + sizeof(ARENA_FREE) >= rounded_size)) + RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_NO_MEMORY); + return NULL; + } + + /* Calculate allocation size and index */ + if (Size) + AllocationSize = Size; + else + AllocationSize = 1; + AllocationSize = (AllocationSize + Heap->AlignRound) & Heap->AlignMask; + + /* Add up extra stuff, if it is present anywhere */ + if (((((PHEAP_ENTRY)Ptr)-1)->Flags & HEAP_ENTRY_EXTRA_PRESENT) || + (Flags & HEAP_EXTRA_FLAGS_MASK) || + Heap->PseudoTagEntries) + { + AllocationSize += sizeof(HEAP_ENTRY_EXTRA); + } + + /* Acquire the lock if necessary */ + if (!(Flags & HEAP_NO_SERIALIZE)) + { + RtlEnterHeapLock(Heap->LockVariable); + HeapLocked = TRUE; + Flags ^= HEAP_NO_SERIALIZE; + } + + /* Get the pointer to the in-use entry */ + InUseEntry = (PHEAP_ENTRY)Ptr - 1; + + /* If that entry is not really in-use, we have a problem */ + if (!(InUseEntry->Flags & HEAP_ENTRY_BUSY)) + { + RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_INVALID_PARAMETER); + + /* Release the lock and return */ + if (HeapLocked) + RtlLeaveHeapLock(Heap->LockVariable); + return Ptr; + } + + if (InUseEntry->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) + { + /* This is a virtually allocated block. 
Get its size */ + OldSize = RtlpGetSizeOfBigBlock(InUseEntry); + + /* Convert it to an index */ + OldIndex = (OldSize + InUseEntry->Size) >> HEAP_ENTRY_SHIFT; + + /* Calculate new allocation size and round it to the page size */ + AllocationSize += FIELD_OFFSET(HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock); + AllocationSize = ROUND_UP(AllocationSize, PAGE_SIZE); + } + else + { + /* Usual entry */ + OldIndex = InUseEntry->Size; + + OldSize = (OldIndex << HEAP_ENTRY_SHIFT) - InUseEntry->UnusedBytes; + } + + /* Calculate new index */ + Index = AllocationSize >> HEAP_ENTRY_SHIFT; + + /* Check for 4 different scenarios (old size, new size, old index, new index) */ + if (Index <= OldIndex) + { + /* Difference must be greater than 1, adjust if it's not so */ + if (Index + 1 == OldIndex) { - ARENA_FREE *pFree = (ARENA_FREE *)pNext; - list_remove( &pFree->entry ); - pArena->size += (pFree->size & ARENA_SIZE_MASK) + sizeof(*pFree); - - if (!HEAP_Commit( subheap, pArena, rounded_size )) goto oom; - - HEAP_ShrinkBlock( subheap, pArena, rounded_size ); - mark_block_initialized( pArena + 1, oldActualSize ); + Index++; + AllocationSize += sizeof(HEAP_ENTRY); } - else /* Do it the hard way */ + + /* Calculate new size */ + if (InUseEntry->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) { - ARENA_FREE *pNew; - ARENA_INUSE *pInUse; - SUBHEAP *newsubheap; - if ((flags & HEAP_REALLOC_IN_PLACE_ONLY) || - !(pNew = HEAP_FindFreeBlock( heapPtr, rounded_size, &newsubheap ))) - goto oom; + /* Simple in case of a virtual alloc - just an unused size */ + InUseEntry->Size = AllocationSize - Size; + } + else if (InUseEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT) + { + /* There is extra stuff, take it into account */ + OldExtra = (PHEAP_ENTRY_EXTRA)(InUseEntry + InUseEntry->Size - 1); + NewExtra = (PHEAP_ENTRY_EXTRA)(InUseEntry + Index - 1); + *NewExtra = *OldExtra; - /* Build the in-use arena */ + // FIXME Tagging, TagIndex - list_remove( &pNew->entry ); - pInUse = (ARENA_INUSE *)pNew; - pInUse->size = (pInUse->size & ~ARENA_FLAG_FREE) - + sizeof(ARENA_FREE) - sizeof(ARENA_INUSE); - pInUse->magic = ARENA_INUSE_MAGIC; - HEAP_ShrinkBlock( newsubheap, pInUse, rounded_size ); + /* Update unused bytes count */ + InUseEntry->UnusedBytes = AllocationSize - Size; + } + else + { + // FIXME Tagging, SmallTagIndex + InUseEntry->UnusedBytes = AllocationSize - Size; + } - mark_block_initialized( pInUse + 1, oldActualSize ); - memcpy( pInUse + 1, pArena + 1, oldActualSize ); + /* If new size is bigger than the old size */ + if (Size > OldSize) + { + /* Zero out that additional space if required */ + if (Flags & HEAP_ZERO_MEMORY) + { + RtlZeroMemory((PCHAR)Ptr + OldSize, Size - OldSize); + } + else if (Heap->Flags & HEAP_FREE_CHECKING_ENABLED) + { + /* Fill it on free if required */ + RemainderBytes = OldSize & (sizeof(ULONG) - 1); - /* Free the previous block */ + if (RemainderBytes) + RemainderBytes = 4 - RemainderBytes; - HEAP_MakeInUseBlockFree( subheap, pArena ); - subheap = newsubheap; - pArena = pInUse; + if (Size > (OldSize + RemainderBytes)) + { + /* Calculate actual amount of extra bytes to fill */ + ExtraSize = (Size - (OldSize + RemainderBytes)) & ~(sizeof(ULONG) - 1); + + /* Fill them if there are any */ + if (ExtraSize != 0) + { + RtlFillMemoryUlong((PCHAR)(InUseEntry + 1) + OldSize + RemainderBytes, + ExtraSize, + ARENA_INUSE_FILLER); + } + } + } + } + + /* Fill tail of the heap entry if required */ + if (Heap->Flags & HEAP_TAIL_CHECKING_ENABLED) + { + RtlFillMemory((PCHAR)(InUseEntry + 1) + Size, + HEAP_ENTRY_SIZE, + HEAP_TAIL_FILL); + } + + /* 
Check if the difference is significant or not */ + if (Index != OldIndex) + { + /* Save flags */ + FreeFlags = InUseEntry->Flags & ~HEAP_ENTRY_BUSY; + + if (FreeFlags & HEAP_ENTRY_VIRTUAL_ALLOC) + { + /* This is a virtual block allocation */ + VirtualAllocBlock = CONTAINING_RECORD(InUseEntry, HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock); + + // FIXME Tagging! + + DecommitBase = (PCHAR)VirtualAllocBlock + AllocationSize; + DecommitSize = (OldIndex << HEAP_ENTRY_SHIFT) - AllocationSize; + + /* Release the memory */ + Status = ZwFreeVirtualMemory(NtCurrentProcess(), + (PVOID *)&DecommitBase, + &DecommitSize, + MEM_RELEASE); + + if (!NT_SUCCESS(Status)) + { + DPRINT1("HEAP: Unable to release memory (pointer %p, size 0x%x), Status %08x\n", DecommitBase, DecommitSize, Status); + } + else + { + /* Otherwise reduce the commit size */ + VirtualAllocBlock->CommitSize -= DecommitSize; + } + } + else + { + /* Reduce size of the block and possibly split it */ + SplitBlock = (PHEAP_FREE_ENTRY)(InUseEntry + Index); + + /* Initialize this entry */ + SplitBlock->Flags = FreeFlags; + SplitBlock->PreviousSize = Index; + SplitBlock->SegmentOffset = InUseEntry->SegmentOffset; + + /* Remember free size */ + FreeSize = InUseEntry->Size - Index; + + /* Set new size */ + InUseEntry->Size = Index; + InUseEntry->Flags &= ~HEAP_ENTRY_LAST_ENTRY; + + /* Is that the last entry */ + if (FreeFlags & HEAP_ENTRY_LAST_ENTRY) + { + /* Update segment's last entry */ + Heap->Segments[SplitBlock->SegmentOffset]->LastEntryInSegment = (PHEAP_ENTRY)SplitBlock; + + /* Set its size and insert it to the list */ + SplitBlock->Size = (USHORT)FreeSize; + RtlpInsertFreeBlockHelper(Heap, SplitBlock, FreeSize, FALSE); + + /* Update total free size */ + Heap->TotalFreeSize += FreeSize; + } + else + { + /* Get the block after that one */ + SplitBlock2 = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)SplitBlock + FreeSize); + + if (SplitBlock2->Flags & HEAP_ENTRY_BUSY) + { + /* It's in use, add it here*/ + SplitBlock->Size = (USHORT)FreeSize; + + /* Update previous size of the next entry */ + ((PHEAP_FREE_ENTRY)((PHEAP_ENTRY)SplitBlock + FreeSize))->PreviousSize = (USHORT)FreeSize; + + /* Insert it to the list */ + RtlpInsertFreeBlockHelper(Heap, SplitBlock, FreeSize, FALSE); + + /* Update total size */ + Heap->TotalFreeSize += FreeSize; + } + else + { + /* Next entry is free, so merge with it */ + SplitBlock->Flags = SplitBlock2->Flags; + + /* Remove it, update total size */ + RtlpRemoveFreeBlock(Heap, SplitBlock2, FALSE, FALSE); + Heap->TotalFreeSize -= SplitBlock2->Size; + + /* Calculate total free size */ + FreeSize += SplitBlock2->Size; + + if (FreeSize <= HEAP_MAX_BLOCK_SIZE) + { + SplitBlock->Size = FreeSize; + + if (!(SplitBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) + { + /* Update previous size of the next entry */ + ((PHEAP_FREE_ENTRY)((PHEAP_ENTRY)SplitBlock + FreeSize))->PreviousSize = FreeSize; + } + else + { + Heap->Segments[SplitBlock->SegmentOffset]->LastEntryInSegment = (PHEAP_ENTRY)SplitBlock; + } + + /* Insert the new one back and update total size */ + RtlpInsertFreeBlockHelper(Heap, SplitBlock, FreeSize, FALSE); + Heap->TotalFreeSize += FreeSize; + } + else + { + /* Just add it */ + RtlpInsertFreeBlock(Heap, SplitBlock, FreeSize); + } + } + } + } } } else { - HEAP_ShrinkBlock( subheap, pArena, rounded_size ); /* Shrink the block */ - mark_block_initialized( pArena + 1, size ); - } - - pArena->unused_bytes = (pArena->size & ARENA_SIZE_MASK) - size; - - /* Clear the extra bytes if needed */ - - if (size > oldActualSize) - { - if (flags & 
HEAP_ZERO_MEMORY) + /* We're growing the block */ + if ((InUseEntry->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) || + !RtlpGrowBlockInPlace(Heap, Flags, InUseEntry, Size, Index)) { - clear_block( (char *)(pArena + 1) + oldActualSize, size - oldActualSize ); - mark_block_uninitialized( (char *)(pArena + 1) + size, - (pArena->size & ARENA_SIZE_MASK) - oldActualSize ); + /* Growing in place failed, so growing out of place */ + if (Flags & HEAP_REALLOC_IN_PLACE_ONLY) + { + DPRINT1("Realloc in place failed, but it was the only option\n"); + Ptr = NULL; + } + else + { + /* Clear tag bits */ + Flags &= ~HEAP_TAG_MASK; + + /* Process extra stuff */ + if (InUseEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT) + { + /* Preserve user settable flags */ + Flags &= ~HEAP_SETTABLE_USER_FLAGS; + + Flags |= HEAP_SETTABLE_USER_VALUE | ((InUseEntry->Flags & HEAP_ENTRY_SETTABLE_FLAGS) << 4); + + /* Get pointer to the old extra data */ + OldExtra = RtlpGetExtraStuffPointer(InUseEntry); + + /* Save tag index if it was set */ + if (OldExtra->TagIndex && + !(OldExtra->TagIndex & HEAP_PSEUDO_TAG_FLAG)) + { + Flags |= OldExtra->TagIndex << HEAP_TAG_SHIFT; + } + } + else if (InUseEntry->SmallTagIndex) + { + /* Take small tag index into account */ + Flags |= InUseEntry->SmallTagIndex << HEAP_TAG_SHIFT; + } + + /* Allocate new block from the heap */ + NewBaseAddress = RtlAllocateHeap(HeapPtr, + Flags & ~HEAP_ZERO_MEMORY, + Size); + + /* Proceed if it didn't fail */ + if (NewBaseAddress) + { + /* Get new entry pointer */ + NewInUseEntry = (PHEAP_ENTRY)NewBaseAddress - 1; + + /* Process extra stuff if it exists */ + if (NewInUseEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT) + { + NewExtra = RtlpGetExtraStuffPointer(NewInUseEntry); + + if (InUseEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT) + { + OldExtra = RtlpGetExtraStuffPointer(InUseEntry); + NewExtra->Settable = OldExtra->Settable; + } + else + { + RtlZeroMemory(NewExtra, sizeof(*NewExtra)); + } + } + + /* Copy actual user bits */ + if (Size < OldSize) + RtlMoveMemory(NewBaseAddress, Ptr, Size); + else + RtlMoveMemory(NewBaseAddress, Ptr, OldSize); + + /* Zero remaining part if required */ + if (Size > OldSize && + (Flags & HEAP_ZERO_MEMORY)) + { + RtlZeroMemory((PCHAR)NewBaseAddress + OldSize, Size - OldSize); + } + + /* Free the old block */ + RtlFreeHeap(HeapPtr, Flags, Ptr); + } + + Ptr = NewBaseAddress; + } } - else - mark_block_uninitialized( (char *)(pArena + 1) + oldActualSize, - (pArena->size & ARENA_SIZE_MASK) - oldActualSize ); } - /* Return the new arena */ -done: - if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->lock ); - TRACE("(%p,%08lx,%p,%08lx): returning %p\n", heap, flags, ptr, size, pArena + 1 ); - return (LPVOID)(pArena + 1); + /* Did resizing fail? 
*/ + if (!Ptr && (Flags & HEAP_GENERATE_EXCEPTIONS)) + { + /* Generate an exception if required */ + ExceptionRecord.ExceptionCode = STATUS_NO_MEMORY; + ExceptionRecord.ExceptionRecord = NULL; + ExceptionRecord.NumberParameters = 1; + ExceptionRecord.ExceptionFlags = 0; + ExceptionRecord.ExceptionInformation[0] = AllocationSize; -oom: - if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->lock ); - if (flags & HEAP_GENERATE_EXCEPTIONS) RtlRaiseStatus( STATUS_NO_MEMORY ); - RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_NO_MEMORY ); - TRACE("(%p,%08x,%p,%08lx): returning oom\n", heap, flags, ptr, size ); - return NULL; + RtlRaiseException(&ExceptionRecord); + } -error: - if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->lock ); - RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_PARAMETER ); - TRACE("(%p,%08x,%p,%08lx): returning error\n", heap, flags, ptr, size ); - return NULL; + /* Release the heap lock if it was acquired */ + if (HeapLocked) + RtlLeaveHeapLock(Heap->LockVariable); + + return Ptr; } @@ -1557,13 +3132,22 @@ RtlCompactHeap(HANDLE Heap, * @implemented */ BOOLEAN NTAPI -RtlLockHeap(IN HANDLE Heap) +RtlLockHeap(IN HANDLE HeapPtr) { - HEAP *heapPtr = HEAP_GetPtr( Heap ); - if (!heapPtr) - return FALSE; - RtlEnterHeapLock( &heapPtr->lock ); - return TRUE; + PHEAP Heap = (PHEAP)HeapPtr; + + // FIXME Check for special heap + + /* Check if it's really a heap */ + if (Heap->Signature != HEAP_SIGNATURE) return FALSE; + + /* Lock if it's lockable */ + if (!(Heap->Flags & HEAP_NO_SERIALIZE)) + { + RtlEnterHeapLock(Heap->LockVariable); + } + + return TRUE; } @@ -1581,13 +3165,22 @@ RtlLockHeap(IN HANDLE Heap) * @implemented */ BOOLEAN NTAPI -RtlUnlockHeap(HANDLE Heap) +RtlUnlockHeap(HANDLE HeapPtr) { - HEAP *heapPtr = HEAP_GetPtr( Heap ); - if (!heapPtr) - return FALSE; - RtlLeaveHeapLock( &heapPtr->lock ); - return TRUE; + PHEAP Heap = (PHEAP)HeapPtr; + + // FIXME Check for special heap + + /* Check if it's really a heap */ + if (Heap->Signature != HEAP_SIGNATURE) return FALSE; + + /* Unlock if it's lockable */ + if (!(Heap->Flags & HEAP_NO_SERIALIZE)) + { + RtlLeaveHeapLock(Heap->LockVariable); + } + + return TRUE; } @@ -1606,53 +3199,483 @@ RtlUnlockHeap(HANDLE Heap) */ SIZE_T NTAPI RtlSizeHeap( - HANDLE heap, - ULONG flags, - PVOID ptr + HANDLE HeapPtr, + ULONG Flags, + PVOID Ptr ) { - SIZE_T ret; - HEAP *heapPtr = HEAP_GetPtr( heap ); + PHEAP Heap = (PHEAP)HeapPtr; + PHEAP_ENTRY HeapEntry; + SIZE_T EntrySize; - if (!heapPtr) + // FIXME This is a hack around missing SEH support! 
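+    /* NOTE: a sketch of the intent behind the FIXME above - once SEH support is
+       available, an invalid heap pointer would presumably be caught by an
+       exception handler instead of this explicit NULL check; returning
+       (SIZE_T)-1 matches the failure value documented for HeapSize */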
+ if (!Heap) { - RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_HANDLE ); - return MAXULONG; + RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_INVALID_HANDLE); + return (SIZE_T)-1; } - flags &= HEAP_NO_SERIALIZE; - flags |= heapPtr->flags; - if (!(flags & HEAP_NO_SERIALIZE)) RtlEnterHeapLock( &heapPtr->lock ); - if (!HEAP_IsRealArena( heapPtr, HEAP_NO_SERIALIZE, ptr, QUIET )) + + /* Force flags */ + Flags |= Heap->ForceFlags; + + /* Call special heap */ + if (RtlpHeapIsSpecial(Flags)) + return RtlDebugSizeHeap(Heap, Flags, Ptr); + + /* Get the heap entry pointer */ + HeapEntry = (PHEAP_ENTRY)Ptr - 1; + + /* Return -1 if that entry is free */ + if (!(HeapEntry->Flags & HEAP_ENTRY_BUSY)) { - RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_PARAMETER ); - ret = MAXULONG; + RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_INVALID_PARAMETER); + return (SIZE_T)-1; + } + + /* Get size of this block depending if it's a usual or a big one */ + if (HeapEntry->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) + { + EntrySize = RtlpGetSizeOfBigBlock(HeapEntry); } else { - const ARENA_INUSE *pArena = (const ARENA_INUSE *)ptr - 1; - ret = (pArena->size & ARENA_SIZE_MASK) - pArena->unused_bytes; + /* Calculate it */ + EntrySize = (HeapEntry->Size << HEAP_ENTRY_SHIFT) - HeapEntry->UnusedBytes; } - if (!(flags & HEAP_NO_SERIALIZE)) RtlLeaveHeapLock( &heapPtr->lock ); - TRACE("(%p,%08lx,%p): returning %08lx\n", heap, flags, ptr, ret ); - return ret; + /* Return calculated size */ + return EntrySize; } BOOLEAN NTAPI -RtlpValidateHeap(HEAP * Heap, BOOLEAN ForceValidation) +RtlpCheckInUsePattern(PHEAP_ENTRY HeapEntry) { - return HEAP_IsRealArena(Heap, Heap->flags, NULL, TRUE); -} + SIZE_T Size, Result; + PCHAR TailPart; -BOOLEAN NTAPI -RtlpValidateHeapHeaders(HEAP * Heap, BOOLEAN Recalculate) -{ + /* Calculate size */ + if (HeapEntry->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) + Size = RtlpGetSizeOfBigBlock(HeapEntry); + else + Size = (HeapEntry->Size << HEAP_ENTRY_SHIFT) - HeapEntry->UnusedBytes; + + /* Calculate pointer to the tail part of the block */ + TailPart = (PCHAR)(HeapEntry + 1) + Size; + + /* Compare tail pattern */ + Result = RtlCompareMemory(TailPart, + FillPattern, + HEAP_ENTRY_SIZE); + + if (Result != HEAP_ENTRY_SIZE) + { + DPRINT1("HEAP: Heap entry (size %x) %p tail is modified at %p\n", Size, HeapEntry, TailPart + Result); + return FALSE; + } + + /* All is fine */ return TRUE; } BOOLEAN NTAPI -RtlpValidateHeapEntry(HEAP * Heap, PVOID HeapEntry) +RtlpValidateHeapHeaders( + PHEAP Heap, + BOOLEAN Recalculate) { + // We skip header validation for now + return TRUE; +} + +BOOLEAN NTAPI +RtlpValidateHeapEntry( + PHEAP Heap, + PHEAP_ENTRY HeapEntry) +{ + BOOLEAN BigAllocation, EntryFound = FALSE; + PHEAP_SEGMENT Segment; + ULONG SegmentOffset; + + /* Perform various consistency checks of this entry */ + if (!HeapEntry) goto invalid_entry; + if ((ULONG_PTR)HeapEntry & (HEAP_ENTRY_SIZE - 1)) goto invalid_entry; + if (!(HeapEntry->Flags & HEAP_ENTRY_BUSY)) goto invalid_entry; + + BigAllocation = HeapEntry->Flags & HEAP_ENTRY_VIRTUAL_ALLOC; + Segment = Heap->Segments[HeapEntry->SegmentOffset]; + + if (BigAllocation && + (((ULONG_PTR)HeapEntry & (PAGE_SIZE - 1)) != FIELD_OFFSET(HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock))) + goto invalid_entry; + + if (!BigAllocation && (HeapEntry->SegmentOffset >= HEAP_SEGMENTS || + !Segment || + HeapEntry < Segment->FirstEntry || + HeapEntry >= Segment->LastValidEntry)) + goto invalid_entry; + + if ((HeapEntry->Flags & HEAP_ENTRY_FILL_PATTERN) && + 
!RtlpCheckInUsePattern(HeapEntry)) + goto invalid_entry; + + /* Checks are done, if this is a virtual entry, that's all */ + if (HeapEntry->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) return TRUE; + + /* Go through segments and check if this entry fits into any of them */ + for (SegmentOffset = 0; SegmentOffset < HEAP_SEGMENTS; SegmentOffset++) + { + Segment = Heap->Segments[SegmentOffset]; + if (!Segment) continue; + + if ((HeapEntry >= Segment->FirstEntry) && + (HeapEntry < Segment->LastValidEntry)) + { + /* Got it */ + EntryFound = TRUE; + break; + } + } + + /* Return our result of finding entry in the segments */ + return EntryFound; + +invalid_entry: + DPRINT1("HEAP: Invalid heap entry %p in heap %p\n", HeapEntry, Heap); + return FALSE; +} + +BOOLEAN NTAPI +RtlpValidateHeapSegment( + PHEAP Heap, + PHEAP_SEGMENT Segment, + UCHAR SegmentOffset, + PULONG FreeEntriesCount, + PSIZE_T TotalFreeSize, + PSIZE_T TagEntries, + PSIZE_T PseudoTagEntries) +{ + PHEAP_UCR_DESCRIPTOR UcrDescriptor; + PLIST_ENTRY UcrEntry; + SIZE_T ByteSize, Size, Result; + PHEAP_ENTRY CurrentEntry; + ULONG UnCommittedPages; + ULONG UnCommittedRanges; + ULONG PreviousSize; + + UnCommittedPages = 0; + UnCommittedRanges = 0; + + if (IsListEmpty(&Segment->UCRSegmentList)) + { + UcrEntry = NULL; + UcrDescriptor = NULL; + } + else + { + UcrEntry = Segment->UCRSegmentList.Flink; + UcrDescriptor = CONTAINING_RECORD(UcrEntry, HEAP_UCR_DESCRIPTOR, SegmentEntry); + } + + if (Segment->BaseAddress == Heap) + CurrentEntry = &Heap->Entry; + else + CurrentEntry = &Segment->Entry; + + while (CurrentEntry < Segment->LastValidEntry) + { + if (UcrDescriptor && + ((PVOID)CurrentEntry >= UcrDescriptor->Address)) + { + DPRINT1("HEAP: Entry %p is not inside uncommited range [%p .. %p)\n", + CurrentEntry, UcrDescriptor->Address, + (PCHAR)UcrDescriptor->Address + UcrDescriptor->Size); + + return FALSE; + } + + PreviousSize = 0; + + while (CurrentEntry < Segment->LastValidEntry) + { + if (PreviousSize != CurrentEntry->PreviousSize) + { + DPRINT1("HEAP: Entry %p has incorrect PreviousSize %x instead of %x\n", + CurrentEntry, CurrentEntry->PreviousSize, PreviousSize); + + return FALSE; + } + + PreviousSize = CurrentEntry->Size; + Size = CurrentEntry->Size << HEAP_ENTRY_SHIFT; + + if (CurrentEntry->Flags & HEAP_ENTRY_BUSY) + { + if (TagEntries) + { + UNIMPLEMENTED; + } + + /* Check fill pattern */ + if (CurrentEntry->Flags & HEAP_ENTRY_FILL_PATTERN) + { + if (!RtlpCheckInUsePattern(CurrentEntry)) + return FALSE; + } + } + else + { + /* The entry is free, increase free entries count and total free size */ + *FreeEntriesCount = *FreeEntriesCount + 1; + *TotalFreeSize += CurrentEntry->Size; + + if ((Heap->Flags & HEAP_FREE_CHECKING_ENABLED) && + (CurrentEntry->Flags & HEAP_ENTRY_FILL_PATTERN)) + { + ByteSize = Size - sizeof(HEAP_FREE_ENTRY); + + if ((CurrentEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT) && + (ByteSize > sizeof(HEAP_FREE_ENTRY_EXTRA))) + { + ByteSize -= sizeof(HEAP_FREE_ENTRY_EXTRA); + } + + Result = RtlCompareMemoryUlong((PCHAR)((PHEAP_FREE_ENTRY)CurrentEntry + 1), + ByteSize, + ARENA_FREE_FILLER); + + if (Result != ByteSize) + { + DPRINT1("HEAP: Free heap block %p modified at %p after it was freed\n", + CurrentEntry, + (PCHAR)(CurrentEntry + 1) + Result); + + return FALSE; + } + } + } + + if (CurrentEntry->SegmentOffset != SegmentOffset) + { + DPRINT1("HEAP: Heap entry %p SegmentOffset is incorrect %x (should be %x)\n", CurrentEntry, SegmentOffset, CurrentEntry->SegmentOffset); + return FALSE; + } + + /* Check if it's the last entry */ + if 
(CurrentEntry->Flags & HEAP_ENTRY_LAST_ENTRY) + { + CurrentEntry = (PHEAP_ENTRY)((PCHAR)CurrentEntry + Size); + + if (!UcrDescriptor) + { + /* Check if it's not really the last one */ + if (CurrentEntry != Segment->LastValidEntry) + { + DPRINT1("HEAP: Heap entry %p is not last block in segment (%x)\n", CurrentEntry, Segment->LastValidEntry); + return FALSE; + } + } + else if (CurrentEntry != UcrDescriptor->Address) + { + DPRINT1("HEAP: Heap entry %p does not match next uncommitted address (%p)\n", + CurrentEntry, UcrDescriptor->Address); + + return FALSE; + } + else + { + UnCommittedPages += (UcrDescriptor->Size / PAGE_SIZE); + UnCommittedRanges++; + + CurrentEntry = (PHEAP_ENTRY)((PCHAR)UcrDescriptor->Address + UcrDescriptor->Size); + + /* Go to the next UCR descriptor */ + UcrEntry = UcrEntry->Flink; + if (UcrEntry == &Segment->UCRSegmentList) + { + UcrEntry = NULL; + UcrDescriptor = NULL; + } + else + { + UcrDescriptor = CONTAINING_RECORD(UcrEntry, HEAP_UCR_DESCRIPTOR, SegmentEntry); + } + } + + break; + } + + /* Advance to the next entry */ + CurrentEntry = (PHEAP_ENTRY)((PCHAR)CurrentEntry + Size); + } + } + + /* Check total numbers of UCP and UCR */ + if (Segment->NumberOfUnCommittedPages != UnCommittedPages) + { + DPRINT1("HEAP: Segment %p NumberOfUnCommittedPages is invalid (%x != %x)\n", + Segment, Segment->NumberOfUnCommittedPages, UnCommittedPages); + + return FALSE; + } + + if (Segment->NumberOfUnCommittedRanges != UnCommittedRanges) + { + DPRINT1("HEAP: Segment %p NumberOfUnCommittedRanges is invalid (%x != %x)\n", + Segment, Segment->NumberOfUnCommittedRanges, UnCommittedRanges); + + return FALSE; + } + + return TRUE; +} + +BOOLEAN NTAPI +RtlpValidateHeap(PHEAP Heap, + BOOLEAN ForceValidation) +{ + PHEAP_SEGMENT Segment; + BOOLEAN EmptyList; + UCHAR SegmentOffset; + SIZE_T Size, TotalFreeSize; + ULONG PreviousSize; + PHEAP_VIRTUAL_ALLOC_ENTRY VirtualAllocBlock; + PLIST_ENTRY ListHead, NextEntry; + PHEAP_FREE_ENTRY FreeEntry; + ULONG FreeBlocksCount, FreeListEntriesCount; + + /* Check headers */ + if (!RtlpValidateHeapHeaders(Heap, FALSE)) + return FALSE; + + /* Skip validation if it's not needed */ + if (!ForceValidation && !(Heap->Flags & HEAP_VALIDATE_ALL_ENABLED)) + return TRUE; + + /* Check free lists bitmaps */ + FreeListEntriesCount = 0; + ListHead = &Heap->FreeLists[0]; + + for (Size = 0; Size < HEAP_FREELISTS; Size++) + { + if (Size) + { + /* This is a dedicated list. 
Check if it's empty */ + EmptyList = IsListEmpty(ListHead); + + if (Heap->u.FreeListsInUseBytes[Size >> 3] & (1 << (Size & 7))) + { + if (EmptyList) + { + DPRINT1("HEAP: Empty %x-free list marked as non-empty\n", Size); + return FALSE; + } + } + else + { + if (!EmptyList) + { + DPRINT1("HEAP: Non-empty %x-free list marked as empty\n", Size); + return FALSE; + } + } + } + + /* Now check this list entries */ + NextEntry = ListHead->Flink; + PreviousSize = 0; + + while (ListHead != NextEntry) + { + FreeEntry = CONTAINING_RECORD(NextEntry, HEAP_FREE_ENTRY, FreeList); + NextEntry = NextEntry->Flink; + + /* If there is an in-use entry in a free list - that's quite a big problem */ + if (FreeEntry->Flags & HEAP_ENTRY_BUSY) + { + DPRINT1("HEAP: %x-dedicated list free element %x is marked in-use\n", Size, FreeEntry); + return FALSE; + } + + /* Check sizes according to that specific list's size */ + if ((Size == 0) && (FreeEntry->Size < HEAP_FREELISTS)) + { + DPRINT1("HEAP: Non dedicated list free element %x has size %x which would fit a dedicated list\n", FreeEntry, FreeEntry->Size); + return FALSE; + } + else if (Size && (FreeEntry->Size != Size)) + { + DPRINT1("HEAP: %x-dedicated list free element %x has incorrect size %x\n", Size, FreeEntry, FreeEntry->Size); + return FALSE; + } + else if ((Size == 0) && (FreeEntry->Size < PreviousSize)) + { + DPRINT1("HEAP: Non dedicated list free element %x is not put in order\n", FreeEntry); + return FALSE; + } + + /* Remember previous size*/ + PreviousSize = FreeEntry->Size; + + /* Add up to the total amount of free entries */ + FreeListEntriesCount++; + } + + /* Go to the head of the next free list */ + ListHead++; + } + + /* Check big allocations */ + ListHead = &Heap->VirtualAllocdBlocks; + NextEntry = ListHead->Flink; + + while (ListHead != NextEntry) + { + VirtualAllocBlock = CONTAINING_RECORD(NextEntry, HEAP_VIRTUAL_ALLOC_ENTRY, Entry); + + /* We can only check the fill pattern */ + if (VirtualAllocBlock->BusyBlock.Flags & HEAP_ENTRY_FILL_PATTERN) + { + if (!RtlpCheckInUsePattern(&VirtualAllocBlock->BusyBlock)) + return FALSE; + } + + NextEntry = NextEntry->Flink; + } + + /* Check all segments */ + FreeBlocksCount = 0; + TotalFreeSize = 0; + + for (SegmentOffset = 0; SegmentOffset < HEAP_SEGMENTS; SegmentOffset++) + { + Segment = Heap->Segments[SegmentOffset]; + + /* Go to the next one if there is no segment */ + if (!Segment) continue; + + if (!RtlpValidateHeapSegment(Heap, + Segment, + SegmentOffset, + &FreeBlocksCount, + &TotalFreeSize, + NULL, + NULL)) + { + return FALSE; + } + } + + if (FreeListEntriesCount != FreeBlocksCount) + { + DPRINT1("HEAP: Free blocks count in arena (%d) does not match free blocks number in the free lists (%d)\n", FreeBlocksCount, FreeListEntriesCount); + return FALSE; + } + + if (Heap->TotalFreeSize != TotalFreeSize) + { + DPRINT1("HEAP: Total size of free blocks in arena (%d) does not equal to the one in heap header (%d)\n", TotalFreeSize, Heap->TotalFreeSize); + return FALSE; + } + return TRUE; } @@ -1675,29 +3698,62 @@ RtlpValidateHeapEntry(HEAP * Heap, PVOID HeapEntry) * @implemented */ BOOLEAN NTAPI RtlValidateHeap( - HANDLE Heap, + HANDLE HeapPtr, ULONG Flags, PVOID Block ) { - HEAP *heapPtr = HEAP_GetPtr( Heap ); - if (!heapPtr) - return FALSE; - return HEAP_IsRealArena( heapPtr, Flags, Block, QUIET ); + PHEAP Heap = (PHEAP)HeapPtr; + BOOLEAN HeapLocked = FALSE; + BOOLEAN HeapValid; + + // FIXME Check for special heap + + /* Check signature */ + if (Heap->Signature != HEAP_SIGNATURE) + { + DPRINT1("HEAP: Signature 
%x is invalid for heap %p\n", Heap->Signature, Heap); + return FALSE; + } + + /* Force flags */ + Flags = Heap->ForceFlags; + + /* Acquire the lock if necessary */ + if (!(Flags & HEAP_NO_SERIALIZE)) + { + RtlEnterHeapLock(Heap->LockVariable); + HeapLocked = TRUE; + } + + /* Either validate whole heap or just one entry */ + if (!Block) + HeapValid = RtlpValidateHeap(Heap, TRUE); + else + HeapValid = RtlpValidateHeapEntry(Heap, (PHEAP_ENTRY)Block - 1); + + /* Unlock if it's lockable */ + if (HeapLocked) + { + RtlLeaveHeapLock(Heap->LockVariable); + } + + return HeapValid; } VOID RtlInitializeHeapManager(VOID) { - PPEB Peb; + PPEB Peb; - Peb = NtCurrentPeb(); + /* Get PEB */ + Peb = RtlGetCurrentPeb(); - Peb->NumberOfHeaps = 0; - Peb->MaximumNumberOfHeaps = -1; /* no limit */ - Peb->ProcessHeaps = NULL; + /* Initialize heap-related fields of PEB */ + Peb->NumberOfHeaps = 0; - //RtlInitializeHeapLock(&RtlpProcessHeapsListLock); + /* Initialize the process heaps list protecting lock */ + RtlInitializeHeapLock(&RtlpProcessHeapsListLock); } @@ -1708,21 +3764,8 @@ NTSTATUS NTAPI RtlEnumProcessHeaps(PHEAP_ENUMERATION_ROUTINE HeapEnumerationRoutine, PVOID lParam) { - NTSTATUS Status = STATUS_SUCCESS; - - struct list *ptr=NULL; - RtlEnterHeapLock(&processHeap->lock); - Status=HeapEnumerationRoutine(processHeap,lParam); - - LIST_FOR_EACH( ptr, &processHeap->entry ) - { - if (!NT_SUCCESS(Status)) break; - Status = HeapEnumerationRoutine(ptr,lParam); - } - - RtlLeaveHeapLock(&processHeap->lock); - - return Status; + UNIMPLEMENTED; + return STATUS_NOT_IMPLEMENTED; } @@ -1731,26 +3774,10 @@ RtlEnumProcessHeaps(PHEAP_ENUMERATION_ROUTINE HeapEnumerationRoutine, */ ULONG NTAPI RtlGetProcessHeaps(ULONG count, - HANDLE *heaps ) + HANDLE *heaps) { - ULONG total = 1; /* main heap */ - struct list *ptr; - ULONG i=0; - RtlEnterHeapLock( &processHeap->lock ); - LIST_FOR_EACH( ptr, &processHeap->entry ) total++; - //if (total <= count) - { - *(heaps++) = processHeap; - i++; - LIST_FOR_EACH( ptr, &processHeap->entry ) - { - if (i >= count) break; - i++; - *(heaps++) = LIST_ENTRY( ptr, HEAP, entry ); - } - } - RtlLeaveHeapLock( &processHeap->lock ); - return i; + UNIMPLEMENTED; + return 0; } @@ -1760,22 +3787,8 @@ RtlGetProcessHeaps(ULONG count, BOOLEAN NTAPI RtlValidateProcessHeaps(VOID) { - BOOLEAN Result = TRUE; - HEAP ** pptr; - - RtlEnterHeapLock( &processHeap->lock ); - - for (pptr = (HEAP**)&NtCurrentPeb()->ProcessHeaps; *pptr; pptr++) - { - if (!RtlValidateHeap(*pptr, 0, NULL)) - { - Result = FALSE; - break; - } - } - - RtlLeaveHeapLock( &processHeap->lock ); - return Result; + UNIMPLEMENTED; + return TRUE; } @@ -1802,22 +3815,52 @@ RtlSetUserValueHeap(IN PVOID HeapHandle, IN PVOID BaseAddress, IN PVOID UserValue) { - HEAP *heapPtr; - PHEAP_USER_DATA udata; - - heapPtr = HEAP_GetPtr(HeapHandle); - if (!heapPtr) + PHEAP Heap = (PHEAP)HeapHandle; + PHEAP_ENTRY HeapEntry; + PHEAP_ENTRY_EXTRA Extra; + BOOLEAN HeapLocked = FALSE; + + /* Force flags */ + Flags |= Heap->Flags; + + /* Call special heap */ + if (RtlpHeapIsSpecial(Flags)) + return RtlDebugSetUserValueHeap(Heap, Flags, BaseAddress, UserValue); + + /* Lock if it's lockable */ + if (!(Heap->Flags & HEAP_NO_SERIALIZE)) { - RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_HANDLE ); + RtlEnterHeapLock(Heap->LockVariable); + HeapLocked = TRUE; + } + + /* Get a pointer to the entry */ + HeapEntry = (PHEAP_ENTRY)BaseAddress - 1; + + /* If it's a free entry - return error */ + if (!(HeapEntry->Flags & HEAP_ENTRY_BUSY)) + { + 
RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_INVALID_PARAMETER); + + /* Release the heap lock if it was acquired */ + if (HeapLocked) + RtlLeaveHeapLock(Heap->LockVariable); + return FALSE; } - udata = HEAP_GetUserData(heapPtr, BaseAddress); - if (!udata) + + /* Check if this entry has an extra stuff associated with it */ + if (HeapEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT) { - udata = HEAP_AllocUserData(heapPtr, BaseAddress); - if (!udata) return FALSE; + /* Use extra to store the value */ + Extra = RtlpGetExtraStuffPointer(HeapEntry); + Extra->Settable = (ULONG_PTR)UserValue; } - udata->UserValue = UserValue; + + /* Release the heap lock if it was acquired */ + if (HeapLocked) + RtlLeaveHeapLock(Heap->LockVariable); + return TRUE; } @@ -1832,22 +3875,47 @@ RtlSetUserFlagsHeap(IN PVOID HeapHandle, IN ULONG UserFlagsReset, IN ULONG UserFlagsSet) { - HEAP *heapPtr; - PHEAP_USER_DATA udata; + PHEAP Heap = (PHEAP)HeapHandle; + PHEAP_ENTRY HeapEntry; + BOOLEAN HeapLocked = FALSE; - heapPtr = HEAP_GetPtr(HeapHandle); - if (!heapPtr) + /* Force flags */ + Flags |= Heap->Flags; + + /* Call special heap */ + if (RtlpHeapIsSpecial(Flags)) + return RtlDebugSetUserFlagsHeap(Heap, Flags, BaseAddress, UserFlagsReset, UserFlagsSet); + + /* Lock if it's lockable */ + if (!(Heap->Flags & HEAP_NO_SERIALIZE)) { - RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_HANDLE ); + RtlEnterHeapLock(Heap->LockVariable); + HeapLocked = TRUE; + } + + /* Get a pointer to the entry */ + HeapEntry = (PHEAP_ENTRY)BaseAddress - 1; + + /* If it's a free entry - return error */ + if (!(HeapEntry->Flags & HEAP_ENTRY_BUSY)) + { + RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_INVALID_PARAMETER); + + /* Release the heap lock if it was acquired */ + if (HeapLocked) + RtlLeaveHeapLock(Heap->LockVariable); + return FALSE; } - udata = HEAP_GetUserData(heapPtr, BaseAddress); - if (!udata) - { - udata = HEAP_AllocUserData(heapPtr, BaseAddress); - if (!udata) return FALSE; - } - udata->UserFlags = UserFlagsSet & HEAP_SETTABLE_USER_FLAGS; + + /* Set / reset flags */ + HeapEntry->Flags &= ~(UserFlagsReset >> 4); + HeapEntry->Flags |= (UserFlagsSet >> 4); + + /* Release the heap lock if it was acquired */ + if (HeapLocked) + RtlLeaveHeapLock(Heap->LockVariable); + return TRUE; } @@ -1862,23 +3930,59 @@ RtlGetUserInfoHeap(IN PVOID HeapHandle, OUT PVOID *UserValue, OUT PULONG UserFlags) { - HEAP *heapPtr; - PHEAP_USER_DATA udata; + PHEAP Heap = (PHEAP)HeapHandle; + PHEAP_ENTRY HeapEntry; + PHEAP_ENTRY_EXTRA Extra; + BOOLEAN HeapLocked = FALSE; - heapPtr = HEAP_GetPtr(HeapHandle); - if (!heapPtr) + /* Force flags */ + Flags |= Heap->Flags; + + /* Call special heap */ + if (RtlpHeapIsSpecial(Flags)) + return RtlDebugGetUserInfoHeap(Heap, Flags, BaseAddress, UserValue, UserFlags); + + /* Lock if it's lockable */ + if (!(Heap->Flags & HEAP_NO_SERIALIZE)) { - RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_HANDLE ); + RtlEnterHeapLock(Heap->LockVariable); + HeapLocked = TRUE; + } + + /* Get a pointer to the entry */ + HeapEntry = (PHEAP_ENTRY)BaseAddress - 1; + + /* If it's a free entry - return error */ + if (!(HeapEntry->Flags & HEAP_ENTRY_BUSY)) + { + RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_INVALID_PARAMETER); + + /* Release the heap lock if it was acquired */ + if (HeapLocked) + RtlLeaveHeapLock(Heap->LockVariable); + return FALSE; } - udata = HEAP_GetUserData(heapPtr, BaseAddress); - if (!udata) + + /* Check if this entry has an extra stuff associated with it */ + if (HeapEntry->Flags & 
HEAP_ENTRY_EXTRA_PRESENT) { - RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_PARAMETER ); - return FALSE; + /* Get pointer to extra data */ + Extra = RtlpGetExtraStuffPointer(HeapEntry); + + /* Pass user value */ + if (UserValue) + *UserValue = (PVOID)Extra->Settable; + + /* Decode and return user flags */ + if (UserFlags) + *UserFlags = (HeapEntry->Flags & HEAP_ENTRY_SETTABLE_FLAGS) << 4; } - if (UserValue) *UserValue = udata->UserValue; - if (UserFlags) *UserFlags = udata->UserFlags; + + /* Release the heap lock if it was acquired */ + if (HeapLocked) + RtlLeaveHeapLock(Heap->LockVariable); + return TRUE; } @@ -1958,8 +4062,24 @@ RtlSetHeapInformation(IN HANDLE HeapHandle OPTIONAL, IN PVOID HeapInformation, IN SIZE_T HeapInformationLength) { - UNIMPLEMENTED; - return 0; + /* Setting heap information is not really supported except for enabling LFH */ + if (HeapInformationClass == 0) return STATUS_SUCCESS; + + /* Check buffer length */ + if (HeapInformationLength < sizeof(ULONG)) + { + /* The provided buffer is too small */ + return STATUS_BUFFER_TOO_SMALL; + } + + /* Check for a special magic value for enabling LFH */ + if (*(PULONG)HeapInformation == 2) + { + DPRINT1("RtlSetHeapInformation() needs to enable LFH\n"); + return STATUS_SUCCESS; + } + + return STATUS_UNSUCCESSFUL; } NTSTATUS @@ -1970,30 +4090,26 @@ RtlQueryHeapInformation(HANDLE HeapHandle, SIZE_T HeapInformationLength OPTIONAL, PSIZE_T ReturnLength OPTIONAL) { - HEAP *heapPtr; + PHEAP Heap = (PHEAP)HeapHandle; - heapPtr = HEAP_GetPtr(HeapHandle); - if (!heapPtr) + /* Only HeapCompatibilityInformation is supported */ + if (HeapInformationClass != HeapCompatibilityInformation) + return STATUS_UNSUCCESSFUL; + + /* Set result length */ + if (ReturnLength) *ReturnLength = sizeof(ULONG); + + /* Check buffer length */ + if (HeapInformationLength < sizeof(ULONG)) { - RtlSetLastWin32ErrorAndNtStatusFromNtStatus( STATUS_INVALID_HANDLE ); - return FALSE; + /* It's too small, return needed length */ + return STATUS_BUFFER_TOO_SMALL; } - UNIMPLEMENTED + /* Return front end heap type */ + *(PULONG)HeapInformation = Heap->FrontEndHeapType; - switch (HeapInformationClass) - { - case HeapCompatibilityInformation: - if (ReturnLength) *ReturnLength = sizeof(ULONG); - - if (HeapInformationLength < sizeof(ULONG)) - return STATUS_BUFFER_TOO_SMALL; - - *(ULONG *)HeapInformation = 0; /* standard heap */ - return STATUS_SUCCESS; - default: - return STATUS_INVALID_INFO_CLASS; - } + return STATUS_SUCCESS; } NTSTATUS @@ -2018,3 +4134,5 @@ RtlMultipleFreeHeap(IN PVOID HeapHandle, UNIMPLEMENTED; return 0; } + +/* EOF */ diff --git a/reactos/lib/rtl/heap_rewrite.c b/reactos/lib/rtl/heap_rewrite.c deleted file mode 100644 index 2c28b6a76d7..00000000000 --- a/reactos/lib/rtl/heap_rewrite.c +++ /dev/null @@ -1,4138 +0,0 @@ -/* COPYRIGHT: See COPYING in the top level directory - * PROJECT: ReactOS system libraries - * FILE: lib/rtl/heap.c - * PURPOSE: RTL Heap backend allocator - * PROGRAMMERS: Copyright 2010 Aleksey Bragin - */ - -/* Useful references: - http://msdn.microsoft.com/en-us/library/ms810466.aspx - http://msdn.microsoft.com/en-us/library/ms810603.aspx - http://www.securitylab.ru/analytics/216376.php - http://binglongx.spaces.live.com/blog/cns!142CBF6D49079DE8!596.entry - http://www.phreedom.org/research/exploits/asn1-bitstring/ - http://illmatics.com/Understanding_the_LFH.pdf - http://www.alex-ionescu.com/?p=18 -*/ - -/* INCLUDES *****************************************************************/ - -#include -#include - -#define NDEBUG 
-#include - -HEAP_LOCK RtlpProcessHeapsListLock; - -/* Bitmaps stuff */ - -/* How many least significant bits are clear */ -UCHAR RtlpBitsClearLow[] = -{ - 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, - 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 -}; - -UCHAR FORCEINLINE -RtlpFindLeastSetBit(ULONG Bits) -{ - if (Bits & 0xFFFF) - { - if (Bits & 0xFF) - return RtlpBitsClearLow[Bits & 0xFF]; /* Lowest byte */ - else - return RtlpBitsClearLow[(Bits >> 8) & 0xFF] + 8; /* 2nd byte */ - } - else - { - if ((Bits >> 16) & 0xFF) - return RtlpBitsClearLow[(Bits >> 16) & 0xFF] + 16; /* 3rd byte */ - else - return RtlpBitsClearLow[(Bits >> 24) & 0xFF] + 24; /* Highest byte */ - } -} - -/* Maximum size of a tail-filling pattern used for compare operation */ -UCHAR FillPattern[HEAP_ENTRY_SIZE] = -{ - HEAP_TAIL_FILL, - HEAP_TAIL_FILL, - HEAP_TAIL_FILL, - HEAP_TAIL_FILL, - HEAP_TAIL_FILL, - HEAP_TAIL_FILL, - HEAP_TAIL_FILL, - HEAP_TAIL_FILL -}; - - -ULONG NTAPI -RtlCompareMemoryUlong(PVOID Source, ULONG Length, ULONG Value); - -/* FUNCTIONS *****************************************************************/ - -VOID NTAPI -RtlpInitializeHeap(PHEAP Heap, - PULONG HeaderSize, - ULONG Flags, - BOOLEAN AllocateLock, - PVOID Lock) -{ - PVOID NextHeapBase = Heap + 1; - PHEAP_UCR_DESCRIPTOR UcrDescriptor; - ULONG NumUCRs = 8; - ULONG i; - NTSTATUS Status; - - /* Add UCRs size */ - *HeaderSize += NumUCRs * sizeof(*UcrDescriptor); - - /* Prepare a list of UCRs */ - InitializeListHead(&Heap->UCRList); - InitializeListHead(&Heap->UCRSegments); - UcrDescriptor = NextHeapBase; - - for (i=0; iUCRList, &UcrDescriptor->ListEntry); - } - - NextHeapBase = UcrDescriptor; - // TODO: Add tagging - - /* Round up header size again */ - *HeaderSize = ROUND_UP(*HeaderSize, HEAP_ENTRY_SIZE); - - ASSERT(*HeaderSize <= PAGE_SIZE); - - /* Initialize heap's header */ - Heap->Entry.Size = (*HeaderSize) >> HEAP_ENTRY_SHIFT; - Heap->Entry.Flags = HEAP_ENTRY_BUSY; - - Heap->Signature = HEAP_SIGNATURE; - Heap->Flags = Flags; - Heap->ForceFlags = (Flags & (HEAP_NO_SERIALIZE | - HEAP_GENERATE_EXCEPTIONS | - HEAP_ZERO_MEMORY | - HEAP_REALLOC_IN_PLACE_ONLY | - HEAP_VALIDATE_PARAMETERS_ENABLED | - HEAP_VALIDATE_ALL_ENABLED | - HEAP_TAIL_CHECKING_ENABLED | - HEAP_CREATE_ALIGN_16 | - HEAP_FREE_CHECKING_ENABLED)); - Heap->HeaderValidateCopy = NULL; - Heap->HeaderValidateLength = ((PCHAR)NextHeapBase - (PCHAR)Heap); - - /* Initialize free lists */ - for (i=0; iFreeLists[i]); - } - - /* Initialize "big" allocations list */ - InitializeListHead(&Heap->VirtualAllocdBlocks); - - /* Initialize lock */ - if (AllocateLock) - { - Lock = NextHeapBase; - Status = RtlInitializeHeapLock((PHEAP_LOCK)Lock); - if (!NT_SUCCESS(Status)) - { - DPRINT1("Initializing the lock failed!\n"); - return /*NULL*/; // FIXME! 
- } - } - - /* Set the lock variable */ - Heap->LockVariable = Lock; -} - -VOID FORCEINLINE -RtlpSetFreeListsBit(PHEAP Heap, - PHEAP_FREE_ENTRY FreeEntry) -{ - ULONG Index, Bit; - - ASSERT(FreeEntry->Size < HEAP_FREELISTS); - - /* Calculate offset in the free list bitmap */ - Index = FreeEntry->Size >> 3; /* = FreeEntry->Size / (sizeof(UCHAR) * 8)*/ - Bit = 1 << (FreeEntry->Size & 7); - - /* Assure it's not already set */ - ASSERT((Heap->u.FreeListsInUseBytes[Index] & Bit) == 0); - - /* Set it */ - Heap->u.FreeListsInUseBytes[Index] |= Bit; -} - -VOID FORCEINLINE -RtlpClearFreeListsBit(PHEAP Heap, - PHEAP_FREE_ENTRY FreeEntry) -{ - ULONG Index, Bit; - - ASSERT(FreeEntry->Size < HEAP_FREELISTS); - - /* Calculate offset in the free list bitmap */ - Index = FreeEntry->Size >> 3; /* = FreeEntry->Size / (sizeof(UCHAR) * 8)*/ - Bit = 1 << (FreeEntry->Size & 7); - - /* Assure it was set and the corresponding free list is empty */ - ASSERT(Heap->u.FreeListsInUseBytes[Index] & Bit); - ASSERT(IsListEmpty(&Heap->FreeLists[FreeEntry->Size])); - - /* Clear it */ - Heap->u.FreeListsInUseBytes[Index] ^= Bit; -} - -VOID NTAPI -RtlpInsertFreeBlockHelper(PHEAP Heap, - PHEAP_FREE_ENTRY FreeEntry, - SIZE_T BlockSize, - BOOLEAN NoFill) -{ - PLIST_ENTRY FreeListHead, Current; - PHEAP_FREE_ENTRY CurrentEntry; - - ASSERT(FreeEntry->Size == BlockSize); - - /* Fill if it's not denied */ - if (!NoFill) - { - FreeEntry->Flags &= ~(HEAP_ENTRY_FILL_PATTERN | - HEAP_ENTRY_EXTRA_PRESENT | - HEAP_ENTRY_BUSY); - - if (Heap->Flags & HEAP_FREE_CHECKING_ENABLED) - { - RtlFillMemoryUlong((PCHAR)(FreeEntry + 1), - (BlockSize << HEAP_ENTRY_SHIFT) - sizeof(*FreeEntry), - ARENA_FREE_FILLER); - - FreeEntry->Flags |= HEAP_ENTRY_FILL_PATTERN; - } - } - else - { - /* Clear out all flags except the last entry one */ - FreeEntry->Flags &= HEAP_ENTRY_LAST_ENTRY; - } - - /* Check if PreviousSize of the next entry matches ours */ - if (!(FreeEntry->Flags & HEAP_ENTRY_LAST_ENTRY)) - { - ASSERT(((PHEAP_ENTRY)FreeEntry + BlockSize)->PreviousSize = BlockSize); - } - - /* Insert it either into dedicated or non-dedicated list */ - if (BlockSize < HEAP_FREELISTS) - { - /* Dedicated list */ - FreeListHead = &Heap->FreeLists[BlockSize]; - - if (IsListEmpty(FreeListHead)) - { - RtlpSetFreeListsBit(Heap, FreeEntry); - } - } - else - { - /* Non-dedicated one */ - FreeListHead = &Heap->FreeLists[0]; - Current = FreeListHead->Flink; - - /* Find a position where to insert it to (the list must be sorted) */ - while (FreeListHead != Current) - { - CurrentEntry = CONTAINING_RECORD(Current, HEAP_FREE_ENTRY, FreeList); - - if (BlockSize <= CurrentEntry->Size) - break; - - Current = Current->Flink; - } - - FreeListHead = Current; - } - - /* Actually insert it into the list */ - InsertTailList(FreeListHead, &FreeEntry->FreeList); -} - -VOID NTAPI -RtlpInsertFreeBlock(PHEAP Heap, - PHEAP_FREE_ENTRY FreeEntry, - SIZE_T BlockSize) -{ - USHORT Size, PreviousSize; - UCHAR SegmentOffset, Flags; - PHEAP_SEGMENT Segment; - - DPRINT("RtlpInsertFreeBlock(%p %p %x)\n", Heap, FreeEntry, BlockSize); - - /* Increase the free size counter */ - Heap->TotalFreeSize += BlockSize; - - /* Remember certain values */ - Flags = FreeEntry->Flags; - PreviousSize = FreeEntry->PreviousSize; - SegmentOffset = FreeEntry->SegmentOffset; - Segment = Heap->Segments[SegmentOffset]; - - /* Process it */ - while (BlockSize) - { - /* Check for the max size */ - if (BlockSize > HEAP_MAX_BLOCK_SIZE) - { - Size = HEAP_MAX_BLOCK_SIZE; - - /* Special compensation if it goes above limit just by 1 */ - 
if (BlockSize == (HEAP_MAX_BLOCK_SIZE + 1)) - Size -= 16; - - FreeEntry->Flags = 0; - } - else - { - Size = BlockSize; - FreeEntry->Flags = Flags; - } - - /* Change its size and insert it into a free list */ - FreeEntry->Size = Size; - FreeEntry->PreviousSize = PreviousSize; - FreeEntry->SegmentOffset = SegmentOffset; - - /* Call a helper to actually insert the block */ - RtlpInsertFreeBlockHelper(Heap, FreeEntry, Size, FALSE); - - /* Update sizes */ - PreviousSize = Size; - BlockSize -= Size; - - /* Go to the next entry */ - FreeEntry = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)FreeEntry + Size); - - /* Check if that's all */ - if ((PHEAP_ENTRY)FreeEntry >= Segment->LastValidEntry) return; - } - - /* Update previous size if needed */ - if (!(Flags & HEAP_ENTRY_LAST_ENTRY)) - FreeEntry->PreviousSize = PreviousSize; -} - -VOID NTAPI -RtlpRemoveFreeBlock(PHEAP Heap, - PHEAP_FREE_ENTRY FreeEntry, - BOOLEAN Dedicated, - BOOLEAN NoFill) -{ - SIZE_T Result, RealSize; - PLIST_ENTRY OldBlink, OldFlink; - - // FIXME: Maybe use RemoveEntryList? - - /* Remove the free block */ - OldFlink = FreeEntry->FreeList.Flink; - OldBlink = FreeEntry->FreeList.Blink; - OldBlink->Flink = OldFlink; - OldFlink->Blink = OldBlink; - - /* Update the freelists bitmap */ - if ((OldFlink == OldBlink) && - (Dedicated || (!Dedicated && FreeEntry->Size < HEAP_FREELISTS))) - { - RtlpClearFreeListsBit(Heap, FreeEntry); - } - - /* Fill with pattern if necessary */ - if (!NoFill && - (FreeEntry->Flags & HEAP_ENTRY_FILL_PATTERN)) - { - RealSize = (FreeEntry->Size << HEAP_ENTRY_SHIFT) - sizeof(*FreeEntry); - - /* Deduct extra stuff from block's real size */ - if (FreeEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT && - RealSize > sizeof(HEAP_FREE_ENTRY_EXTRA)) - { - RealSize -= sizeof(HEAP_FREE_ENTRY_EXTRA); - } - - /* Check if the free filler is intact */ - Result = RtlCompareMemoryUlong((PCHAR)(FreeEntry + 1), - RealSize, - ARENA_FREE_FILLER); - - if (Result != RealSize) - { - DPRINT1("Free heap block %p modified at %p after it was freed\n", - FreeEntry, - (PCHAR)(FreeEntry + 1) + Result); - } - } -} - -SIZE_T NTAPI -RtlpGetSizeOfBigBlock(PHEAP_ENTRY HeapEntry) -{ - PHEAP_VIRTUAL_ALLOC_ENTRY VirtualEntry; - - /* Get pointer to the containing record */ - VirtualEntry = CONTAINING_RECORD(HeapEntry, HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock); - - /* Restore the real size */ - return VirtualEntry->CommitSize - HeapEntry->Size; -} - -PHEAP_UCR_DESCRIPTOR NTAPI -RtlpCreateUnCommittedRange(PHEAP_SEGMENT Segment) -{ - PLIST_ENTRY Entry; - PHEAP_UCR_DESCRIPTOR UcrDescriptor; - PHEAP_UCR_SEGMENT UcrSegment; - PHEAP Heap = Segment->Heap; - SIZE_T ReserveSize = 16 * PAGE_SIZE; - SIZE_T CommitSize = 1 * PAGE_SIZE; - NTSTATUS Status; - - DPRINT("RtlpCreateUnCommittedRange(%p)\n", Segment); - - /* Check if we have unused UCRs */ - if (IsListEmpty(&Heap->UCRList)) - { - /* Get a pointer to the first UCR segment */ - UcrSegment = CONTAINING_RECORD(&Heap->UCRSegments.Flink, HEAP_UCR_SEGMENT, ListEntry); - - /* Check the list of UCR segments */ - if (IsListEmpty(&Heap->UCRSegments) || - UcrSegment->ReservedSize == UcrSegment->CommittedSize) - { - /* We need to create a new one. 
Reserve 16 pages for it */ - UcrSegment = NULL; - Status = ZwAllocateVirtualMemory(NtCurrentProcess(), - (PVOID *)&UcrSegment, - 0, - &ReserveSize, - MEM_RESERVE, - PAGE_READWRITE); - - if (!NT_SUCCESS(Status)) return NULL; - - /* Commit one page */ - Status = ZwAllocateVirtualMemory(NtCurrentProcess(), - (PVOID *)&UcrSegment, - 0, - &CommitSize, - MEM_COMMIT, - PAGE_READWRITE); - - if (!NT_SUCCESS(Status)) - { - /* Release reserved memory */ - ZwFreeVirtualMemory(NtCurrentProcess(), - (PVOID *)&UcrDescriptor, - &ReserveSize, - MEM_RELEASE); - return NULL; - } - - /* Set it's data */ - UcrSegment->ReservedSize = ReserveSize; - UcrSegment->CommittedSize = CommitSize; - - /* Add it to the head of the list */ - InsertHeadList(&Heap->UCRSegments, &UcrSegment->ListEntry); - - /* Get a pointer to the first available UCR descriptor */ - UcrDescriptor = (PHEAP_UCR_DESCRIPTOR)(UcrSegment + 1); - } - else - { - /* It's possible to use existing UCR segment. Commit one more page */ - UcrDescriptor = (PHEAP_UCR_DESCRIPTOR)((PCHAR)UcrSegment + UcrSegment->CommittedSize); - Status = ZwAllocateVirtualMemory(NtCurrentProcess(), - (PVOID *)&UcrDescriptor, - 0, - &CommitSize, - MEM_COMMIT, - PAGE_READWRITE); - - if (!NT_SUCCESS(Status)) return NULL; - - /* Update sizes */ - UcrSegment->CommittedSize += CommitSize; - } - - /* There is a whole bunch of new UCR descriptors. Put them into the unused list */ - while ((PCHAR)UcrDescriptor < ((PCHAR)UcrSegment + UcrSegment->CommittedSize)) - { - InsertTailList(&Heap->UCRList, &UcrDescriptor->ListEntry); - UcrDescriptor++; - } - } - - /* There are unused UCRs, just get the first one */ - Entry = RemoveHeadList(&Heap->UCRList); - UcrDescriptor = CONTAINING_RECORD(Entry, HEAP_UCR_DESCRIPTOR, ListEntry); - return UcrDescriptor; -} - -VOID NTAPI -RtlpDestroyUnCommittedRange(PHEAP_SEGMENT Segment, - PHEAP_UCR_DESCRIPTOR UcrDescriptor) -{ - /* Zero it out */ - UcrDescriptor->Address = NULL; - UcrDescriptor->Size = 0; - - /* Put it into the heap's list of unused UCRs */ - InsertHeadList(&Segment->Heap->UCRList, &UcrDescriptor->ListEntry); -} - -VOID NTAPI -RtlpInsertUnCommittedPages(PHEAP_SEGMENT Segment, - ULONG_PTR Address, - SIZE_T Size) -{ - PLIST_ENTRY Current; - PHEAP_UCR_DESCRIPTOR UcrDescriptor; - - DPRINT("RtlpInsertUnCommittedPages(%p %p %x)\n", Segment, Address, Size); - - /* Go through the list of UCR descriptors, they are sorted from lowest address - to the highest */ - Current = Segment->UCRSegmentList.Flink; - while(Current != &Segment->UCRSegmentList) - { - UcrDescriptor = CONTAINING_RECORD(Current, HEAP_UCR_DESCRIPTOR, SegmentEntry); - - if ((ULONG_PTR)UcrDescriptor->Address > Address) - { - /* Check for a really lucky case */ - if ((Address + Size) == (ULONG_PTR)UcrDescriptor->Address) - { - /* Exact match */ - UcrDescriptor->Address = (PVOID)Address; - UcrDescriptor->Size += Size; - return; - } - - /* We found the block after which the new one should go */ - break; - } - else if (((ULONG_PTR)UcrDescriptor->Address + UcrDescriptor->Size) == Address) - { - /* Modify this entry */ - Address = (ULONG_PTR)UcrDescriptor->Address; - Size += UcrDescriptor->Size; - - /* Remove it from the list and destroy it */ - RemoveEntryList(Current); - RtlpDestroyUnCommittedRange(Segment, UcrDescriptor); - - Segment->NumberOfUnCommittedRanges--; - } - else - { - /* Advance to the next descriptor */ - Current = Current->Flink; - } - } - - /* Create a new UCR descriptor */ - UcrDescriptor = RtlpCreateUnCommittedRange(Segment); - if (!UcrDescriptor) return; - - 
UcrDescriptor->Address = (PVOID)Address; - UcrDescriptor->Size = Size; - - /* "Current" is the descriptor after which our one should go */ - InsertTailList(Current, &UcrDescriptor->SegmentEntry); - - DPRINT("Added segment UCR with base %p, size 0x%x\n", Address, Size); - - /* Increase counters */ - Segment->NumberOfUnCommittedRanges++; -} - -PHEAP_FREE_ENTRY NTAPI -RtlpFindAndCommitPages(PHEAP Heap, - PHEAP_SEGMENT Segment, - PSIZE_T Size, - PVOID AddressRequested) -{ - PLIST_ENTRY Current; - ULONG_PTR Address = 0; - PHEAP_UCR_DESCRIPTOR UcrDescriptor, PreviousUcr = NULL; - PHEAP_ENTRY FirstEntry, LastEntry, PreviousLastEntry; - NTSTATUS Status; - - DPRINT("RtlpFindAndCommitPages(%p %p %x %p)\n", Heap, Segment, *Size, Address); - - /* Go through UCRs in a segment */ - Current = Segment->UCRSegmentList.Flink; - while(Current != &Segment->UCRSegmentList) - { - UcrDescriptor = CONTAINING_RECORD(Current, HEAP_UCR_DESCRIPTOR, SegmentEntry); - - /* Check if we can use that one right away */ - if (UcrDescriptor->Size >= *Size && - (UcrDescriptor->Address == AddressRequested || !AddressRequested)) - { - /* Get the address */ - Address = (ULONG_PTR)UcrDescriptor->Address; - - /* Commit it */ - if (Heap->CommitRoutine) - { - Status = Heap->CommitRoutine(Heap, (PVOID *)&Address, Size); - } - else - { - Status = ZwAllocateVirtualMemory(NtCurrentProcess(), - (PVOID *)&Address, - 0, - Size, - MEM_COMMIT, - PAGE_READWRITE); - } - - DPRINT("Committed %d bytes at base %p, UCR size is %d\n", *Size, Address, UcrDescriptor->Size); - - /* Fail in unsuccessful case */ - if (!NT_SUCCESS(Status)) - { - DPRINT1("Committing page failed with status 0x%08X\n", Status); - return NULL; - } - - /* Update tracking numbers */ - Segment->NumberOfUnCommittedPages -= *Size / PAGE_SIZE; - - /* Calculate first and last entries */ - FirstEntry = (PHEAP_ENTRY)Address; - - if ((Segment->LastEntryInSegment->Flags & HEAP_ENTRY_LAST_ENTRY) && - (ULONG_PTR)(Segment->LastEntryInSegment + Segment->LastEntryInSegment->Size) == (ULONG_PTR)UcrDescriptor->Address) - { - LastEntry = Segment->LastEntryInSegment; - } - else - { - /* Go through the entries to find the last one */ - - if (PreviousUcr) - LastEntry = (PHEAP_ENTRY)((ULONG_PTR)PreviousUcr->Address + PreviousUcr->Size); - else - LastEntry = Segment->FirstEntry; - - while (!(LastEntry->Flags & HEAP_ENTRY_LAST_ENTRY)) - { - PreviousLastEntry = LastEntry; - LastEntry += LastEntry->Size; - - if ((ULONG_PTR)LastEntry >= (ULONG_PTR)Segment->LastValidEntry || - LastEntry->Size == 0) - { - if (LastEntry == (PHEAP_ENTRY)Address) - { - /* Found it */ - LastEntry = PreviousLastEntry; - break; - } - - DPRINT1("Last entry not found in a committed range near to %p\n", PreviousLastEntry); - return NULL; - } - } - } - - /* Unmark it as a last entry */ - LastEntry->Flags &= ~HEAP_ENTRY_LAST_ENTRY; - - /* Update UCR descriptor */ - UcrDescriptor->Address = (PVOID)((ULONG_PTR)UcrDescriptor->Address + *Size); - UcrDescriptor->Size -= *Size; - - DPRINT("Updating UcrDescriptor %p, new Address %p, size %d\n", - UcrDescriptor, UcrDescriptor->Address, UcrDescriptor->Size); - - /* Check if anything left in this UCR */ - if (UcrDescriptor->Size == 0) - { - /* It's fully exhausted */ - if (UcrDescriptor->Address == Segment->LastValidEntry) - { - FirstEntry->Flags = HEAP_ENTRY_LAST_ENTRY; - Segment->LastEntryInSegment = FirstEntry; - } - else - { - FirstEntry->Flags = 0; - Segment->LastEntryInSegment = Segment->FirstEntry; - } - - /* This UCR needs to be removed because it became useless */ - 
RemoveEntryList(&UcrDescriptor->SegmentEntry); - - RtlpDestroyUnCommittedRange(Segment, UcrDescriptor); - Segment->NumberOfUnCommittedRanges--; - } - else - { - FirstEntry->Flags = HEAP_ENTRY_LAST_ENTRY; - Segment->LastEntryInSegment = FirstEntry; - } - - /* Set various first entry fields*/ - FirstEntry->SegmentOffset = LastEntry->SegmentOffset; - FirstEntry->Size = *Size >> HEAP_ENTRY_SHIFT; - FirstEntry->PreviousSize = LastEntry->Size; - - /* Update previous size */ - if (!(FirstEntry->Flags & HEAP_ENTRY_LAST_ENTRY)) - (FirstEntry + FirstEntry->Size)->PreviousSize = FirstEntry->Size; - - /* We're done */ - return (PHEAP_FREE_ENTRY)FirstEntry; - } - - /* Advance to the next descriptor */ - PreviousUcr = UcrDescriptor; - Current = Current->Flink; - } - - return NULL; -} - -VOID NTAPI -RtlpDeCommitFreeBlock(PHEAP Heap, - PHEAP_FREE_ENTRY FreeEntry, - SIZE_T Size) -{ - PHEAP_SEGMENT Segment; - PHEAP_ENTRY PrecedingInUseEntry = NULL, NextInUseEntry = NULL; - PHEAP_FREE_ENTRY NextFreeEntry; - PHEAP_UCR_DESCRIPTOR UcrDescriptor; - ULONG PrecedingSize, NextSize, DecommitSize; - ULONG_PTR DecommitBase; - NTSTATUS Status; - - DPRINT("Decommitting %p %p %x\n", Heap, FreeEntry, Size); - - /* We can't decommit if there is a commit routine! */ - if (Heap->CommitRoutine) - { - /* Just add it back the usual way */ - RtlpInsertFreeBlock(Heap, FreeEntry, Size); - return; - } - - /* Get the segment */ - Segment = Heap->Segments[FreeEntry->SegmentOffset]; - - /* Get the preceding entry */ - DecommitBase = ROUND_UP(FreeEntry, PAGE_SIZE); - PrecedingSize = (PHEAP_ENTRY)DecommitBase - (PHEAP_ENTRY)FreeEntry; - - if (PrecedingSize == 1) - { - /* Just 1 heap entry, increase the base/size */ - DecommitBase += PAGE_SIZE; - PrecedingSize += PAGE_SIZE >> HEAP_ENTRY_SHIFT; - } - else if (FreeEntry->PreviousSize && - (DecommitBase == (ULONG_PTR)FreeEntry)) - { - PrecedingInUseEntry = (PHEAP_ENTRY)FreeEntry - FreeEntry->PreviousSize; - } - - /* Get the next entry */ - NextFreeEntry = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)FreeEntry + Size); - DecommitSize = ROUND_DOWN(NextFreeEntry, PAGE_SIZE); - NextSize = (PHEAP_ENTRY)NextFreeEntry - (PHEAP_ENTRY)DecommitSize; - - if (NextSize == 1) - { - /* Just 1 heap entry, increase the size */ - DecommitSize -= PAGE_SIZE; - NextSize += PAGE_SIZE >> HEAP_ENTRY_SHIFT; - } - else if (NextSize == 0 && - !(FreeEntry->Flags & HEAP_ENTRY_LAST_ENTRY)) - { - NextInUseEntry = (PHEAP_ENTRY)NextFreeEntry; - } - - NextFreeEntry = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)NextFreeEntry - NextSize); - - /* Calculate real decommit size */ - if (DecommitSize > DecommitBase) - { - DecommitSize -= DecommitBase; - } - else - { - /* Nothing to decommit */ - RtlpInsertFreeBlock(Heap, FreeEntry, Size); - return; - } - - /* A decommit is necessary. Create a UCR descriptor */ - UcrDescriptor = RtlpCreateUnCommittedRange(Segment); - if (!UcrDescriptor) - { - DPRINT1("HEAP: Failed to create UCR descriptor\n"); - RtlpInsertFreeBlock(Heap, FreeEntry, PrecedingSize); - return; - } - - /* Decommit the memory */ - Status = ZwFreeVirtualMemory(NtCurrentProcess(), - (PVOID *)&DecommitBase, - &DecommitSize, - MEM_DECOMMIT); - - /* Delete that UCR. 
This is needed to assure there is an unused UCR entry in the list */ - RtlpDestroyUnCommittedRange(Segment, UcrDescriptor); - - if (!NT_SUCCESS(Status)) - { - RtlpInsertFreeBlock(Heap, FreeEntry, Size); - return; - } - - /* Insert uncommitted pages */ - RtlpInsertUnCommittedPages(Segment, DecommitBase, DecommitSize); - Segment->NumberOfUnCommittedPages += (DecommitSize / PAGE_SIZE); - - if (PrecedingSize) - { - /* Adjust size of this free entry and insert it */ - FreeEntry->Flags = HEAP_ENTRY_LAST_ENTRY; - FreeEntry->Size = PrecedingSize; - Heap->TotalFreeSize += PrecedingSize; - - /* Set last entry in the segment to this entry */ - Segment->LastEntryInSegment = (PHEAP_ENTRY)FreeEntry; - - /* Insert it into the free list */ - RtlpInsertFreeBlockHelper(Heap, FreeEntry, PrecedingSize, FALSE); - } - else if (PrecedingInUseEntry) - { - /* Adjust preceding in use entry */ - PrecedingInUseEntry->Flags |= HEAP_ENTRY_LAST_ENTRY; - Segment->LastEntryInSegment = PrecedingInUseEntry; - } else if ((ULONG_PTR)Segment->LastEntryInSegment >= DecommitBase && - ((PCHAR)Segment->LastEntryInSegment < ((PCHAR)DecommitBase + DecommitSize))) - { - /* Update this segment's last entry */ - Segment->LastEntryInSegment = Segment->FirstEntry; - } - - /* Now the next one */ - if (NextSize) - { - /* Adjust size of this free entry and insert it */ - NextFreeEntry->Flags = 0; - NextFreeEntry->PreviousSize = 0; - NextFreeEntry->SegmentOffset = Segment->Entry.SegmentOffset; - NextFreeEntry->Size = NextSize; - - ((PHEAP_FREE_ENTRY)((PHEAP_ENTRY)NextFreeEntry + NextSize))->PreviousSize = NextSize; - - Heap->TotalFreeSize += NextSize; - RtlpInsertFreeBlockHelper(Heap, NextFreeEntry, NextSize, FALSE); - } - else if (NextInUseEntry) - { - NextInUseEntry->PreviousSize = 0; - } -} - -BOOLEAN NTAPI -RtlpInitializeHeapSegment(PHEAP Heap, - PHEAP_SEGMENT Segment, - UCHAR SegmentIndex, - ULONG Flags, - PVOID BaseAddress, - PVOID UncommittedBase, - PVOID LimitAddress) -{ - ULONG Pages, CommitSize; - PHEAP_ENTRY HeapEntry; - USHORT PreviousSize = 0, NewSize; - NTSTATUS Status; - - Pages = ((PCHAR)LimitAddress - (PCHAR)BaseAddress) / PAGE_SIZE; - - HeapEntry = (PHEAP_ENTRY)ROUND_UP(Segment + 1, HEAP_ENTRY_SIZE); - - DPRINT("RtlpInitializeHeapSegment(%p %p %x %x %p %p %p)\n", Heap, Segment, SegmentIndex, Flags, BaseAddress, UncommittedBase, LimitAddress); - DPRINT("Pages %x, HeapEntry %p, sizeof(HEAP_SEGMENT) %x\n", Pages, HeapEntry, sizeof(HEAP_SEGMENT)); - - /* Check if it's the first segment and remember its size */ - if (Heap == BaseAddress) - PreviousSize = Heap->Entry.Size; - - NewSize = ((PCHAR)HeapEntry - (PCHAR)Segment) >> HEAP_ENTRY_SHIFT; - - if ((PVOID)(HeapEntry + 1) >= UncommittedBase) - { - /* Check if it goes beyond the limit */ - if ((PVOID)(HeapEntry + 1) >= LimitAddress) - return FALSE; - - /* Need to commit memory */ - CommitSize = (PCHAR)(HeapEntry + 1) - (PCHAR)UncommittedBase; - Status = ZwAllocateVirtualMemory(NtCurrentProcess(), - (PVOID)&UncommittedBase, - 0, - &CommitSize, - MEM_COMMIT, - PAGE_READWRITE); - if (!NT_SUCCESS(Status)) - { - DPRINT1("Committing page failed with status 0x%08X\n", Status); - return FALSE; - } - - DPRINT("Committed %d bytes at base %p\n", CommitSize, UncommittedBase); - - /* Calcule the new uncommitted base */ - UncommittedBase = (PVOID)((PCHAR)UncommittedBase + CommitSize); - } - - /* Initialize the segment entry */ - Segment->Entry.PreviousSize = PreviousSize; - Segment->Entry.Size = NewSize; - Segment->Entry.Flags = HEAP_ENTRY_BUSY; - Segment->Entry.SegmentOffset = SegmentIndex; - 
- /* Initialize the segment itself */ - Segment->SegmentSignature = HEAP_SEGMENT_SIGNATURE; - Segment->Heap = Heap; - Segment->BaseAddress = BaseAddress; - Segment->FirstEntry = HeapEntry; - Segment->LastValidEntry = (PHEAP_ENTRY)((PCHAR)BaseAddress + Pages * PAGE_SIZE); - Segment->NumberOfPages = Pages; - Segment->NumberOfUnCommittedPages = ((PCHAR)LimitAddress - (PCHAR)UncommittedBase) / PAGE_SIZE; - InitializeListHead(&Segment->UCRSegmentList); - - /* Insert uncommitted pages into UCR (uncommitted ranges) list */ - if (Segment->NumberOfUnCommittedPages) - { - RtlpInsertUnCommittedPages(Segment, (ULONG_PTR)UncommittedBase, Segment->NumberOfUnCommittedPages * PAGE_SIZE); - } - - /* Set the segment index pointer */ - Heap->Segments[SegmentIndex] = Segment; - - /* Prepare a free heap entry */ - HeapEntry->Flags = HEAP_ENTRY_LAST_ENTRY; - HeapEntry->PreviousSize = Segment->Entry.Size; - HeapEntry->SegmentOffset = SegmentIndex; - - /* Set last entry in segment */ - Segment->LastEntryInSegment = HeapEntry; - - /* Insert it */ - RtlpInsertFreeBlock(Heap, (PHEAP_FREE_ENTRY)HeapEntry, (PHEAP_ENTRY)UncommittedBase - HeapEntry); - - return TRUE; -} - -VOID NTAPI -RtlpDestroyHeapSegment(PHEAP_SEGMENT Segment) -{ - NTSTATUS Status; - PVOID BaseAddress; - SIZE_T Size = 0; - - /* Make sure it's not user allocated */ - if (Segment->SegmentFlags & HEAP_USER_ALLOCATED) return; - - BaseAddress = Segment->BaseAddress; - DPRINT("Destroying segment %p, BA %p\n", Segment, BaseAddress); - - /* Release virtual memory */ - Status = ZwFreeVirtualMemory(NtCurrentProcess(), - &BaseAddress, - &Size, - MEM_RELEASE); - - if (!NT_SUCCESS(Status)) - { - DPRINT1("HEAP: Failed to release segment's memory with status 0x%08X\n", Status); - } -} - -/* Usermode only! */ -VOID NTAPI -RtlpAddHeapToProcessList(PHEAP Heap) -{ - PPEB Peb; - - /* Get PEB */ - Peb = RtlGetCurrentPeb(); - - /* Acquire the lock */ - RtlEnterHeapLock(&RtlpProcessHeapsListLock); - - //_SEH2_TRY { - /* Check if max number of heaps reached */ - if (Peb->NumberOfHeaps == Peb->MaximumNumberOfHeaps) - { - // TODO: Handle this case - ASSERT(FALSE); - } - - /* Add the heap to the process heaps */ - Peb->ProcessHeaps[Peb->NumberOfHeaps] = Heap; - Peb->NumberOfHeaps++; - Heap->ProcessHeapsListIndex = Peb->NumberOfHeaps; - // } _SEH2_FINALLY { - - /* Release the lock */ - RtlLeaveHeapLock(&RtlpProcessHeapsListLock); - - // } _SEH2_END -} - -/* Usermode only! */ -VOID NTAPI -RtlpRemoveHeapFromProcessList(PHEAP Heap) -{ - PPEB Peb; - PHEAP *Current, *Next; - ULONG Count; - - /* Get PEB */ - Peb = RtlGetCurrentPeb(); - - /* Acquire the lock */ - RtlEnterHeapLock(&RtlpProcessHeapsListLock); - - /* Check if we don't need anything to do */ - if ((Heap->ProcessHeapsListIndex == 0) || - (Heap->ProcessHeapsListIndex > Peb->NumberOfHeaps) || - (Peb->NumberOfHeaps == 0)) - { - /* Release the lock */ - RtlLeaveHeapLock(&RtlpProcessHeapsListLock); - - return; - } - - /* The process actually has more than one heap. 
- Use classic, lernt from university times algorithm for removing an entry - from a static array */ - - Current = (PHEAP *)&Peb->ProcessHeaps[Heap->ProcessHeapsListIndex - 1]; - Next = Current + 1; - - /* How many items we need to shift to the left */ - Count = Peb->NumberOfHeaps - (Heap->ProcessHeapsListIndex - 1); - - /* Move them all in a loop */ - while (--Count) - { - /* Copy it and advance next pointer */ - *Current = *Next; - - /* Update its index */ - (*Current)->ProcessHeapsListIndex -= 1; - - /* Advance pointers */ - Current++; - Next++; - } - - /* Decrease total number of heaps */ - Peb->NumberOfHeaps--; - - /* Zero last unused item */ - Peb->ProcessHeaps[Peb->NumberOfHeaps] = NULL; - Heap->ProcessHeapsListIndex = 0; - - /* Release the lock */ - RtlLeaveHeapLock(&RtlpProcessHeapsListLock); -} - -PHEAP_FREE_ENTRY NTAPI -RtlpCoalesceHeap(PHEAP Heap) -{ - UNIMPLEMENTED; - return NULL; -} - -PHEAP_FREE_ENTRY NTAPI -RtlpCoalesceFreeBlocks (PHEAP Heap, - PHEAP_FREE_ENTRY FreeEntry, - PSIZE_T FreeSize, - BOOLEAN Remove) -{ - PHEAP_FREE_ENTRY CurrentEntry, NextEntry; - - /* Get the previous entry */ - CurrentEntry = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)FreeEntry - FreeEntry->PreviousSize); - - /* Check it */ - if (CurrentEntry != FreeEntry && - !(CurrentEntry->Flags & HEAP_ENTRY_BUSY) && - (*FreeSize + CurrentEntry->Size) <= HEAP_MAX_BLOCK_SIZE) - { - ASSERT(FreeEntry->PreviousSize == CurrentEntry->Size); - - /* Remove it if asked for */ - if (Remove) - { - RtlpRemoveFreeBlock(Heap, FreeEntry, FALSE, FALSE); - Heap->TotalFreeSize -= FreeEntry->Size; - - /* Remove it only once! */ - Remove = FALSE; - } - - /* Remove previous entry too */ - RtlpRemoveFreeBlock(Heap, CurrentEntry, FALSE, FALSE); - - /* Copy flags */ - CurrentEntry->Flags = FreeEntry->Flags & HEAP_ENTRY_LAST_ENTRY; - - /* Update last entry in the segment */ - if (CurrentEntry->Flags & HEAP_ENTRY_LAST_ENTRY) - Heap->Segments[CurrentEntry->SegmentOffset]->LastEntryInSegment = (PHEAP_ENTRY)CurrentEntry; - - /* Advance FreeEntry and update sizes */ - FreeEntry = CurrentEntry; - *FreeSize = *FreeSize + CurrentEntry->Size; - Heap->TotalFreeSize -= CurrentEntry->Size; - FreeEntry->Size = *FreeSize; - - /* Also update previous size if needed */ - if (!(FreeEntry->Flags & HEAP_ENTRY_LAST_ENTRY)) - { - ((PHEAP_ENTRY)FreeEntry + *FreeSize)->PreviousSize = *FreeSize; - } - } - - /* Check the next block if it exists */ - if (!(FreeEntry->Flags & HEAP_ENTRY_LAST_ENTRY)) - { - NextEntry = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)FreeEntry + *FreeSize); - - if (!(NextEntry->Flags & HEAP_ENTRY_BUSY) && - NextEntry->Size + *FreeSize <= HEAP_MAX_BLOCK_SIZE) - { - ASSERT(*FreeSize == NextEntry->PreviousSize); - - /* Remove it if asked for */ - if (Remove) - { - RtlpRemoveFreeBlock(Heap, FreeEntry, FALSE, FALSE); - Heap->TotalFreeSize -= FreeEntry->Size; - } - - /* Copy flags */ - FreeEntry->Flags = NextEntry->Flags & HEAP_ENTRY_LAST_ENTRY; - - /* Update last entry in the segment */ - if (FreeEntry->Flags & HEAP_ENTRY_LAST_ENTRY) - Heap->Segments[FreeEntry->SegmentOffset]->LastEntryInSegment = (PHEAP_ENTRY)FreeEntry; - - /* Remove next entry now */ - RtlpRemoveFreeBlock(Heap, NextEntry, FALSE, FALSE); - - /* Update sizes */ - *FreeSize = *FreeSize + NextEntry->Size; - Heap->TotalFreeSize -= NextEntry->Size; - FreeEntry->Size = *FreeSize; - - /* Also update previous size if needed */ - if (!(FreeEntry->Flags & HEAP_ENTRY_LAST_ENTRY)) - { - ((PHEAP_ENTRY)FreeEntry + *FreeSize)->PreviousSize = *FreeSize; - } - } - } - return FreeEntry; -} - -PHEAP_FREE_ENTRY 
NTAPI -RtlpExtendHeap(PHEAP Heap, - SIZE_T Size) -{ - ULONG Pages; - UCHAR Index, EmptyIndex; - SIZE_T FreeSize, CommitSize, ReserveSize; - PHEAP_SEGMENT Segment; - PHEAP_FREE_ENTRY FreeEntry; - NTSTATUS Status; - - DPRINT("RtlpExtendHeap(%p %x)\n", Heap, Size); - - /* Calculate amount in pages */ - Pages = (Size + PAGE_SIZE - 1) / PAGE_SIZE; - FreeSize = Pages * PAGE_SIZE; - DPRINT("Pages %x, FreeSize %x. Going through segments...\n", Pages, FreeSize); - - /* Find an empty segment */ - EmptyIndex = HEAP_SEGMENTS; - for (Index = 0; Index < HEAP_SEGMENTS; Index++) - { - Segment = Heap->Segments[Index]; - - if (Segment) DPRINT("Segment[%d] %p with NOUCP %x\n", Index, Segment, Segment->NumberOfUnCommittedPages); - - /* Check if its size suits us */ - if (Segment && - Pages <= Segment->NumberOfUnCommittedPages) - { - DPRINT("This segment is suitable\n"); - - /* Commit needed amount */ - FreeEntry = RtlpFindAndCommitPages(Heap, Segment, &FreeSize, NULL); - - /* Coalesce it with adjacent entries */ - if (FreeEntry) - { - FreeSize = FreeSize >> HEAP_ENTRY_SHIFT; - FreeEntry = RtlpCoalesceFreeBlocks(Heap, FreeEntry, &FreeSize, FALSE); - RtlpInsertFreeBlock(Heap, FreeEntry, FreeSize); - return FreeEntry; - } - } - else if (!Segment && - EmptyIndex == HEAP_SEGMENTS) - { - /* Remember the first unused segment index */ - EmptyIndex = Index; - } - } - - /* No luck, need to grow the heap */ - if ((Heap->Flags & HEAP_GROWABLE) && - (EmptyIndex != HEAP_SEGMENTS)) - { - Segment = NULL; - - /* Reserve the memory */ - if ((Size + PAGE_SIZE) <= Heap->SegmentReserve) - ReserveSize = Heap->SegmentReserve; - else - ReserveSize = Size + PAGE_SIZE; - - Status = ZwAllocateVirtualMemory(NtCurrentProcess(), - (PVOID)&Segment, - 0, - &ReserveSize, - MEM_RESERVE, - PAGE_READWRITE); - - /* If it failed, retry again with a half division algorithm */ - while (!NT_SUCCESS(Status) && - ReserveSize != Size + PAGE_SIZE) - { - ReserveSize /= 2; - - if (ReserveSize < (Size + PAGE_SIZE)) - ReserveSize = Size + PAGE_SIZE; - - Status = ZwAllocateVirtualMemory(NtCurrentProcess(), - (PVOID)&Segment, - 0, - &ReserveSize, - MEM_RESERVE, - PAGE_READWRITE); - } - - /* Proceed only if it's success */ - if (NT_SUCCESS(Status)) - { - Heap->SegmentReserve += ReserveSize; - - /* Now commit the memory */ - if ((Size + PAGE_SIZE) <= Heap->SegmentCommit) - CommitSize = Heap->SegmentCommit; - else - CommitSize = Size + PAGE_SIZE; - - Status = ZwAllocateVirtualMemory(NtCurrentProcess(), - (PVOID)&Segment, - 0, - &CommitSize, - MEM_COMMIT, - PAGE_READWRITE); - - DPRINT("Committed %d bytes at base %p\n", CommitSize, Segment); - - /* Initialize heap segment if commit was successful */ - if (NT_SUCCESS(Status)) - { - if (!RtlpInitializeHeapSegment(Heap, Segment, EmptyIndex, 0, Segment, - (PCHAR)Segment + CommitSize, (PCHAR)Segment + ReserveSize)) - { - Status = STATUS_NO_MEMORY; - } - } - - /* If everything worked - cool */ - if (NT_SUCCESS(Status)) return (PHEAP_FREE_ENTRY)Segment->FirstEntry; - - DPRINT1("Committing failed with status 0x%08X\n", Status); - - /* Nope, we failed. 
Free memory */ - ZwFreeVirtualMemory(NtCurrentProcess(), - (PVOID)&Segment, - &ReserveSize, - MEM_RELEASE); - } - else - { - DPRINT1("Reserving failed with status 0x%08X\n", Status); - } - } - - if (RtlpGetMode() == UserMode) - { - /* If coalescing on free is disabled in usermode, then do it here */ - if (Heap->Flags & HEAP_DISABLE_COALESCE_ON_FREE) - { - FreeEntry = RtlpCoalesceHeap(Heap); - - /* If it's a suitable one - return it */ - if (FreeEntry && - FreeEntry->Size >= Size) - { - return FreeEntry; - } - } - } - - return NULL; -} - -/*********************************************************************** - * RtlCreateHeap - * RETURNS - * Handle of heap: Success - * NULL: Failure - * - * @implemented - */ -HANDLE NTAPI -RtlCreateHeap(ULONG Flags, - PVOID Addr, - SIZE_T TotalSize, - SIZE_T CommitSize, - PVOID Lock, - PRTL_HEAP_PARAMETERS Parameters) -{ - PVOID CommittedAddress = NULL, UncommittedAddress = NULL; - PHEAP Heap = NULL; - RTL_HEAP_PARAMETERS SafeParams = {0}; - PPEB Peb; - ULONG_PTR MaximumUserModeAddress; - SYSTEM_BASIC_INFORMATION SystemInformation; - MEMORY_BASIC_INFORMATION MemoryInfo; - ULONG NtGlobalFlags = RtlGetNtGlobalFlags(); - ULONG HeapSegmentFlags = 0; - NTSTATUS Status; - ULONG MaxBlockSize, HeaderSize; - BOOLEAN AllocateLock = FALSE; - - /* Check for a special heap */ - if (RtlpPageHeapEnabled && !Addr && !Lock) - { - Heap = RtlpPageHeapCreate(Flags, Addr, TotalSize, CommitSize, Lock, Parameters); - if (Heap) return Heap; - - //ASSERT(FALSE); - DPRINT1("Enabling page heap failed\n"); - } - - /* Check validation flags */ - if (!(Flags & HEAP_SKIP_VALIDATION_CHECKS) && (Flags & ~HEAP_CREATE_VALID_MASK)) - { - DPRINT1("Invalid flags 0x%08x, fixing...\n", Flags); - Flags &= HEAP_CREATE_VALID_MASK; - } - - /* TODO: Capture parameters, once we decide to use SEH */ - if (!Parameters) Parameters = &SafeParams; - - /* Check global flags */ - if (NtGlobalFlags & FLG_HEAP_DISABLE_COALESCING) - Flags |= HEAP_DISABLE_COALESCE_ON_FREE; - - if (NtGlobalFlags & FLG_HEAP_ENABLE_FREE_CHECK) - Flags |= HEAP_FREE_CHECKING_ENABLED; - - if (NtGlobalFlags & FLG_HEAP_ENABLE_TAIL_CHECK) - Flags |= HEAP_TAIL_CHECKING_ENABLED; - - if (RtlpGetMode() == UserMode) - { - /* Also check these flags if in usermode */ - if (NtGlobalFlags & FLG_HEAP_VALIDATE_ALL) - Flags |= HEAP_VALIDATE_ALL_ENABLED; - - if (NtGlobalFlags & FLG_HEAP_VALIDATE_PARAMETERS) - Flags |= HEAP_VALIDATE_PARAMETERS_ENABLED; - - if (NtGlobalFlags & FLG_USER_STACK_TRACE_DB) - Flags |= HEAP_CAPTURE_STACK_BACKTRACES; - - /* Get PEB */ - Peb = RtlGetCurrentPeb(); - - /* Apply defaults for non-set parameters */ - if (!Parameters->SegmentCommit) Parameters->SegmentCommit = Peb->HeapSegmentCommit; - if (!Parameters->SegmentReserve) Parameters->SegmentReserve = Peb->HeapSegmentReserve; - if (!Parameters->DeCommitFreeBlockThreshold) Parameters->DeCommitFreeBlockThreshold = Peb->HeapDeCommitFreeBlockThreshold; - if (!Parameters->DeCommitTotalFreeThreshold) Parameters->DeCommitTotalFreeThreshold = Peb->HeapDeCommitTotalFreeThreshold; - } - else - { - /* Apply defaults for non-set parameters */ -#if 0 - if (!Parameters->SegmentCommit) Parameters->SegmentCommit = MmHeapSegmentCommit; - if (!Parameters->SegmentReserve) Parameters->SegmentReserve = MmHeapSegmentReserve; - if (!Parameters->DeCommitFreeBlockThreshold) Parameters->DeCommitFreeBlockThreshold = MmHeapDeCommitFreeBlockThreshold; - if (!Parameters->DeCommitTotalFreeThreshold) Parameters->DeCommitTotalFreeThreshold = MmHeapDeCommitTotalFreeThreshold; -#endif - } - - // FIXME: 
Move to memory manager - if (!Parameters->SegmentCommit) Parameters->SegmentCommit = PAGE_SIZE * 2; - if (!Parameters->SegmentReserve) Parameters->SegmentReserve = 1048576; - if (!Parameters->DeCommitFreeBlockThreshold) Parameters->DeCommitFreeBlockThreshold = PAGE_SIZE; - if (!Parameters->DeCommitTotalFreeThreshold) Parameters->DeCommitTotalFreeThreshold = 65536; - - /* Get the max um address */ - Status = ZwQuerySystemInformation(SystemBasicInformation, - &SystemInformation, - sizeof(SystemInformation), - NULL); - - if (!NT_SUCCESS(Status)) - { - DPRINT1("Getting max usermode address failed with status 0x%08x\n", Status); - return NULL; - } - - MaximumUserModeAddress = SystemInformation.MaximumUserModeAddress; - - /* Calculate max alloc size */ - if (!Parameters->MaximumAllocationSize) - Parameters->MaximumAllocationSize = MaximumUserModeAddress - (ULONG_PTR)0x10000 - PAGE_SIZE; - - MaxBlockSize = 0x80000 - PAGE_SIZE; - - if (!Parameters->VirtualMemoryThreshold || - Parameters->VirtualMemoryThreshold > MaxBlockSize) - { - Parameters->VirtualMemoryThreshold = MaxBlockSize; - } - - /* Check reserve/commit sizes and set default values */ - if (!CommitSize) - { - CommitSize = PAGE_SIZE; - if (TotalSize) - TotalSize = ROUND_UP(TotalSize, PAGE_SIZE); - else - TotalSize = 64 * PAGE_SIZE; - } - else - { - /* Round up the commit size to be at least the page size */ - CommitSize = ROUND_UP(CommitSize, PAGE_SIZE); - - if (TotalSize) - TotalSize = ROUND_UP(TotalSize, PAGE_SIZE); - else - TotalSize = ROUND_UP(CommitSize, 16 * PAGE_SIZE); - } - - /* Call special heap */ - if (RtlpHeapIsSpecial(Flags)) - return RtlDebugCreateHeap(Flags, Addr, TotalSize, CommitSize, Lock, Parameters); - - /* Calculate header size */ - HeaderSize = sizeof(HEAP); - if (!(Flags & HEAP_NO_SERIALIZE)) - { - if (Lock) - { - Flags |= HEAP_LOCK_USER_ALLOCATED; - } - else - { - HeaderSize += sizeof(HEAP_LOCK); - AllocateLock = TRUE; - } - } - else if (Lock) - { - /* Invalid parameters */ - return NULL; - } - - /* See if we are already provided with an address for the heap */ - if (Addr) - { - if (Parameters->CommitRoutine) - { - /* There is a commit routine, so no problem here, check params */ - if ((Flags & HEAP_GROWABLE) || - !Parameters->InitialCommit || - !Parameters->InitialReserve || - (Parameters->InitialCommit > Parameters->InitialReserve)) - { - /* Fail */ - return NULL; - } - - /* Calculate committed and uncommitted addresses */ - CommittedAddress = Addr; - UncommittedAddress = (PCHAR)Addr + Parameters->InitialCommit; - TotalSize = Parameters->InitialReserve; - - /* Zero the initial page ourselves */ - RtlZeroMemory(CommittedAddress, PAGE_SIZE); - } - else - { - /* Commit routine is absent, so query how much memory caller reserved */ - Status = ZwQueryVirtualMemory(NtCurrentProcess(), - Addr, - MemoryBasicInformation, - &MemoryInfo, - sizeof(MemoryInfo), - NULL); - - if (!NT_SUCCESS(Status)) - { - DPRINT1("Querying amount of user supplied memory failed with status 0x%08X\n", Status); - return NULL; - } - - /* Validate it */ - if (MemoryInfo.BaseAddress != Addr || - MemoryInfo.State == MEM_FREE) - { - return NULL; - } - - /* Validation checks passed, set committed/uncommitted addresses */ - CommittedAddress = Addr; - - /* Check if it's committed or not */ - if (MemoryInfo.State == MEM_COMMIT) - { - /* Zero it out because it's already committed */ - RtlZeroMemory(CommittedAddress, PAGE_SIZE); - - /* Calculate uncommitted address value */ - CommitSize = MemoryInfo.RegionSize; - TotalSize = CommitSize; - UncommittedAddress 
= (PCHAR)Addr + CommitSize; - - /* Check if uncommitted address is reserved */ - Status = ZwQueryVirtualMemory(NtCurrentProcess(), - UncommittedAddress, - MemoryBasicInformation, - &MemoryInfo, - sizeof(MemoryInfo), - NULL); - - if (NT_SUCCESS(Status) && - MemoryInfo.State == MEM_RESERVE) - { - /* It is, so add it up to the reserve size */ - TotalSize += MemoryInfo.RegionSize; - } - } - else - { - /* It's not committed, inform following code that a commit is necessary */ - CommitSize = PAGE_SIZE; - UncommittedAddress = Addr; - } - } - - /* Mark this as a user-committed mem */ - HeapSegmentFlags = HEAP_USER_ALLOCATED; - Heap = (PHEAP)Addr; - } - else - { - /* Check commit routine */ - if (Parameters->CommitRoutine) return NULL; - - /* Reserve memory */ - Status = ZwAllocateVirtualMemory(NtCurrentProcess(), - (PVOID *)&Heap, - 0, - &TotalSize, - MEM_RESERVE, - PAGE_READWRITE); - - if (!NT_SUCCESS(Status)) - { - DPRINT1("Failed to reserve memory with status 0x%08x\n", Status); - return NULL; - } - - /* Set base addresses */ - CommittedAddress = Heap; - UncommittedAddress = Heap; - } - - /* Check if we need to commit something */ - if (CommittedAddress == UncommittedAddress) - { - /* Commit the required size */ - Status = ZwAllocateVirtualMemory(NtCurrentProcess(), - &CommittedAddress, - 0, - &CommitSize, - MEM_COMMIT, - PAGE_READWRITE); - - DPRINT("Committed %d bytes at base %p\n", CommitSize, CommittedAddress); - - if (!NT_SUCCESS(Status)) - { - DPRINT1("Failure, Status 0x%08X\n", Status); - - /* Release memory if it was reserved */ - if (!Addr) ZwFreeVirtualMemory(NtCurrentProcess(), - (PVOID *)&Heap, - &TotalSize, - MEM_RELEASE); - - return NULL; - } - - /* Calculate new uncommitted address */ - UncommittedAddress = (PCHAR)UncommittedAddress + CommitSize; - } - - DPRINT("Created heap %p, CommitSize %x, ReserveSize %x\n", Heap, CommitSize, TotalSize); - - /* Initialize the heap */ - RtlpInitializeHeap(Heap, &HeaderSize, Flags, AllocateLock, Lock); - - /* Initialize heap's first segment */ - if (!RtlpInitializeHeapSegment(Heap, - (PHEAP_SEGMENT)((PCHAR)Heap + HeaderSize), - 0, - HeapSegmentFlags, - CommittedAddress, - UncommittedAddress, - (PCHAR)CommittedAddress + TotalSize)) - { - DPRINT1("Failed to initialize heap segment\n"); - return NULL; - } - - /* Set other data */ - Heap->ProcessHeapsListIndex = 0; - Heap->SegmentCommit = Parameters->SegmentCommit; - Heap->SegmentReserve = Parameters->SegmentReserve; - Heap->DeCommitFreeBlockThreshold = Parameters->DeCommitFreeBlockThreshold >> HEAP_ENTRY_SHIFT; - Heap->DeCommitTotalFreeThreshold = Parameters->DeCommitTotalFreeThreshold >> HEAP_ENTRY_SHIFT; - Heap->MaximumAllocationSize = Parameters->MaximumAllocationSize; - Heap->VirtualMemoryThreshold = ROUND_UP(Parameters->VirtualMemoryThreshold, HEAP_ENTRY_SIZE) >> HEAP_ENTRY_SHIFT; - Heap->CommitRoutine = Parameters->CommitRoutine; - - /* Set alignment */ - if (Flags & HEAP_CREATE_ALIGN_16) - { - Heap->AlignMask = (ULONG)~15; - Heap->AlignRound = 15 + sizeof(HEAP_ENTRY); - } - else - { - Heap->AlignMask = (ULONG)~(HEAP_ENTRY_SIZE - 1); - Heap->AlignRound = HEAP_ENTRY_SIZE - 1 + sizeof(HEAP_ENTRY); - } - - if (Heap->Flags & HEAP_TAIL_CHECKING_ENABLED) - Heap->AlignRound += HEAP_ENTRY_SIZE; - - /* Add heap to process list in case of usermode heap */ - if (RtlpGetMode() == UserMode) - { - RtlpAddHeapToProcessList(Heap); - - // FIXME: What about lookasides? 
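/* A minimal sketch of how the AlignRound/AlignMask pair chosen above is used
 * later by RtlAllocateHeap: the requested byte count is rounded to heap-entry
 * granularity (with room for the HEAP_ENTRY header) and converted into an
 * index expressed in heap-entry units. The 8-byte entry size and the absence
 * of HEAP_CREATE_ALIGN_16 and HEAP_TAIL_CHECKING_ENABLED are assumptions made
 * purely for illustration. */
#define EXAMPLE_HEAP_ENTRY_SIZE  8u
#define EXAMPLE_HEAP_ENTRY_SHIFT 3u

static void ExampleSizeToIndex(size_t Size, size_t *AllocationSize, size_t *Index)
{
    const size_t AlignMask  = ~(size_t)(EXAMPLE_HEAP_ENTRY_SIZE - 1);
    const size_t AlignRound = (EXAMPLE_HEAP_ENTRY_SIZE - 1) + EXAMPLE_HEAP_ENTRY_SIZE;

    if (Size == 0) Size = 1;                                    /* zero-byte requests still get a block */

    *AllocationSize = (Size + AlignRound) & AlignMask;          /* header + payload, entry-aligned */
    *Index = *AllocationSize >> EXAMPLE_HEAP_ENTRY_SHIFT;       /* size in HEAP_ENTRY units */
}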
- } - - DPRINT("Heap %p, flags 0x%08x\n", Heap, Heap->Flags); - return Heap; -} - -/*********************************************************************** - * RtlDestroyHeap - * RETURNS - * TRUE: Success - * FALSE: Failure - * - * @implemented - * - * RETURNS - * Success: A NULL HANDLE, if heap is NULL or it was destroyed - * Failure: The Heap handle, if heap is the process heap. - */ -HANDLE NTAPI -RtlDestroyHeap(HANDLE HeapPtr) /* [in] Handle of heap */ -{ - PHEAP Heap = (PHEAP)HeapPtr; - PLIST_ENTRY Current; - PHEAP_UCR_SEGMENT UcrSegment; - PHEAP_VIRTUAL_ALLOC_ENTRY VirtualEntry; - PVOID BaseAddress; - SIZE_T Size; - LONG i; - PHEAP_SEGMENT Segment; - - if (!HeapPtr) return NULL; - - /* Call special heap */ - if (RtlpHeapIsSpecial(Heap->Flags)) - { - if (!RtlDebugDestroyHeap(Heap)) return HeapPtr; - } - - /* Check for a process heap */ - if (RtlpGetMode() == UserMode && - HeapPtr == NtCurrentPeb()->ProcessHeap) return HeapPtr; - - /* Free up all big allocations */ - Current = Heap->VirtualAllocdBlocks.Flink; - while (Current != &Heap->VirtualAllocdBlocks) - { - VirtualEntry = CONTAINING_RECORD(Current, HEAP_VIRTUAL_ALLOC_ENTRY, Entry); - BaseAddress = (PVOID)VirtualEntry; - Current = Current->Flink; - Size = 0; - ZwFreeVirtualMemory(NtCurrentProcess(), - &BaseAddress, - &Size, - MEM_RELEASE); - } - - /* Delete tags and remove heap from the process heaps list in user mode */ - if (RtlpGetMode() == UserMode) - { - // FIXME DestroyTags - RtlpRemoveHeapFromProcessList(Heap); - } - - /* Delete the heap lock */ - if (!(Heap->Flags & HEAP_NO_SERIALIZE)) - { - /* Delete it if it wasn't user allocated */ - if (!(Heap->Flags & HEAP_LOCK_USER_ALLOCATED)) - RtlDeleteHeapLock(Heap->LockVariable); - - /* Clear out the lock variable */ - Heap->LockVariable = NULL; - } - - /* Free UCR segments if any were created */ - Current = Heap->UCRSegments.Flink; - while(Current != &Heap->UCRSegments) - { - UcrSegment = CONTAINING_RECORD(Current, HEAP_UCR_SEGMENT, ListEntry); - - /* Advance to the next descriptor */ - Current = Current->Flink; - - BaseAddress = (PVOID)UcrSegment; - Size = 0; - - /* Release that memory */ - ZwFreeVirtualMemory(NtCurrentProcess(), - &BaseAddress, - &Size, - MEM_RELEASE); - } - - /* Go through segments and destroy them */ - for (i = HEAP_SEGMENTS - 1; i >= 0; i--) - { - Segment = Heap->Segments[i]; - if (Segment) RtlpDestroyHeapSegment(Segment); - } - - return NULL; -} - -PHEAP_ENTRY NTAPI -RtlpSplitEntry(PHEAP Heap, - PHEAP_FREE_ENTRY FreeBlock, - SIZE_T AllocationSize, - SIZE_T Index, - SIZE_T Size) -{ - PHEAP_FREE_ENTRY SplitBlock, SplitBlock2; - UCHAR FreeFlags; - PHEAP_ENTRY InUseEntry; - SIZE_T FreeSize; - - /* Save flags, update total free size */ - FreeFlags = FreeBlock->Flags; - Heap->TotalFreeSize -= FreeBlock->Size; - - /* Make this block an in-use one */ - InUseEntry = (PHEAP_ENTRY)FreeBlock; - InUseEntry->Flags = HEAP_ENTRY_BUSY; - InUseEntry->SmallTagIndex = 0; - - /* Calculate the extra amount */ - FreeSize = InUseEntry->Size - Index; - - /* Update it's size fields (we don't need their data anymore) */ - InUseEntry->Size = Index; - InUseEntry->UnusedBytes = AllocationSize - Size; - - /* If there is something to split - do the split */ - if (FreeSize != 0) - { - /* Don't split if resulting entry can't contain any payload data - (i.e. 
being just HEAP_ENTRY_SIZE) */ - if (FreeSize == 1) - { - /* Increase sizes of the in-use entry */ - InUseEntry->Size++; - InUseEntry->UnusedBytes += sizeof(HEAP_ENTRY); - } - else - { - /* Calculate a pointer to the new entry */ - SplitBlock = (PHEAP_FREE_ENTRY)(InUseEntry + Index); - - /* Initialize it */ - SplitBlock->Flags = FreeFlags; - SplitBlock->SegmentOffset = InUseEntry->SegmentOffset; - SplitBlock->Size = FreeSize; - SplitBlock->PreviousSize = Index; - - /* Check if it's the last entry */ - if (FreeFlags & HEAP_ENTRY_LAST_ENTRY) - { - /* Insert it to the free list if it's the last entry */ - RtlpInsertFreeBlockHelper(Heap, SplitBlock, FreeSize, FALSE); - Heap->TotalFreeSize += FreeSize; - } - else - { - /* Not so easy - need to update next's previous size too */ - SplitBlock2 = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)SplitBlock + FreeSize); - - if (SplitBlock2->Flags & HEAP_ENTRY_BUSY) - { - SplitBlock2->PreviousSize = (USHORT)FreeSize; - RtlpInsertFreeBlockHelper(Heap, SplitBlock, FreeSize, FALSE); - Heap->TotalFreeSize += FreeSize; - } - else - { - /* Even more complex - the next entry is free, so we can merge them into one! */ - SplitBlock->Flags = SplitBlock2->Flags; - - /* Remove that next entry */ - RtlpRemoveFreeBlock(Heap, SplitBlock2, FALSE, FALSE); - - /* Update sizes */ - FreeSize += SplitBlock2->Size; - Heap->TotalFreeSize -= SplitBlock2->Size; - - if (FreeSize <= HEAP_MAX_BLOCK_SIZE) - { - /* Insert it back */ - SplitBlock->Size = FreeSize; - - /* Don't forget to update previous size of the next entry! */ - if (!(SplitBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) - { - ((PHEAP_FREE_ENTRY)((PHEAP_ENTRY)SplitBlock + FreeSize))->PreviousSize = FreeSize; - } - - /* Actually insert it */ - RtlpInsertFreeBlockHelper(Heap, SplitBlock, (USHORT)FreeSize, FALSE); - - /* Update total size */ - Heap->TotalFreeSize += FreeSize; - } - else - { - /* Resulting block is quite big */ - RtlpInsertFreeBlock(Heap, SplitBlock, FreeSize); - } - } - } - - /* Reset flags of the free entry */ - FreeFlags = 0; - - /* Update last entry in segment */ - if (SplitBlock->Flags & HEAP_ENTRY_LAST_ENTRY) - { - Heap->Segments[SplitBlock->SegmentOffset]->LastEntryInSegment = (PHEAP_ENTRY)SplitBlock; - } - } - } - - /* Set last entry flag */ - if (FreeFlags & HEAP_ENTRY_LAST_ENTRY) - InUseEntry->Flags |= HEAP_ENTRY_LAST_ENTRY; - - return InUseEntry; -} - -PVOID NTAPI -RtlpAllocateNonDedicated(PHEAP Heap, - ULONG Flags, - SIZE_T Size, - SIZE_T AllocationSize, - SIZE_T Index, - BOOLEAN HeapLocked) -{ - PLIST_ENTRY FreeListHead, Next; - PHEAP_FREE_ENTRY FreeBlock; - PHEAP_ENTRY InUseEntry; - PHEAP_ENTRY_EXTRA Extra; - EXCEPTION_RECORD ExceptionRecord; - - /* Go through the zero list to find a place where to insert the new entry */ - FreeListHead = &Heap->FreeLists[0]; - - /* Start from the largest block to reduce time */ - Next = FreeListHead->Blink; - if (FreeListHead != Next) - { - FreeBlock = CONTAINING_RECORD(Next, HEAP_FREE_ENTRY, FreeList); - - if (FreeBlock->Size >= Index) - { - /* Our request is smaller than the largest entry in the zero list */ - - /* Go through the list to find insertion place */ - Next = FreeListHead->Flink; - while (FreeListHead != Next) - { - FreeBlock = CONTAINING_RECORD(Next, HEAP_FREE_ENTRY, FreeList); - - if (FreeBlock->Size >= Index) - { - /* Found minimally fitting entry. 
Proceed to either using it as it is - or splitting it to two entries */ - RemoveEntryList(&FreeBlock->FreeList); - - /* Split it */ - InUseEntry = RtlpSplitEntry(Heap, FreeBlock, AllocationSize, Index, Size); - - /* Release the lock */ - if (HeapLocked) RtlLeaveHeapLock(Heap->LockVariable); - - /* Zero memory if that was requested */ - if (Flags & HEAP_ZERO_MEMORY) - RtlZeroMemory(InUseEntry + 1, Size); - else if (Heap->Flags & HEAP_FREE_CHECKING_ENABLED) - { - /* Fill this block with a special pattern */ - RtlFillMemoryUlong(InUseEntry + 1, Size & ~0x3, ARENA_INUSE_FILLER); - } - - /* Fill tail of the block with a special pattern too if requested */ - if (Heap->Flags & HEAP_TAIL_CHECKING_ENABLED) - { - RtlFillMemory((PCHAR)(InUseEntry + 1) + Size, sizeof(HEAP_ENTRY), HEAP_TAIL_FILL); - InUseEntry->Flags |= HEAP_ENTRY_FILL_PATTERN; - } - - /* Prepare extra if it's present */ - if (InUseEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT) - { - Extra = RtlpGetExtraStuffPointer(InUseEntry); - RtlZeroMemory(Extra, sizeof(HEAP_ENTRY_EXTRA)); - - // TODO: Tagging - } - - /* Return pointer to the */ - return InUseEntry + 1; - } - - /* Advance to the next entry */ - Next = Next->Flink; - } - } - } - - /* Extend the heap, 0 list didn't have anything suitable */ - FreeBlock = RtlpExtendHeap(Heap, AllocationSize); - - /* Use the new biggest entry we've got */ - if (FreeBlock) - { - RemoveEntryList(&FreeBlock->FreeList); - - /* Split it */ - InUseEntry = RtlpSplitEntry(Heap, FreeBlock, AllocationSize, Index, Size); - - /* Release the lock */ - if (HeapLocked) RtlLeaveHeapLock(Heap->LockVariable); - - /* Zero memory if that was requested */ - if (Flags & HEAP_ZERO_MEMORY) - RtlZeroMemory(InUseEntry + 1, Size); - else if (Heap->Flags & HEAP_FREE_CHECKING_ENABLED) - { - /* Fill this block with a special pattern */ - RtlFillMemoryUlong(InUseEntry + 1, Size & ~0x3, ARENA_INUSE_FILLER); - } - - /* Fill tail of the block with a special pattern too if requested */ - if (Heap->Flags & HEAP_TAIL_CHECKING_ENABLED) - { - RtlFillMemory((PCHAR)(InUseEntry + 1) + Size, sizeof(HEAP_ENTRY), HEAP_TAIL_FILL); - InUseEntry->Flags |= HEAP_ENTRY_FILL_PATTERN; - } - - /* Prepare extra if it's present */ - if (InUseEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT) - { - Extra = RtlpGetExtraStuffPointer(InUseEntry); - RtlZeroMemory(Extra, sizeof(HEAP_ENTRY_EXTRA)); - - // TODO: Tagging - } - - /* Return pointer to the */ - return InUseEntry + 1; - } - - /* Really unfortunate, out of memory condition */ - RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_NO_MEMORY); - - /* Generate an exception */ - if (Flags & HEAP_GENERATE_EXCEPTIONS) - { - ExceptionRecord.ExceptionCode = STATUS_NO_MEMORY; - ExceptionRecord.ExceptionRecord = NULL; - ExceptionRecord.NumberParameters = 1; - ExceptionRecord.ExceptionFlags = 0; - ExceptionRecord.ExceptionInformation[0] = AllocationSize; - - RtlRaiseException(&ExceptionRecord); - } - - /* Release the lock */ - if (HeapLocked) RtlLeaveHeapLock(Heap->LockVariable); - DPRINT1("HEAP: Allocation failed!\n"); - DPRINT1("Flags %x\n", Heap->Flags); - return NULL; -} - -/*********************************************************************** - * HeapAlloc (KERNEL32.334) - * RETURNS - * Pointer to allocated memory block - * NULL: Failure - * 0x7d030f60--invalid flags in RtlHeapAllocate - * @implemented - */ -PVOID NTAPI -RtlAllocateHeap(IN PVOID HeapPtr, - IN ULONG Flags, - IN SIZE_T Size) -{ - PHEAP Heap = (PHEAP)HeapPtr; - PULONG FreeListsInUse; - ULONG FreeListsInUseUlong; - SIZE_T AllocationSize; - SIZE_T Index; - 
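/* A minimal sketch of the free-list bitmap scan performed further below: each
 * bit of the four in-use ULONGs marks a non-empty dedicated free list, bits
 * for lists smaller than the request are masked off in the starting word, and
 * the first set bit found identifies the smallest list that can satisfy the
 * allocation. ExampleFindLeastSetBit is a portable stand-in for the
 * table-driven RtlpFindLeastSetBit; a return of -1 means "fall back to the
 * non-dedicated list". */
static unsigned ExampleFindLeastSetBit(unsigned int Bits)
{
    unsigned Position = 0;

    /* Caller guarantees Bits != 0 */
    while (!(Bits & 1))
    {
        Bits >>= 1;
        Position++;
    }
    return Position;
}

static int ExamplePickFreeList(const unsigned int BitmapWords[4], unsigned Index)
{
    unsigned Word = Index >> 5;
    unsigned int Bits;

    if (Word >= 4) return -1;                              /* too big for the dedicated lists */

    /* Disable all lists smaller than the requested index in the first word */
    Bits = BitmapWords[Word] & ~((1u << (Index & 0x1f)) - 1u);

    for (;;)
    {
        if (Bits) return (int)(Word * 32 + ExampleFindLeastSetBit(Bits));
        if (++Word >= 4) return -1;                        /* nothing suitable anywhere */
        Bits = BitmapWords[Word];
    }
}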
PLIST_ENTRY FreeListHead; - PHEAP_ENTRY InUseEntry; - PHEAP_FREE_ENTRY FreeBlock; - ULONG InUseIndex, i; - UCHAR FreeFlags; - EXCEPTION_RECORD ExceptionRecord; - BOOLEAN HeapLocked = FALSE; - PHEAP_VIRTUAL_ALLOC_ENTRY VirtualBlock = NULL; - PHEAP_ENTRY_EXTRA Extra; - NTSTATUS Status; - - /* Force flags */ - Flags |= Heap->ForceFlags; - - /* Call special heap */ - if (RtlpHeapIsSpecial(Flags)) - return RtlDebugAllocateHeap(Heap, Flags, Size); - - /* Check for the maximum size */ - if (Size >= 0x80000000) - { - RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_NO_MEMORY); - DPRINT1("HEAP: Allocation failed!\n"); - return NULL; - } - - if (Flags & (HEAP_CREATE_ENABLE_TRACING | - HEAP_CREATE_ALIGN_16)) - { - DPRINT1("HEAP: RtlAllocateHeap is called with unsupported flags %x, ignoring\n", Flags); - } - - //DPRINT("RtlAllocateHeap(%p %x %x)\n", Heap, Flags, Size); - - /* Calculate allocation size and index */ - if (Size) - AllocationSize = Size; - else - AllocationSize = 1; - AllocationSize = (AllocationSize + Heap->AlignRound) & Heap->AlignMask; - Index = AllocationSize >> HEAP_ENTRY_SHIFT; - - /* Acquire the lock if necessary */ - if (!(Flags & HEAP_NO_SERIALIZE)) - { - RtlEnterHeapLock(Heap->LockVariable); - HeapLocked = TRUE; - } - - /* Depending on the size, the allocation is going to be done from dedicated, - non-dedicated lists or a virtual block of memory */ - if (Index < HEAP_FREELISTS) - { - FreeListHead = &Heap->FreeLists[Index]; - - if (!IsListEmpty(FreeListHead)) - { - /* There is a free entry in this list */ - FreeBlock = CONTAINING_RECORD(FreeListHead->Blink, - HEAP_FREE_ENTRY, - FreeList); - - /* Save flags and remove the free entry */ - FreeFlags = FreeBlock->Flags; - RtlpRemoveFreeBlock(Heap, FreeBlock, TRUE, FALSE); - - /* Update the total free size of the heap */ - Heap->TotalFreeSize -= Index; - - /* Initialize this block */ - InUseEntry = (PHEAP_ENTRY)FreeBlock; - InUseEntry->Flags = HEAP_ENTRY_BUSY | (FreeFlags & HEAP_ENTRY_LAST_ENTRY); - InUseEntry->UnusedBytes = AllocationSize - Size; - InUseEntry->SmallTagIndex = 0; - } - else - { - /* Find smallest free block which this request could fit in */ - InUseIndex = Index >> 5; - FreeListsInUse = &Heap->u.FreeListsInUseUlong[InUseIndex]; - - /* This bit magic disables all sizes which are less than the requested allocation size */ - FreeListsInUseUlong = *FreeListsInUse++ & ~((1 << ((ULONG)Index & 0x1f)) - 1); - - /* If size is definitily more than our lists - go directly to the non-dedicated one */ - if (InUseIndex > 3) - return RtlpAllocateNonDedicated(Heap, Flags, Size, AllocationSize, Index, HeapLocked); - - /* Go through the list */ - for (i = InUseIndex; i < 4; i++) - { - if (FreeListsInUseUlong) - { - FreeListHead = &Heap->FreeLists[i * 32]; - break; - } - - if (i < 3) FreeListsInUseUlong = *FreeListsInUse++; - } - - /* Nothing found, search in the non-dedicated list */ - if (i == 4) - return RtlpAllocateNonDedicated(Heap, Flags, Size, AllocationSize, Index, HeapLocked); - - /* That list is found, now calculate exact block */ - FreeListHead += RtlpFindLeastSetBit(FreeListsInUseUlong); - - /* Take this entry and remove it from the list of free blocks */ - FreeBlock = CONTAINING_RECORD(FreeListHead->Blink, - HEAP_FREE_ENTRY, - FreeList); - RtlpRemoveFreeBlock(Heap, FreeBlock, TRUE, FALSE); - - /* Split it */ - InUseEntry = RtlpSplitEntry(Heap, FreeBlock, AllocationSize, Index, Size); - } - - /* Release the lock */ - if (HeapLocked) RtlLeaveHeapLock(Heap->LockVariable); - - /* Zero memory if that was requested */ - if 
(Flags & HEAP_ZERO_MEMORY) - RtlZeroMemory(InUseEntry + 1, Size); - else if (Heap->Flags & HEAP_FREE_CHECKING_ENABLED) - { - /* Fill this block with a special pattern */ - RtlFillMemoryUlong(InUseEntry + 1, Size & ~0x3, ARENA_INUSE_FILLER); - } - - /* Fill tail of the block with a special pattern too if requested */ - if (Heap->Flags & HEAP_TAIL_CHECKING_ENABLED) - { - RtlFillMemory((PCHAR)(InUseEntry + 1) + Size, sizeof(HEAP_ENTRY), HEAP_TAIL_FILL); - InUseEntry->Flags |= HEAP_ENTRY_FILL_PATTERN; - } - - /* Prepare extra if it's present */ - if (InUseEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT) - { - Extra = RtlpGetExtraStuffPointer(InUseEntry); - RtlZeroMemory(Extra, sizeof(HEAP_ENTRY_EXTRA)); - - // TODO: Tagging - } - - /* User data starts right after the entry's header */ - return InUseEntry + 1; - } - else if (Index <= Heap->VirtualMemoryThreshold) - { - /* The block is too large for dedicated lists, but fine for a non-dedicated one */ - return RtlpAllocateNonDedicated(Heap, Flags, Size, AllocationSize, Index, HeapLocked); - } - else if (Heap->Flags & HEAP_GROWABLE) - { - /* We've got a very big allocation request, satisfy it by directly allocating virtual memory */ - AllocationSize += sizeof(HEAP_VIRTUAL_ALLOC_ENTRY) - sizeof(HEAP_ENTRY); - - Status = ZwAllocateVirtualMemory(NtCurrentProcess(), - (PVOID *)&VirtualBlock, - 0, - &AllocationSize, - MEM_COMMIT, - PAGE_READWRITE); - - if (!NT_SUCCESS(Status)) - { - // Set STATUS! - /* Release the lock */ - if (HeapLocked) RtlLeaveHeapLock(Heap->LockVariable); - DPRINT1("HEAP: Allocation failed!\n"); - return NULL; - } - - /* Initialize the newly allocated block */ - VirtualBlock->BusyBlock.Size = (AllocationSize - Size); - VirtualBlock->BusyBlock.Flags = HEAP_ENTRY_VIRTUAL_ALLOC | HEAP_ENTRY_EXTRA_PRESENT | HEAP_ENTRY_BUSY; - VirtualBlock->CommitSize = AllocationSize; - VirtualBlock->ReserveSize = AllocationSize; - - /* Insert it into the list of virtual allocations */ - InsertTailList(&Heap->VirtualAllocdBlocks, &VirtualBlock->Entry); - - /* Release the lock */ - if (HeapLocked) RtlLeaveHeapLock(Heap->LockVariable); - - /* Return pointer to user data */ - return VirtualBlock + 1; - } - - /* Generate an exception */ - if (Flags & HEAP_GENERATE_EXCEPTIONS) - { - ExceptionRecord.ExceptionCode = STATUS_NO_MEMORY; - ExceptionRecord.ExceptionRecord = NULL; - ExceptionRecord.NumberParameters = 1; - ExceptionRecord.ExceptionFlags = 0; - ExceptionRecord.ExceptionInformation[0] = AllocationSize; - - RtlRaiseException(&ExceptionRecord); - } - - RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_BUFFER_TOO_SMALL); - - /* Release the lock */ - if (HeapLocked) RtlLeaveHeapLock(Heap->LockVariable); - DPRINT1("HEAP: Allocation failed!\n"); - return NULL; -} - - -/*********************************************************************** - * HeapFree (KERNEL32.338) - * RETURNS - * TRUE: Success - * FALSE: Failure - * - * @implemented - */ -BOOLEAN NTAPI RtlFreeHeap( - HANDLE HeapPtr, /* [in] Handle of heap */ - ULONG Flags, /* [in] Heap freeing flags */ - PVOID Ptr /* [in] Address of memory to free */ -) -{ - PHEAP Heap; - PHEAP_ENTRY HeapEntry; - USHORT TagIndex = 0; - SIZE_T BlockSize; - PHEAP_VIRTUAL_ALLOC_ENTRY VirtualEntry; - BOOLEAN Locked = FALSE; - NTSTATUS Status; - - /* Freeing NULL pointer is a legal operation */ - if (!Ptr) return TRUE; - - /* Get pointer to the heap and force flags */ - Heap = (PHEAP)HeapPtr; - Flags |= Heap->ForceFlags; - - /* Call special heap */ - if (RtlpHeapIsSpecial(Flags)) - return RtlDebugFreeHeap(Heap, Flags, Ptr); - - /* 
Lock if necessary */ - if (!(Flags & HEAP_NO_SERIALIZE)) - { - RtlEnterHeapLock(Heap->LockVariable); - Locked = TRUE; - } - - /* Get pointer to the heap entry */ - HeapEntry = (PHEAP_ENTRY)Ptr - 1; - - /* Check this entry, fail if it's invalid */ - if (!(HeapEntry->Flags & HEAP_ENTRY_BUSY) || - (((ULONG_PTR)Ptr & 0x7) != 0) || - (HeapEntry->SegmentOffset >= HEAP_SEGMENTS)) - { - /* This is an invalid block */ - DPRINT1("HEAP: Trying to free an invalid address %p!\n", Ptr); - RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_INVALID_PARAMETER); - - /* Release the heap lock */ - if (Locked) RtlLeaveHeapLock(Heap->LockVariable); - return FALSE; - } - - if (HeapEntry->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) - { - /* Big allocation */ - VirtualEntry = CONTAINING_RECORD(HeapEntry, HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock); - - /* Remove it from the list */ - RemoveEntryList(&VirtualEntry->Entry); - - // TODO: Tagging - - BlockSize = 0; - Status = ZwFreeVirtualMemory(NtCurrentProcess(), - (PVOID *)&VirtualEntry, - &BlockSize, - MEM_RELEASE); - - if (!NT_SUCCESS(Status)) - { - DPRINT1("HEAP: Failed releasing memory with Status 0x%08X. Heap %p, ptr %p, base address %p\n", - Status, Heap, Ptr, VirtualEntry); - RtlSetLastWin32ErrorAndNtStatusFromNtStatus(Status); - } - } - else - { - /* Normal allocation */ - BlockSize = HeapEntry->Size; - - // TODO: Tagging - - /* Coalesce in kernel mode, and in usermode if it's not disabled */ - if (RtlpGetMode() == KernelMode || - (RtlpGetMode() == UserMode && !(Heap->Flags & HEAP_DISABLE_COALESCE_ON_FREE))) - { - HeapEntry = (PHEAP_ENTRY)RtlpCoalesceFreeBlocks(Heap, - (PHEAP_FREE_ENTRY)HeapEntry, - &BlockSize, - FALSE); - } - - /* If there is no need to decommit the block - put it into a free list */ - if (BlockSize < Heap->DeCommitFreeBlockThreshold || - (Heap->TotalFreeSize + BlockSize < Heap->DeCommitTotalFreeThreshold)) - { - /* Check if it needs to go to a 0 list */ - if (BlockSize > HEAP_MAX_BLOCK_SIZE) - { - /* General-purpose 0 list */ - RtlpInsertFreeBlock(Heap, (PHEAP_FREE_ENTRY)HeapEntry, BlockSize); - } - else - { - /* Usual free list */ - RtlpInsertFreeBlockHelper(Heap, (PHEAP_FREE_ENTRY)HeapEntry, BlockSize, FALSE); - - /* Assert sizes are consistent */ - if (!(HeapEntry->Flags & HEAP_ENTRY_LAST_ENTRY)) - { - ASSERT((HeapEntry + BlockSize)->PreviousSize == BlockSize); - } - - /* Increase the free size */ - Heap->TotalFreeSize += BlockSize; - } - - - if (RtlpGetMode() == UserMode && - TagIndex != 0) - { - // FIXME: Tagging - UNIMPLEMENTED; - } - } - else - { - /* Decommit this block */ - RtlpDeCommitFreeBlock(Heap, (PHEAP_FREE_ENTRY)HeapEntry, BlockSize); - } - } - - /* Release the heap lock */ - if (Locked) RtlLeaveHeapLock(Heap->LockVariable); - - return TRUE; -} - -BOOLEAN NTAPI -RtlpGrowBlockInPlace (IN PHEAP Heap, - IN ULONG Flags, - IN PHEAP_ENTRY InUseEntry, - IN SIZE_T Size, - IN SIZE_T Index) -{ - UCHAR EntryFlags, RememberFlags; - PHEAP_FREE_ENTRY FreeEntry, UnusedEntry, FollowingEntry; - SIZE_T FreeSize, PrevSize, TailPart, AddedSize = 0; - PHEAP_ENTRY_EXTRA OldExtra, NewExtra; - - /* We can't grow beyond specified threshold */ - if (Index > Heap->VirtualMemoryThreshold) - return FALSE; - - /* Get entry flags */ - EntryFlags = InUseEntry->Flags; - - /* Get the next free entry */ - FreeEntry = (PHEAP_FREE_ENTRY)(InUseEntry + InUseEntry->Size); - - if (EntryFlags & HEAP_ENTRY_LAST_ENTRY) - { - /* There is no next block, just uncommitted space. 
Calculate how much is needed */ - FreeSize = (Index - InUseEntry->Size) << HEAP_ENTRY_SHIFT; - FreeSize = ROUND_UP(FreeSize, PAGE_SIZE); - - /* Find and commit those pages */ - FreeEntry = RtlpFindAndCommitPages(Heap, - Heap->Segments[InUseEntry->SegmentOffset], - &FreeSize, - FreeEntry); - - /* Fail if it failed... */ - if (!FreeEntry) return FALSE; - - /* It was successful, perform coalescing */ - FreeSize = FreeSize >> HEAP_ENTRY_SHIFT; - FreeEntry = RtlpCoalesceFreeBlocks(Heap, FreeEntry, &FreeSize, FALSE); - - /* Check if it's enough */ - if (FreeSize + InUseEntry->Size < Index) - { - /* Still not enough */ - RtlpInsertFreeBlock(Heap, FreeEntry, FreeSize); - Heap->TotalFreeSize += FreeSize; - return FALSE; - } - - /* Remember flags of this free entry */ - RememberFlags = FreeEntry->Flags; - - /* Sum up sizes */ - FreeSize += InUseEntry->Size; - } - else - { - /* The next block indeed exists. Check if it's free or in use */ - if (FreeEntry->Flags & HEAP_ENTRY_BUSY) return FALSE; - - /* Next entry is free, check if it can fit the block we need */ - FreeSize = InUseEntry->Size + FreeEntry->Size; - if (FreeSize < Index) return FALSE; - - /* Remember flags of this free entry */ - RememberFlags = FreeEntry->Flags; - - /* Remove this block from the free list */ - RtlpRemoveFreeBlock(Heap, FreeEntry, FALSE, FALSE); - Heap->TotalFreeSize -= FreeEntry->Size; - } - - PrevSize = (InUseEntry->Size << HEAP_ENTRY_SHIFT) - InUseEntry->UnusedBytes; - FreeSize -= Index; - - /* Don't produce too small blocks */ - if (FreeSize <= 2) - { - Index += FreeSize; - FreeSize = 0; - } - - /* Process extra stuff */ - if (RememberFlags & HEAP_ENTRY_EXTRA_PRESENT) - { - /* Calculate pointers */ - OldExtra = (PHEAP_ENTRY_EXTRA)(InUseEntry + InUseEntry->Size - 1); - NewExtra = (PHEAP_ENTRY_EXTRA)(InUseEntry + Index - 1); - - /* Copy contents */ - *NewExtra = *OldExtra; - - // FIXME Tagging - } - - /* Update sizes */ - InUseEntry->Size = Index; - InUseEntry->UnusedBytes = ((Index << HEAP_ENTRY_SHIFT) - Size); - - /* Check if there is a free space remaining after merging those blocks */ - if (!FreeSize) - { - /* Update flags and sizes */ - InUseEntry->Flags |= RememberFlags & HEAP_ENTRY_LAST_ENTRY; - - /* Either update previous size of the next entry or mark it as a last - entry in the segment*/ - if (RememberFlags & HEAP_ENTRY_LAST_ENTRY) - Heap->Segments[InUseEntry->SegmentOffset]->LastEntryInSegment = InUseEntry; - else - (InUseEntry + InUseEntry->Size)->PreviousSize = InUseEntry->Size; - } - else - { - /* Complex case, we need to split the block to give unused free space - back to the heap */ - UnusedEntry = (PHEAP_FREE_ENTRY)(InUseEntry + Index); - UnusedEntry->PreviousSize = Index; - UnusedEntry->SegmentOffset = InUseEntry->SegmentOffset; - - /* Update the following block or set the last entry in the segment */ - if (RememberFlags & HEAP_ENTRY_LAST_ENTRY) - { - /* Set last entry and set flags and size */ - Heap->Segments[InUseEntry->SegmentOffset]->LastEntryInSegment = InUseEntry; - UnusedEntry->Flags = RememberFlags; - UnusedEntry->Size = FreeSize; - - /* Insert it to the heap and update total size */ - RtlpInsertFreeBlockHelper(Heap, UnusedEntry, FreeSize, FALSE); - Heap->TotalFreeSize += FreeSize; - } - else - { - /* There is a block after this one */ - FollowingEntry = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)UnusedEntry + FreeSize); - - if (FollowingEntry->Flags & HEAP_ENTRY_BUSY) - { - /* Update flags and set size of the unused space entry */ - UnusedEntry->Flags = RememberFlags & (~HEAP_ENTRY_LAST_ENTRY); - 
UnusedEntry->Size = FreeSize; - - /* Update previous size of the following entry */ - FollowingEntry->PreviousSize = FreeSize; - - /* Insert it to the heap and update total free size */ - RtlpInsertFreeBlockHelper(Heap, UnusedEntry, FreeSize, FALSE); - Heap->TotalFreeSize += FreeSize; - } - else - { - /* That following entry is also free, what a fortune! */ - RememberFlags = FollowingEntry->Flags; - - /* Remove it */ - RtlpRemoveFreeBlock(Heap, FollowingEntry, FALSE, FALSE); - Heap->TotalFreeSize -= FollowingEntry->Size; - - /* And make up a new combined block */ - FreeSize += FollowingEntry->Size; - UnusedEntry->Flags = RememberFlags; - - /* Check where to put it */ - if (FreeSize <= HEAP_MAX_BLOCK_SIZE) - { - /* Fine for a dedicated list */ - UnusedEntry->Size = FreeSize; - - if (RememberFlags & HEAP_ENTRY_LAST_ENTRY) - Heap->Segments[UnusedEntry->SegmentOffset]->LastEntryInSegment = (PHEAP_ENTRY)UnusedEntry; - else - ((PHEAP_ENTRY)UnusedEntry + FreeSize)->PreviousSize = FreeSize; - - /* Insert it back and update total size */ - RtlpInsertFreeBlockHelper(Heap, UnusedEntry, FreeSize, FALSE); - Heap->TotalFreeSize += FreeSize; - } - else - { - /* The block is very large, leave all the hassle to the insertion routine */ - RtlpInsertFreeBlock(Heap, UnusedEntry, FreeSize); - } - } - } - } - - /* Copy user settable flags */ - InUseEntry->Flags &= ~HEAP_ENTRY_SETTABLE_FLAGS; - InUseEntry->Flags |= ((Flags & HEAP_SETTABLE_USER_FLAGS) >> 4); - - /* Properly "zero out" (and fill!) the space */ - if (Flags & HEAP_ZERO_MEMORY) - { - RtlZeroMemory((PCHAR)(InUseEntry + 1) + PrevSize, Size - PrevSize); - } - else if (Heap->Flags & HEAP_FREE_CHECKING_ENABLED) - { - /* Calculate tail part which we need to fill */ - TailPart = PrevSize & (sizeof(ULONG) - 1); - - /* "Invert" it as usual */ - if (TailPart) TailPart = 4 - TailPart; - - if (Size > (PrevSize + TailPart)) - AddedSize = (Size - (PrevSize + TailPart)) & ~(sizeof(ULONG) - 1); - - if (AddedSize) - { - RtlFillMemoryUlong((PCHAR)(InUseEntry + 1) + PrevSize + TailPart, - AddedSize, - ARENA_INUSE_FILLER); - } - } - - /* Fill the new tail */ - if (Heap->Flags & HEAP_TAIL_CHECKING_ENABLED) - { - RtlFillMemory((PCHAR)(InUseEntry + 1) + Size, - HEAP_ENTRY_SIZE, - HEAP_TAIL_FILL); - } - - /* Return success */ - return TRUE; -} - -PHEAP_ENTRY_EXTRA NTAPI -RtlpGetExtraStuffPointer(PHEAP_ENTRY HeapEntry) -{ - PHEAP_VIRTUAL_ALLOC_ENTRY VirtualEntry; - - /* Check if it's a big block */ - if (HeapEntry->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) - { - VirtualEntry = CONTAINING_RECORD(HeapEntry, HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock); - - /* Return a pointer to the extra stuff*/ - return &VirtualEntry->ExtraStuff; - } - else - { - /* This is a usual entry, which means extra stuff follows this block */ - return (PHEAP_ENTRY_EXTRA)(HeapEntry + HeapEntry->Size - 1); - } -} - - -/*********************************************************************** - * RtlReAllocateHeap - * PARAMS - * Heap [in] Handle of heap block - * Flags [in] Heap reallocation flags - * Ptr, [in] Address of memory to reallocate - * Size [in] Number of bytes to reallocate - * - * RETURNS - * Pointer to reallocated memory block - * NULL: Failure - * 0x7d030f60--invalid flags in RtlHeapAllocate - * @implemented - */ -PVOID NTAPI -RtlReAllocateHeap(HANDLE HeapPtr, - ULONG Flags, - PVOID Ptr, - SIZE_T Size) -{ - PHEAP Heap = (PHEAP)HeapPtr; - PHEAP_ENTRY InUseEntry, NewInUseEntry; - PHEAP_ENTRY_EXTRA OldExtra, NewExtra; - SIZE_T AllocationSize, FreeSize, DecommitSize; - BOOLEAN HeapLocked = FALSE; - PVOID 
NewBaseAddress; - PHEAP_FREE_ENTRY SplitBlock, SplitBlock2; - SIZE_T OldSize, Index, OldIndex; - UCHAR FreeFlags; - NTSTATUS Status; - PVOID DecommitBase; - SIZE_T RemainderBytes, ExtraSize; - PHEAP_VIRTUAL_ALLOC_ENTRY VirtualAllocBlock; - EXCEPTION_RECORD ExceptionRecord; - - /* Return success in case of a null pointer */ - if (!Ptr) - { - RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_SUCCESS); - return NULL; - } - - /* Force heap flags */ - Flags |= Heap->ForceFlags; - - /* Call special heap */ - if (RtlpHeapIsSpecial(Flags)) - return RtlDebugReAllocateHeap(Heap, Flags, Ptr, Size); - - /* Make sure size is valid */ - if (Size >= 0x80000000) - { - RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_NO_MEMORY); - return NULL; - } - - /* Calculate allocation size and index */ - if (Size) - AllocationSize = Size; - else - AllocationSize = 1; - AllocationSize = (AllocationSize + Heap->AlignRound) & Heap->AlignMask; - - /* Add up extra stuff, if it is present anywhere */ - if (((((PHEAP_ENTRY)Ptr)-1)->Flags & HEAP_ENTRY_EXTRA_PRESENT) || - (Flags & HEAP_EXTRA_FLAGS_MASK) || - Heap->PseudoTagEntries) - { - AllocationSize += sizeof(HEAP_ENTRY_EXTRA); - } - - /* Acquire the lock if necessary */ - if (!(Flags & HEAP_NO_SERIALIZE)) - { - RtlEnterHeapLock(Heap->LockVariable); - HeapLocked = TRUE; - Flags ^= HEAP_NO_SERIALIZE; - } - - /* Get the pointer to the in-use entry */ - InUseEntry = (PHEAP_ENTRY)Ptr - 1; - - /* If that entry is not really in-use, we have a problem */ - if (!(InUseEntry->Flags & HEAP_ENTRY_BUSY)) - { - RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_INVALID_PARAMETER); - - /* Release the lock and return */ - if (HeapLocked) - RtlLeaveHeapLock(Heap->LockVariable); - return Ptr; - } - - if (InUseEntry->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) - { - /* This is a virtually allocated block. 
Get its size */ - OldSize = RtlpGetSizeOfBigBlock(InUseEntry); - - /* Convert it to an index */ - OldIndex = (OldSize + InUseEntry->Size) >> HEAP_ENTRY_SHIFT; - - /* Calculate new allocation size and round it to the page size */ - AllocationSize += FIELD_OFFSET(HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock); - AllocationSize = ROUND_UP(AllocationSize, PAGE_SIZE); - } - else - { - /* Usual entry */ - OldIndex = InUseEntry->Size; - - OldSize = (OldIndex << HEAP_ENTRY_SHIFT) - InUseEntry->UnusedBytes; - } - - /* Calculate new index */ - Index = AllocationSize >> HEAP_ENTRY_SHIFT; - - /* Check for 4 different scenarios (old size, new size, old index, new index) */ - if (Index <= OldIndex) - { - /* Difference must be greater than 1, adjust if it's not so */ - if (Index + 1 == OldIndex) - { - Index++; - AllocationSize += sizeof(HEAP_ENTRY); - } - - /* Calculate new size */ - if (InUseEntry->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) - { - /* Simple in case of a virtual alloc - just an unused size */ - InUseEntry->Size = AllocationSize - Size; - } - else if (InUseEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT) - { - /* There is extra stuff, take it into account */ - OldExtra = (PHEAP_ENTRY_EXTRA)(InUseEntry + InUseEntry->Size - 1); - NewExtra = (PHEAP_ENTRY_EXTRA)(InUseEntry + Index - 1); - *NewExtra = *OldExtra; - - // FIXME Tagging, TagIndex - - /* Update unused bytes count */ - InUseEntry->UnusedBytes = AllocationSize - Size; - } - else - { - // FIXME Tagging, SmallTagIndex - InUseEntry->UnusedBytes = AllocationSize - Size; - } - - /* If new size is bigger than the old size */ - if (Size > OldSize) - { - /* Zero out that additional space if required */ - if (Flags & HEAP_ZERO_MEMORY) - { - RtlZeroMemory((PCHAR)Ptr + OldSize, Size - OldSize); - } - else if (Heap->Flags & HEAP_FREE_CHECKING_ENABLED) - { - /* Fill it on free if required */ - RemainderBytes = OldSize & (sizeof(ULONG) - 1); - - if (RemainderBytes) - RemainderBytes = 4 - RemainderBytes; - - if (Size > (OldSize + RemainderBytes)) - { - /* Calculate actual amount of extra bytes to fill */ - ExtraSize = (Size - (OldSize + RemainderBytes)) & ~(sizeof(ULONG) - 1); - - /* Fill them if there are any */ - if (ExtraSize != 0) - { - RtlFillMemoryUlong((PCHAR)(InUseEntry + 1) + OldSize + RemainderBytes, - ExtraSize, - ARENA_INUSE_FILLER); - } - } - } - } - - /* Fill tail of the heap entry if required */ - if (Heap->Flags & HEAP_TAIL_CHECKING_ENABLED) - { - RtlFillMemory((PCHAR)(InUseEntry + 1) + Size, - HEAP_ENTRY_SIZE, - HEAP_TAIL_FILL); - } - - /* Check if the difference is significant or not */ - if (Index != OldIndex) - { - /* Save flags */ - FreeFlags = InUseEntry->Flags & ~HEAP_ENTRY_BUSY; - - if (FreeFlags & HEAP_ENTRY_VIRTUAL_ALLOC) - { - /* This is a virtual block allocation */ - VirtualAllocBlock = CONTAINING_RECORD(InUseEntry, HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock); - - // FIXME Tagging! 
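/* A minimal sketch of the shrink-in-place arithmetic applied just below to a
 * virtually allocated block: everything beyond the new, page-rounded
 * allocation size is released back to the memory manager, and the block's
 * recorded commit size is reduced by the same amount. The parameter names and
 * the plain size_t types are simplifications of the real
 * HEAP_VIRTUAL_ALLOC_ENTRY bookkeeping. */
static void ExampleShrinkVirtualBlock(char *BlockBase, size_t OldTotalSize, size_t NewAllocationSize,
                                      char **DecommitBase, size_t *DecommitSize, size_t *CommitSize)
{
    /* NewAllocationSize has already been rounded up to a page multiple */
    *DecommitBase = BlockBase + NewAllocationSize;         /* first byte no longer needed */
    *DecommitSize = OldTotalSize - NewAllocationSize;      /* tail handed back via MEM_RELEASE */
    *CommitSize  -= *DecommitSize;                         /* bookkeeping, on successful release */
}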
- - DecommitBase = (PCHAR)VirtualAllocBlock + AllocationSize; - DecommitSize = (OldIndex << HEAP_ENTRY_SHIFT) - AllocationSize; - - /* Release the memory */ - Status = ZwFreeVirtualMemory(NtCurrentProcess(), - (PVOID *)&DecommitBase, - &DecommitSize, - MEM_RELEASE); - - if (!NT_SUCCESS(Status)) - { - DPRINT1("HEAP: Unable to release memory (pointer %p, size 0x%x), Status %08x\n", DecommitBase, DecommitSize, Status); - } - else - { - /* Otherwise reduce the commit size */ - VirtualAllocBlock->CommitSize -= DecommitSize; - } - } - else - { - /* Reduce size of the block and possibly split it */ - SplitBlock = (PHEAP_FREE_ENTRY)(InUseEntry + Index); - - /* Initialize this entry */ - SplitBlock->Flags = FreeFlags; - SplitBlock->PreviousSize = Index; - SplitBlock->SegmentOffset = InUseEntry->SegmentOffset; - - /* Remember free size */ - FreeSize = InUseEntry->Size - Index; - - /* Set new size */ - InUseEntry->Size = Index; - InUseEntry->Flags &= ~HEAP_ENTRY_LAST_ENTRY; - - /* Is that the last entry */ - if (FreeFlags & HEAP_ENTRY_LAST_ENTRY) - { - /* Update segment's last entry */ - Heap->Segments[SplitBlock->SegmentOffset]->LastEntryInSegment = (PHEAP_ENTRY)SplitBlock; - - /* Set its size and insert it to the list */ - SplitBlock->Size = (USHORT)FreeSize; - RtlpInsertFreeBlockHelper(Heap, SplitBlock, FreeSize, FALSE); - - /* Update total free size */ - Heap->TotalFreeSize += FreeSize; - } - else - { - /* Get the block after that one */ - SplitBlock2 = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)SplitBlock + FreeSize); - - if (SplitBlock2->Flags & HEAP_ENTRY_BUSY) - { - /* It's in use, add it here*/ - SplitBlock->Size = (USHORT)FreeSize; - - /* Update previous size of the next entry */ - ((PHEAP_FREE_ENTRY)((PHEAP_ENTRY)SplitBlock + FreeSize))->PreviousSize = (USHORT)FreeSize; - - /* Insert it to the list */ - RtlpInsertFreeBlockHelper(Heap, SplitBlock, FreeSize, FALSE); - - /* Update total size */ - Heap->TotalFreeSize += FreeSize; - } - else - { - /* Next entry is free, so merge with it */ - SplitBlock->Flags = SplitBlock2->Flags; - - /* Remove it, update total size */ - RtlpRemoveFreeBlock(Heap, SplitBlock2, FALSE, FALSE); - Heap->TotalFreeSize -= SplitBlock2->Size; - - /* Calculate total free size */ - FreeSize += SplitBlock2->Size; - - if (FreeSize <= HEAP_MAX_BLOCK_SIZE) - { - SplitBlock->Size = FreeSize; - - if (!(SplitBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) - { - /* Update previous size of the next entry */ - ((PHEAP_FREE_ENTRY)((PHEAP_ENTRY)SplitBlock + FreeSize))->PreviousSize = FreeSize; - } - else - { - Heap->Segments[SplitBlock->SegmentOffset]->LastEntryInSegment = (PHEAP_ENTRY)SplitBlock; - } - - /* Insert the new one back and update total size */ - RtlpInsertFreeBlockHelper(Heap, SplitBlock, FreeSize, FALSE); - Heap->TotalFreeSize += FreeSize; - } - else - { - /* Just add it */ - RtlpInsertFreeBlock(Heap, SplitBlock, FreeSize); - } - } - } - } - } - } - else - { - /* We're growing the block */ - if ((InUseEntry->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) || - !RtlpGrowBlockInPlace(Heap, Flags, InUseEntry, Size, Index)) - { - /* Growing in place failed, so growing out of place */ - if (Flags & HEAP_REALLOC_IN_PLACE_ONLY) - { - DPRINT1("Realloc in place failed, but it was the only option\n"); - Ptr = NULL; - } - else - { - /* Clear tag bits */ - Flags &= ~HEAP_TAG_MASK; - - /* Process extra stuff */ - if (InUseEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT) - { - /* Preserve user settable flags */ - Flags &= ~HEAP_SETTABLE_USER_FLAGS; - - Flags |= HEAP_SETTABLE_USER_VALUE | ((InUseEntry->Flags & 
HEAP_ENTRY_SETTABLE_FLAGS) << 4); - - /* Get pointer to the old extra data */ - OldExtra = RtlpGetExtraStuffPointer(InUseEntry); - - /* Save tag index if it was set */ - if (OldExtra->TagIndex && - !(OldExtra->TagIndex & HEAP_PSEUDO_TAG_FLAG)) - { - Flags |= OldExtra->TagIndex << HEAP_TAG_SHIFT; - } - } - else if (InUseEntry->SmallTagIndex) - { - /* Take small tag index into account */ - Flags |= InUseEntry->SmallTagIndex << HEAP_TAG_SHIFT; - } - - /* Allocate new block from the heap */ - NewBaseAddress = RtlAllocateHeap(HeapPtr, - Flags & ~HEAP_ZERO_MEMORY, - Size); - - /* Proceed if it didn't fail */ - if (NewBaseAddress) - { - /* Get new entry pointer */ - NewInUseEntry = (PHEAP_ENTRY)NewBaseAddress - 1; - - /* Process extra stuff if it exists */ - if (NewInUseEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT) - { - NewExtra = RtlpGetExtraStuffPointer(NewInUseEntry); - - if (InUseEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT) - { - OldExtra = RtlpGetExtraStuffPointer(InUseEntry); - NewExtra->Settable = OldExtra->Settable; - } - else - { - RtlZeroMemory(NewExtra, sizeof(*NewExtra)); - } - } - - /* Copy actual user bits */ - if (Size < OldSize) - RtlMoveMemory(NewBaseAddress, Ptr, Size); - else - RtlMoveMemory(NewBaseAddress, Ptr, OldSize); - - /* Zero remaining part if required */ - if (Size > OldSize && - (Flags & HEAP_ZERO_MEMORY)) - { - RtlZeroMemory((PCHAR)NewBaseAddress + OldSize, Size - OldSize); - } - - /* Free the old block */ - RtlFreeHeap(HeapPtr, Flags, Ptr); - } - - Ptr = NewBaseAddress; - } - } - } - - /* Did resizing fail? */ - if (!Ptr && (Flags & HEAP_GENERATE_EXCEPTIONS)) - { - /* Generate an exception if required */ - ExceptionRecord.ExceptionCode = STATUS_NO_MEMORY; - ExceptionRecord.ExceptionRecord = NULL; - ExceptionRecord.NumberParameters = 1; - ExceptionRecord.ExceptionFlags = 0; - ExceptionRecord.ExceptionInformation[0] = AllocationSize; - - RtlRaiseException(&ExceptionRecord); - } - - /* Release the heap lock if it was acquired */ - if (HeapLocked) - RtlLeaveHeapLock(Heap->LockVariable); - - return Ptr; -} - - -/*********************************************************************** - * RtlCompactHeap - * - * @unimplemented - */ -ULONG NTAPI -RtlCompactHeap(HANDLE Heap, - ULONG Flags) -{ - UNIMPLEMENTED; - return 0; -} - - -/*********************************************************************** - * RtlLockHeap - * Attempts to acquire the critical section object for a specified heap. - * - * PARAMS - * Heap [in] Handle of heap to lock for exclusive access - * - * RETURNS - * TRUE: Success - * FALSE: Failure - * - * @implemented - */ -BOOLEAN NTAPI -RtlLockHeap(IN HANDLE HeapPtr) -{ - PHEAP Heap = (PHEAP)HeapPtr; - - // FIXME Check for special heap - - /* Check if it's really a heap */ - if (Heap->Signature != HEAP_SIGNATURE) return FALSE; - - /* Lock if it's lockable */ - if (!(Heap->Flags & HEAP_NO_SERIALIZE)) - { - RtlEnterHeapLock(Heap->LockVariable); - } - - return TRUE; -} - - -/*********************************************************************** - * RtlUnlockHeap - * Releases ownership of the critical section object. 
- * - * PARAMS - * Heap [in] Handle to the heap to unlock - * - * RETURNS - * TRUE: Success - * FALSE: Failure - * - * @implemented - */ -BOOLEAN NTAPI -RtlUnlockHeap(HANDLE HeapPtr) -{ - PHEAP Heap = (PHEAP)HeapPtr; - - // FIXME Check for special heap - - /* Check if it's really a heap */ - if (Heap->Signature != HEAP_SIGNATURE) return FALSE; - - /* Unlock if it's lockable */ - if (!(Heap->Flags & HEAP_NO_SERIALIZE)) - { - RtlLeaveHeapLock(Heap->LockVariable); - } - - return TRUE; -} - - -/*********************************************************************** - * RtlSizeHeap - * PARAMS - * Heap [in] Handle of heap - * Flags [in] Heap size control flags - * Ptr [in] Address of memory to return size for - * - * RETURNS - * Size in bytes of allocated memory - * 0xffffffff: Failure - * - * @implemented - */ -SIZE_T NTAPI -RtlSizeHeap( - HANDLE HeapPtr, - ULONG Flags, - PVOID Ptr -) -{ - PHEAP Heap = (PHEAP)HeapPtr; - PHEAP_ENTRY HeapEntry; - SIZE_T EntrySize; - - // FIXME This is a hack around missing SEH support! - if (!Heap) - { - RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_INVALID_HANDLE); - return (SIZE_T)-1; - } - - /* Force flags */ - Flags |= Heap->ForceFlags; - - /* Call special heap */ - if (RtlpHeapIsSpecial(Flags)) - return RtlDebugSizeHeap(Heap, Flags, Ptr); - - /* Get the heap entry pointer */ - HeapEntry = (PHEAP_ENTRY)Ptr - 1; - - /* Return -1 if that entry is free */ - if (!(HeapEntry->Flags & HEAP_ENTRY_BUSY)) - { - RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_INVALID_PARAMETER); - return (SIZE_T)-1; - } - - /* Get size of this block depending if it's a usual or a big one */ - if (HeapEntry->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) - { - EntrySize = RtlpGetSizeOfBigBlock(HeapEntry); - } - else - { - /* Calculate it */ - EntrySize = (HeapEntry->Size << HEAP_ENTRY_SHIFT) - HeapEntry->UnusedBytes; - } - - /* Return calculated size */ - return EntrySize; -} - -BOOLEAN NTAPI -RtlpCheckInUsePattern(PHEAP_ENTRY HeapEntry) -{ - SIZE_T Size, Result; - PCHAR TailPart; - - /* Calculate size */ - if (HeapEntry->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) - Size = RtlpGetSizeOfBigBlock(HeapEntry); - else - Size = (HeapEntry->Size << HEAP_ENTRY_SHIFT) - HeapEntry->UnusedBytes; - - /* Calculate pointer to the tail part of the block */ - TailPart = (PCHAR)(HeapEntry + 1) + Size; - - /* Compare tail pattern */ - Result = RtlCompareMemory(TailPart, - FillPattern, - HEAP_ENTRY_SIZE); - - if (Result != HEAP_ENTRY_SIZE) - { - DPRINT1("HEAP: Heap entry (size %x) %p tail is modified at %p\n", Size, HeapEntry, TailPart + Result); - return FALSE; - } - - /* All is fine */ - return TRUE; -} - -BOOLEAN NTAPI -RtlpValidateHeapHeaders( - PHEAP Heap, - BOOLEAN Recalculate) -{ - // We skip header validation for now - return TRUE; -} - -BOOLEAN NTAPI -RtlpValidateHeapEntry( - PHEAP Heap, - PHEAP_ENTRY HeapEntry) -{ - BOOLEAN BigAllocation, EntryFound = FALSE; - PHEAP_SEGMENT Segment; - ULONG SegmentOffset; - - /* Perform various consistency checks of this entry */ - if (!HeapEntry) goto invalid_entry; - if ((ULONG_PTR)HeapEntry & (HEAP_ENTRY_SIZE - 1)) goto invalid_entry; - if (!(HeapEntry->Flags & HEAP_ENTRY_BUSY)) goto invalid_entry; - - BigAllocation = HeapEntry->Flags & HEAP_ENTRY_VIRTUAL_ALLOC; - Segment = Heap->Segments[HeapEntry->SegmentOffset]; - - if (BigAllocation && - (((ULONG_PTR)HeapEntry & (PAGE_SIZE - 1)) != FIELD_OFFSET(HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock))) - goto invalid_entry; - - if (!BigAllocation && (HeapEntry->SegmentOffset >= HEAP_SEGMENTS || - !Segment || - HeapEntry < 
Segment->FirstEntry || - HeapEntry >= Segment->LastValidEntry)) - goto invalid_entry; - - if ((HeapEntry->Flags & HEAP_ENTRY_FILL_PATTERN) && - !RtlpCheckInUsePattern(HeapEntry)) - goto invalid_entry; - - /* Checks are done, if this is a virtual entry, that's all */ - if (HeapEntry->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) return TRUE; - - /* Go through segments and check if this entry fits into any of them */ - for (SegmentOffset = 0; SegmentOffset < HEAP_SEGMENTS; SegmentOffset++) - { - Segment = Heap->Segments[SegmentOffset]; - if (!Segment) continue; - - if ((HeapEntry >= Segment->FirstEntry) && - (HeapEntry < Segment->LastValidEntry)) - { - /* Got it */ - EntryFound = TRUE; - break; - } - } - - /* Return our result of finding entry in the segments */ - return EntryFound; - -invalid_entry: - DPRINT1("HEAP: Invalid heap entry %p in heap %p\n", HeapEntry, Heap); - return FALSE; -} - -BOOLEAN NTAPI -RtlpValidateHeapSegment( - PHEAP Heap, - PHEAP_SEGMENT Segment, - UCHAR SegmentOffset, - PULONG FreeEntriesCount, - PSIZE_T TotalFreeSize, - PSIZE_T TagEntries, - PSIZE_T PseudoTagEntries) -{ - PHEAP_UCR_DESCRIPTOR UcrDescriptor; - PLIST_ENTRY UcrEntry; - SIZE_T ByteSize, Size, Result; - PHEAP_ENTRY CurrentEntry; - ULONG UnCommittedPages; - ULONG UnCommittedRanges; - ULONG PreviousSize; - - UnCommittedPages = 0; - UnCommittedRanges = 0; - - if (IsListEmpty(&Segment->UCRSegmentList)) - { - UcrEntry = NULL; - UcrDescriptor = NULL; - } - else - { - UcrEntry = Segment->UCRSegmentList.Flink; - UcrDescriptor = CONTAINING_RECORD(UcrEntry, HEAP_UCR_DESCRIPTOR, SegmentEntry); - } - - if (Segment->BaseAddress == Heap) - CurrentEntry = &Heap->Entry; - else - CurrentEntry = &Segment->Entry; - - while (CurrentEntry < Segment->LastValidEntry) - { - if (UcrDescriptor && - ((PVOID)CurrentEntry >= UcrDescriptor->Address)) - { - DPRINT1("HEAP: Entry %p is not inside uncommited range [%p .. 
%p)\n", - CurrentEntry, UcrDescriptor->Address, - (PCHAR)UcrDescriptor->Address + UcrDescriptor->Size); - - return FALSE; - } - - PreviousSize = 0; - - while (CurrentEntry < Segment->LastValidEntry) - { - if (PreviousSize != CurrentEntry->PreviousSize) - { - DPRINT1("HEAP: Entry %p has incorrect PreviousSize %x instead of %x\n", - CurrentEntry, CurrentEntry->PreviousSize, PreviousSize); - - return FALSE; - } - - PreviousSize = CurrentEntry->Size; - Size = CurrentEntry->Size << HEAP_ENTRY_SHIFT; - - if (CurrentEntry->Flags & HEAP_ENTRY_BUSY) - { - if (TagEntries) - { - UNIMPLEMENTED; - } - - /* Check fill pattern */ - if (CurrentEntry->Flags & HEAP_ENTRY_FILL_PATTERN) - { - if (!RtlpCheckInUsePattern(CurrentEntry)) - return FALSE; - } - } - else - { - /* The entry is free, increase free entries count and total free size */ - *FreeEntriesCount = *FreeEntriesCount + 1; - *TotalFreeSize += CurrentEntry->Size; - - if ((Heap->Flags & HEAP_FREE_CHECKING_ENABLED) && - (CurrentEntry->Flags & HEAP_ENTRY_FILL_PATTERN)) - { - ByteSize = Size - sizeof(HEAP_FREE_ENTRY); - - if ((CurrentEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT) && - (ByteSize > sizeof(HEAP_FREE_ENTRY_EXTRA))) - { - ByteSize -= sizeof(HEAP_FREE_ENTRY_EXTRA); - } - - Result = RtlCompareMemoryUlong((PCHAR)((PHEAP_FREE_ENTRY)CurrentEntry + 1), - ByteSize, - ARENA_FREE_FILLER); - - if (Result != ByteSize) - { - DPRINT1("HEAP: Free heap block %p modified at %p after it was freed\n", - CurrentEntry, - (PCHAR)(CurrentEntry + 1) + Result); - - return FALSE; - } - } - } - - if (CurrentEntry->SegmentOffset != SegmentOffset) - { - DPRINT1("HEAP: Heap entry %p SegmentOffset is incorrect %x (should be %x)\n", CurrentEntry, SegmentOffset, CurrentEntry->SegmentOffset); - return FALSE; - } - - /* Check if it's the last entry */ - if (CurrentEntry->Flags & HEAP_ENTRY_LAST_ENTRY) - { - CurrentEntry = (PHEAP_ENTRY)((PCHAR)CurrentEntry + Size); - - if (!UcrDescriptor) - { - /* Check if it's not really the last one */ - if (CurrentEntry != Segment->LastValidEntry) - { - DPRINT1("HEAP: Heap entry %p is not last block in segment (%x)\n", CurrentEntry, Segment->LastValidEntry); - return FALSE; - } - } - else if (CurrentEntry != UcrDescriptor->Address) - { - DPRINT1("HEAP: Heap entry %p does not match next uncommitted address (%p)\n", - CurrentEntry, UcrDescriptor->Address); - - return FALSE; - } - else - { - UnCommittedPages += (UcrDescriptor->Size / PAGE_SIZE); - UnCommittedRanges++; - - CurrentEntry = (PHEAP_ENTRY)((PCHAR)UcrDescriptor->Address + UcrDescriptor->Size); - - /* Go to the next UCR descriptor */ - UcrEntry = UcrEntry->Flink; - if (UcrEntry == &Segment->UCRSegmentList) - { - UcrEntry = NULL; - UcrDescriptor = NULL; - } - else - { - UcrDescriptor = CONTAINING_RECORD(UcrEntry, HEAP_UCR_DESCRIPTOR, SegmentEntry); - } - } - - break; - } - - /* Advance to the next entry */ - CurrentEntry = (PHEAP_ENTRY)((PCHAR)CurrentEntry + Size); - } - } - - /* Check total numbers of UCP and UCR */ - if (Segment->NumberOfUnCommittedPages != UnCommittedPages) - { - DPRINT1("HEAP: Segment %p NumberOfUnCommittedPages is invalid (%x != %x)\n", - Segment, Segment->NumberOfUnCommittedPages, UnCommittedPages); - - return FALSE; - } - - if (Segment->NumberOfUnCommittedRanges != UnCommittedRanges) - { - DPRINT1("HEAP: Segment %p NumberOfUnCommittedRanges is invalid (%x != %x)\n", - Segment, Segment->NumberOfUnCommittedRanges, UnCommittedRanges); - - return FALSE; - } - - return TRUE; -} - -BOOLEAN NTAPI -RtlpValidateHeap(PHEAP Heap, - BOOLEAN ForceValidation) -{ - PHEAP_SEGMENT 
Segment; - BOOLEAN EmptyList; - UCHAR SegmentOffset; - SIZE_T Size, TotalFreeSize; - ULONG PreviousSize; - PHEAP_VIRTUAL_ALLOC_ENTRY VirtualAllocBlock; - PLIST_ENTRY ListHead, NextEntry; - PHEAP_FREE_ENTRY FreeEntry; - ULONG FreeBlocksCount, FreeListEntriesCount; - - /* Check headers */ - if (!RtlpValidateHeapHeaders(Heap, FALSE)) - return FALSE; - - /* Skip validation if it's not needed */ - if (!ForceValidation && !(Heap->Flags & HEAP_VALIDATE_ALL_ENABLED)) - return TRUE; - - /* Check free lists bitmaps */ - FreeListEntriesCount = 0; - ListHead = &Heap->FreeLists[0]; - - for (Size = 0; Size < HEAP_FREELISTS; Size++) - { - if (Size) - { - /* This is a dedicated list. Check if it's empty */ - EmptyList = IsListEmpty(ListHead); - - if (Heap->u.FreeListsInUseBytes[Size >> 3] & (1 << (Size & 7))) - { - if (EmptyList) - { - DPRINT1("HEAP: Empty %x-free list marked as non-empty\n", Size); - return FALSE; - } - } - else - { - if (!EmptyList) - { - DPRINT1("HEAP: Non-empty %x-free list marked as empty\n", Size); - return FALSE; - } - } - } - - /* Now check this list entries */ - NextEntry = ListHead->Flink; - PreviousSize = 0; - - while (ListHead != NextEntry) - { - FreeEntry = CONTAINING_RECORD(NextEntry, HEAP_FREE_ENTRY, FreeList); - NextEntry = NextEntry->Flink; - - /* If there is an in-use entry in a free list - that's quite a big problem */ - if (FreeEntry->Flags & HEAP_ENTRY_BUSY) - { - DPRINT1("HEAP: %x-dedicated list free element %x is marked in-use\n", Size, FreeEntry); - return FALSE; - } - - /* Check sizes according to that specific list's size */ - if ((Size == 0) && (FreeEntry->Size < HEAP_FREELISTS)) - { - DPRINT1("HEAP: Non dedicated list free element %x has size %x which would fit a dedicated list\n", FreeEntry, FreeEntry->Size); - return FALSE; - } - else if (Size && (FreeEntry->Size != Size)) - { - DPRINT1("HEAP: %x-dedicated list free element %x has incorrect size %x\n", Size, FreeEntry, FreeEntry->Size); - return FALSE; - } - else if ((Size == 0) && (FreeEntry->Size < PreviousSize)) - { - DPRINT1("HEAP: Non dedicated list free element %x is not put in order\n", FreeEntry); - return FALSE; - } - - /* Remember previous size*/ - PreviousSize = FreeEntry->Size; - - /* Add up to the total amount of free entries */ - FreeListEntriesCount++; - } - - /* Go to the head of the next free list */ - ListHead++; - } - - /* Check big allocations */ - ListHead = &Heap->VirtualAllocdBlocks; - NextEntry = ListHead->Flink; - - while (ListHead != NextEntry) - { - VirtualAllocBlock = CONTAINING_RECORD(NextEntry, HEAP_VIRTUAL_ALLOC_ENTRY, Entry); - - /* We can only check the fill pattern */ - if (VirtualAllocBlock->BusyBlock.Flags & HEAP_ENTRY_FILL_PATTERN) - { - if (!RtlpCheckInUsePattern(&VirtualAllocBlock->BusyBlock)) - return FALSE; - } - - NextEntry = NextEntry->Flink; - } - - /* Check all segments */ - FreeBlocksCount = 0; - TotalFreeSize = 0; - - for (SegmentOffset = 0; SegmentOffset < HEAP_SEGMENTS; SegmentOffset++) - { - Segment = Heap->Segments[SegmentOffset]; - - /* Go to the next one if there is no segment */ - if (!Segment) continue; - - if (!RtlpValidateHeapSegment(Heap, - Segment, - SegmentOffset, - &FreeBlocksCount, - &TotalFreeSize, - NULL, - NULL)) - { - return FALSE; - } - } - - if (FreeListEntriesCount != FreeBlocksCount) - { - DPRINT1("HEAP: Free blocks count in arena (%d) does not match free blocks number in the free lists (%d)\n", FreeBlocksCount, FreeListEntriesCount); - return FALSE; - } - - if (Heap->TotalFreeSize != TotalFreeSize) - { - DPRINT1("HEAP: Total size of free 
blocks in arena (%d) does not equal to the one in heap header (%d)\n", TotalFreeSize, Heap->TotalFreeSize); - return FALSE; - } - - return TRUE; -} - -/*********************************************************************** - * RtlValidateHeap - * Validates a specified heap. - * - * PARAMS - * Heap [in] Handle to the heap - * Flags [in] Bit flags that control access during operation - * Block [in] Optional pointer to memory block to validate - * - * NOTES - * Flags is ignored. - * - * RETURNS - * TRUE: Success - * FALSE: Failure - * - * @implemented - */ -BOOLEAN NTAPI RtlValidateHeap( - HANDLE HeapPtr, - ULONG Flags, - PVOID Block -) -{ - PHEAP Heap = (PHEAP)HeapPtr; - BOOLEAN HeapLocked = FALSE; - BOOLEAN HeapValid; - - // FIXME Check for special heap - - /* Check signature */ - if (Heap->Signature != HEAP_SIGNATURE) - { - DPRINT1("HEAP: Signature %x is invalid for heap %p\n", Heap->Signature, Heap); - return FALSE; - } - - /* Force flags */ - Flags = Heap->ForceFlags; - - /* Acquire the lock if necessary */ - if (!(Flags & HEAP_NO_SERIALIZE)) - { - RtlEnterHeapLock(Heap->LockVariable); - HeapLocked = TRUE; - } - - /* Either validate whole heap or just one entry */ - if (!Block) - HeapValid = RtlpValidateHeap(Heap, TRUE); - else - HeapValid = RtlpValidateHeapEntry(Heap, (PHEAP_ENTRY)Block - 1); - - /* Unlock if it's lockable */ - if (HeapLocked) - { - RtlLeaveHeapLock(Heap->LockVariable); - } - - return HeapValid; -} - -VOID -RtlInitializeHeapManager(VOID) -{ - PPEB Peb; - - /* Get PEB */ - Peb = RtlGetCurrentPeb(); - - /* Initialize heap-related fields of PEB */ - Peb->NumberOfHeaps = 0; - - /* Initialize the process heaps list protecting lock */ - RtlInitializeHeapLock(&RtlpProcessHeapsListLock); -} - - -/* - * @implemented - */ -NTSTATUS NTAPI -RtlEnumProcessHeaps(PHEAP_ENUMERATION_ROUTINE HeapEnumerationRoutine, - PVOID lParam) -{ - UNIMPLEMENTED; - return STATUS_NOT_IMPLEMENTED; -} - - -/* - * @implemented - */ -ULONG NTAPI -RtlGetProcessHeaps(ULONG count, - HANDLE *heaps) -{ - UNIMPLEMENTED; - return 0; -} - - -/* - * @implemented - */ -BOOLEAN NTAPI -RtlValidateProcessHeaps(VOID) -{ - UNIMPLEMENTED; - return TRUE; -} - - -/* - * @unimplemented - */ -BOOLEAN NTAPI -RtlZeroHeap( - IN PVOID HeapHandle, - IN ULONG Flags - ) -{ - UNIMPLEMENTED; - return FALSE; -} - -/* - * @implemented - */ -BOOLEAN -NTAPI -RtlSetUserValueHeap(IN PVOID HeapHandle, - IN ULONG Flags, - IN PVOID BaseAddress, - IN PVOID UserValue) -{ - PHEAP Heap = (PHEAP)HeapHandle; - PHEAP_ENTRY HeapEntry; - PHEAP_ENTRY_EXTRA Extra; - BOOLEAN HeapLocked = FALSE; - - /* Force flags */ - Flags |= Heap->Flags; - - /* Call special heap */ - if (RtlpHeapIsSpecial(Flags)) - return RtlDebugSetUserValueHeap(Heap, Flags, BaseAddress, UserValue); - - /* Lock if it's lockable */ - if (!(Heap->Flags & HEAP_NO_SERIALIZE)) - { - RtlEnterHeapLock(Heap->LockVariable); - HeapLocked = TRUE; - } - - /* Get a pointer to the entry */ - HeapEntry = (PHEAP_ENTRY)BaseAddress - 1; - - /* If it's a free entry - return error */ - if (!(HeapEntry->Flags & HEAP_ENTRY_BUSY)) - { - RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_INVALID_PARAMETER); - - /* Release the heap lock if it was acquired */ - if (HeapLocked) - RtlLeaveHeapLock(Heap->LockVariable); - - return FALSE; - } - - /* Check if this entry has an extra stuff associated with it */ - if (HeapEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT) - { - /* Use extra to store the value */ - Extra = RtlpGetExtraStuffPointer(HeapEntry); - Extra->Settable = (ULONG_PTR)UserValue; - } - - /* Release the heap 
lock if it was acquired */ - if (HeapLocked) - RtlLeaveHeapLock(Heap->LockVariable); - - return TRUE; -} - -/* - * @implemented - */ -BOOLEAN -NTAPI -RtlSetUserFlagsHeap(IN PVOID HeapHandle, - IN ULONG Flags, - IN PVOID BaseAddress, - IN ULONG UserFlagsReset, - IN ULONG UserFlagsSet) -{ - PHEAP Heap = (PHEAP)HeapHandle; - PHEAP_ENTRY HeapEntry; - BOOLEAN HeapLocked = FALSE; - - /* Force flags */ - Flags |= Heap->Flags; - - /* Call special heap */ - if (RtlpHeapIsSpecial(Flags)) - return RtlDebugSetUserFlagsHeap(Heap, Flags, BaseAddress, UserFlagsReset, UserFlagsSet); - - /* Lock if it's lockable */ - if (!(Heap->Flags & HEAP_NO_SERIALIZE)) - { - RtlEnterHeapLock(Heap->LockVariable); - HeapLocked = TRUE; - } - - /* Get a pointer to the entry */ - HeapEntry = (PHEAP_ENTRY)BaseAddress - 1; - - /* If it's a free entry - return error */ - if (!(HeapEntry->Flags & HEAP_ENTRY_BUSY)) - { - RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_INVALID_PARAMETER); - - /* Release the heap lock if it was acquired */ - if (HeapLocked) - RtlLeaveHeapLock(Heap->LockVariable); - - return FALSE; - } - - /* Set / reset flags */ - HeapEntry->Flags &= ~(UserFlagsReset >> 4); - HeapEntry->Flags |= (UserFlagsSet >> 4); - - /* Release the heap lock if it was acquired */ - if (HeapLocked) - RtlLeaveHeapLock(Heap->LockVariable); - - return TRUE; -} - -/* - * @implemented - */ -BOOLEAN -NTAPI -RtlGetUserInfoHeap(IN PVOID HeapHandle, - IN ULONG Flags, - IN PVOID BaseAddress, - OUT PVOID *UserValue, - OUT PULONG UserFlags) -{ - PHEAP Heap = (PHEAP)HeapHandle; - PHEAP_ENTRY HeapEntry; - PHEAP_ENTRY_EXTRA Extra; - BOOLEAN HeapLocked = FALSE; - - /* Force flags */ - Flags |= Heap->Flags; - - /* Call special heap */ - if (RtlpHeapIsSpecial(Flags)) - return RtlDebugGetUserInfoHeap(Heap, Flags, BaseAddress, UserValue, UserFlags); - - /* Lock if it's lockable */ - if (!(Heap->Flags & HEAP_NO_SERIALIZE)) - { - RtlEnterHeapLock(Heap->LockVariable); - HeapLocked = TRUE; - } - - /* Get a pointer to the entry */ - HeapEntry = (PHEAP_ENTRY)BaseAddress - 1; - - /* If it's a free entry - return error */ - if (!(HeapEntry->Flags & HEAP_ENTRY_BUSY)) - { - RtlSetLastWin32ErrorAndNtStatusFromNtStatus(STATUS_INVALID_PARAMETER); - - /* Release the heap lock if it was acquired */ - if (HeapLocked) - RtlLeaveHeapLock(Heap->LockVariable); - - return FALSE; - } - - /* Check if this entry has an extra stuff associated with it */ - if (HeapEntry->Flags & HEAP_ENTRY_EXTRA_PRESENT) - { - /* Get pointer to extra data */ - Extra = RtlpGetExtraStuffPointer(HeapEntry); - - /* Pass user value */ - if (UserValue) - *UserValue = (PVOID)Extra->Settable; - - /* Decode and return user flags */ - if (UserFlags) - *UserFlags = (HeapEntry->Flags & HEAP_ENTRY_SETTABLE_FLAGS) << 4; - } - - /* Release the heap lock if it was acquired */ - if (HeapLocked) - RtlLeaveHeapLock(Heap->LockVariable); - - return TRUE; -} - -/* - * @unimplemented - */ -NTSTATUS -NTAPI -RtlUsageHeap(IN HANDLE Heap, - IN ULONG Flags, - OUT PRTL_HEAP_USAGE Usage) -{ - /* TODO */ - UNIMPLEMENTED; - return STATUS_NOT_IMPLEMENTED; -} - -PWSTR -NTAPI -RtlQueryTagHeap(IN PVOID HeapHandle, - IN ULONG Flags, - IN USHORT TagIndex, - IN BOOLEAN ResetCounters, - OUT PRTL_HEAP_TAG_INFO HeapTagInfo) -{ - /* TODO */ - UNIMPLEMENTED; - return NULL; -} - -ULONG -NTAPI -RtlExtendHeap(IN HANDLE Heap, - IN ULONG Flags, - IN PVOID P, - IN SIZE_T Size) -{ - /* TODO */ - UNIMPLEMENTED; - return 0; -} - -ULONG -NTAPI -RtlCreateTagHeap(IN HANDLE HeapHandle, - IN ULONG Flags, - IN PWSTR TagName, - IN PWSTR 
TagSubName) -{ - /* TODO */ - UNIMPLEMENTED; - return 0; -} - -NTSTATUS -NTAPI -RtlWalkHeap(IN HANDLE HeapHandle, - IN PVOID HeapEntry) -{ - UNIMPLEMENTED; - return STATUS_NOT_IMPLEMENTED; -} - -PVOID -NTAPI -RtlProtectHeap(IN PVOID HeapHandle, - IN BOOLEAN ReadOnly) -{ - UNIMPLEMENTED; - return NULL; -} - -NTSTATUS -NTAPI -RtlSetHeapInformation(IN HANDLE HeapHandle OPTIONAL, - IN HEAP_INFORMATION_CLASS HeapInformationClass, - IN PVOID HeapInformation, - IN SIZE_T HeapInformationLength) -{ - /* Setting heap information is not really supported except for enabling LFH */ - if (HeapInformationClass == 0) return STATUS_SUCCESS; - - /* Check buffer length */ - if (HeapInformationLength < sizeof(ULONG)) - { - /* The provided buffer is too small */ - return STATUS_BUFFER_TOO_SMALL; - } - - /* Check for a special magic value for enabling LFH */ - if (*(PULONG)HeapInformation == 2) - { - DPRINT1("RtlSetHeapInformation() needs to enable LFH\n"); - return STATUS_SUCCESS; - } - - return STATUS_UNSUCCESSFUL; -} - -NTSTATUS -NTAPI -RtlQueryHeapInformation(HANDLE HeapHandle, - HEAP_INFORMATION_CLASS HeapInformationClass, - PVOID HeapInformation OPTIONAL, - SIZE_T HeapInformationLength OPTIONAL, - PSIZE_T ReturnLength OPTIONAL) -{ - PHEAP Heap = (PHEAP)HeapHandle; - - /* Only HeapCompatibilityInformation is supported */ - if (HeapInformationClass != HeapCompatibilityInformation) - return STATUS_UNSUCCESSFUL; - - /* Set result length */ - if (ReturnLength) *ReturnLength = sizeof(ULONG); - - /* Check buffer length */ - if (HeapInformationLength < sizeof(ULONG)) - { - /* It's too small, return needed length */ - return STATUS_BUFFER_TOO_SMALL; - } - - /* Return front end heap type */ - *(PULONG)HeapInformation = Heap->FrontEndHeapType; - - return STATUS_SUCCESS; -} - -NTSTATUS -NTAPI -RtlMultipleAllocateHeap(IN PVOID HeapHandle, - IN ULONG Flags, - IN SIZE_T Size, - IN ULONG Count, - OUT PVOID *Array) -{ - UNIMPLEMENTED; - return 0; -} - -NTSTATUS -NTAPI -RtlMultipleFreeHeap(IN PVOID HeapHandle, - IN ULONG Flags, - IN ULONG Count, - OUT PVOID *Array) -{ - UNIMPLEMENTED; - return 0; -} - -/* EOF */
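
Editor's note: the shrink path of the reallocation code in this patch splits the existing block at the new size index and either inserts the remainder into a free list or coalesces it with a following free block. The sketch below is a deliberately simplified, hypothetical model of that arithmetic only, not ReactOS code: MODEL_ENTRY, model_shrink_block and GRANULE are invented names, and block sizes are counted in allocation granules the same way HEAP_ENTRY::Size is in the patch.

/* Hypothetical, simplified model of the shrink-and-split arithmetic used by
 * the realloc path in this patch. MODEL_ENTRY, model_shrink_block and GRANULE
 * are illustrative names only and do not exist in the ReactOS sources. */
#include <stdio.h>

#define GRANULE 16               /* assumed allocation granularity, in bytes */

typedef struct MODEL_ENTRY
{
    unsigned short Size;         /* size of this block, in granules          */
    unsigned short PreviousSize; /* size of the preceding block, in granules */
    int Busy;                    /* nonzero if the block is allocated        */
} MODEL_ENTRY;

/* Shrink a busy block to NewIndex granules; the remainder becomes a free
 * block and, if the next block is also free, the two are coalesced, which
 * mirrors the SplitBlock/SplitBlock2 handling in the patch. */
static void model_shrink_block(MODEL_ENTRY *Block, MODEL_ENTRY *Next,
                               unsigned short NewIndex)
{
    unsigned short FreeSize = Block->Size - NewIndex;

    Block->Size = NewIndex;      /* the in-use part keeps NewIndex granules  */

    if (Next && !Next->Busy)
        FreeSize += Next->Size;  /* merge the remainder with the free
                                    neighbour instead of leaving two blocks  */

    printf("busy: %u bytes, free remainder: %u bytes\n",
           Block->Size * GRANULE, FreeSize * GRANULE);
}

int main(void)
{
    MODEL_ENTRY block = { 32,  0, 1 };     /* 512-byte busy block            */
    MODEL_ENTRY next  = {  8, 32, 0 };     /* 128-byte free neighbour        */

    model_shrink_block(&block, &next, 20); /* shrink the busy part to 320 B  */
    return 0;
}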
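
Editor's note: several entry points handled in this hunk, RtlLockHeap, RtlUnlockHeap, RtlSizeHeap and RtlValidateHeap, are reachable from user mode through their kernel32 wrappers (HeapLock, HeapUnlock, HeapSize, HeapValidate). The following console program is illustrative usage only, assuming those standard Win32 APIs; it is not part of this patch.

/* Minimal user-mode exercise of the heap entry points in this patch,
 * via their kernel32 wrappers. Compile as a Win32 console program. */
#include <windows.h>
#include <stdio.h>

int main(void)
{
    HANDLE heap = HeapCreate(0, 0, 0);     /* growable private heap          */
    void *block;

    if (!heap) return 1;

    block = HeapAlloc(heap, HEAP_ZERO_MEMORY, 100);
    if (!block) return 1;

    /* RtlSizeHeap reports the usable size, (Size << HEAP_ENTRY_SHIFT) minus
     * the unused bytes, so this is at least the 100 bytes requested. */
    printf("usable size: %lu bytes\n", (unsigned long)HeapSize(heap, 0, block));

    /* RtlLockHeap/RtlUnlockHeap only take the lock when the heap was not
     * created with HEAP_NO_SERIALIZE. */
    if (HeapLock(heap))
    {
        /* other threads cannot allocate from this heap here */
        HeapUnlock(heap);
    }

    /* RtlValidateHeap walks the free lists, the big-allocation list and all
     * segments when no block is given, or validates a single entry. */
    printf("heap valid:  %d\n", HeapValidate(heap, 0, NULL));
    printf("block valid: %d\n", HeapValidate(heap, 0, block));

    HeapFree(heap, 0, block);
    HeapDestroy(heap);
    return 0;
}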
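
Editor's note: RtlQueryHeapInformation and RtlSetHeapInformation in this patch only handle HeapCompatibilityInformation, and treat the value 2 as a request to enable the low-fragmentation front end. The sketch below shows how that surface is reached from user mode through the documented kernel32 wrappers HeapQueryInformation/HeapSetInformation; it is an illustrative example, not part of the patch.

/* Querying and requesting the front-end heap type through the kernel32
 * wrappers of RtlQueryHeapInformation/RtlSetHeapInformation. */
#include <windows.h>
#include <stdio.h>

int main(void)
{
    HANDLE heap = HeapCreate(0, 0, 0);
    ULONG info = 0;
    SIZE_T returned;

    if (!heap) return 1;

    /* Maps to RtlQueryHeapInformation: 0 = standard backend,
     * 1 = look-aside lists (older systems), 2 = low-fragmentation heap. */
    if (HeapQueryInformation(heap, HeapCompatibilityInformation,
                             &info, sizeof(info), &returned))
    {
        printf("front-end heap type: %lu\n", info);
    }

    /* Maps to RtlSetHeapInformation with *HeapInformation == 2; the stub in
     * this patch accepts the request but does not yet switch the heap to the
     * LFH front end. */
    info = 2;
    if (HeapSetInformation(heap, HeapCompatibilityInformation,
                           &info, sizeof(info)))
    {
        printf("LFH requested\n");
    }

    HeapDestroy(heap);
    return 0;
}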