- Enabled global pages if possible.

- Put the kernel map onto a fixed location and establish a page table
  for the first 4MB in the very early boot process.
- Disable the mapping for the page table pages within the bss section.

svn path=/trunk/; revision=10525
This commit is contained in:
Hartmut Birr 2004-08-14 09:17:05 +00:00
parent ca1a1af64c
commit ba5235cc90
7 changed files with 97 additions and 42 deletions

View file

@ -136,6 +136,11 @@ VOID
NtEarlyInitVdm(VOID);
#define X86_EFLAGS_ID (1 << 21)
#define X86_CR4_PGE (1 << 8)
#define X86_FEATURE_PGE (1 << 13)
#if defined(__GNUC__)
#define Ke386DisableInterrupts() __asm__("cli\n\t");
#define Ke386EnableInterrupts() __asm__("sti\n\t");
@ -146,6 +151,14 @@ NtEarlyInitVdm(VOID);
__asm__("movl %0,%%cr3\n\t" \
: /* no outputs */ \
: "r" (X));
#define Ke386SaveFlags(x) __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */)
#define Ke386RestoreFlags(x) __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory")
#define Ke386GetCr4() ({ \
unsigned int __d; \
__asm__("movl %%cr4,%0\n\t" :"=r" (__d)); \
__d; \
})
#define Ke386SetCr4(X) __asm__("movl %0,%%cr4": :"r" (X));
#elif defined(_MSC_VER)
#define Ke386DisableInterrupts() __asm cli
#define Ke386EnableInterrupts() __asm sti
@ -153,7 +166,7 @@ NtEarlyInitVdm(VOID);
#define Ke386GetPageTableDirectory(X) \
__asm mov eax, cr3; \
__asm mov X, eax;
#define Ke386GetPageTableDirectory(X) \
#define Ke386SetPageTableDirectory(X) \
__asm mov eax, X; \
__asm mov cr3, eax;
#else

View file

@ -38,6 +38,7 @@ extern ULONG MmPagedPoolSize;
 * Maximum size of the kmalloc area (this is totally arbitrary)
*/
#define MM_KERNEL_MAP_SIZE (16*1024*1024)
#define MM_KERNEL_MAP_BASE (0xf0c00000)
/*
* FIXME - different architectures have different cache line sizes...

View file

@ -44,8 +44,6 @@ static PFN_TYPE PcrPages[MAXIMUM_PROCESSORS];
ULONG Ke386CpuidFlags, Ke386CpuidFlags2, Ke386CpuidExFlags;
ULONG Ke386Cpuid = 300;
#define X86_EFLAGS_ID (1 << 21)
/* FUNCTIONS *****************************************************************/
VOID INIT_FUNCTION STATIC
@ -152,6 +150,12 @@ KeApplicationProcessorInit(VOID)
*/
Ki386InitializeLdt();
if (Ke386CpuidFlags & X86_FEATURE_PGE)
{
/* Enable global pages */
Ke386SetCr4(Ke386GetCr4() | X86_CR4_PGE);
}
/* Now we can enable interrupts. */
Ke386EnableInterrupts();
}
@ -193,6 +197,17 @@ KeInit1(VOID)
/* Get processor information. */
Ki386GetCpuId();
if (Ke386CpuidFlags & X86_FEATURE_PGE)
{
ULONG Flags;
/* Enable global pages */
Ke386SaveFlags(Flags);
Ke386DisableInterrupts();
Ke386SetCr4(Ke386GetCr4() | X86_CR4_PGE);
Ke386RestoreFlags(Flags);
}
}
VOID INIT_FUNCTION

View file

@ -27,6 +27,8 @@
.globl _unmap_me2
.globl _unmap_me3
.globl _unmap_me4
.globl _pagetable_start
.globl _pagetable_end
/*
* This is called by the realmode loader, with protected mode
@ -140,6 +142,7 @@ _multiboot_entry:
movl $(V2P(kernel_pagetable+31*4096) + 0x7), 0xC7c(%esi)
movl $(V2P(startup_pagedirectory) + 0x7), 0xF00(%esi)
movl $(V2P(kernelmap_pagetable) + 0x7), 0xF0C(%esi)
#ifdef MP
movl $(V2P(apic_pagetable) + 0x7), 0xFEC(%esi)
#endif /* MP */
@ -200,7 +203,7 @@ _multiboot_entry:
movl %eax, (%esi, %edi)
/*
* Initialize a part of the same pagetable to map the vga frame buffer.
* Initialize a part of the same pagetable to map the vga frame buffer (at FF3A0000).
*/
movl $0xa0007, %eax
movl $0x20, %ecx
@ -344,6 +347,7 @@ _multiboot_entry:
* segment
*/
.bss
_pagetable_start:
startup_pagedirectory:
.fill 4096, 1, 0
@ -353,6 +357,9 @@ lowmem_pagetable:
kernel_pagetable:
.fill 32*4096, 1, 0
kernelmap_pagetable:
.fill 4096, 1, 0
#ifdef MP
apic_pagetable:
.fill 4096, 1, 0
@ -360,7 +367,7 @@ apic_pagetable:
kpcr_pagetable:
.fill 4096, 1, 0
_pagetable_end:
_unmap_me:
.fill 4096, 1, 0

View file

@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $Id: page.c,v 1.68 2004/08/10 19:57:58 hbirr Exp $
/* $Id: page.c,v 1.69 2004/08/14 09:17:05 hbirr Exp $
*
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/i386/page.c
@ -46,6 +46,7 @@
#define PA_BIT_CD (4)
#define PA_BIT_ACCESSED (5)
#define PA_BIT_DIRTY (6)
#define PA_BIT_GLOBAL (8)
#define PA_PRESENT (1 << PA_BIT_PRESENT)
#define PA_READWRITE (1 << PA_BIT_READWRITE)
@ -54,6 +55,7 @@
#define PA_WT (1 << PA_BIT_WT)
#define PA_CD (1 << PA_BIT_CD)
#define PA_ACCESSED (1 << PA_BIT_ACCESSED)
#define PA_GLOBAL (1 << PA_BIT_GLOBAL)
#define PAGETABLE_MAP (0xf0000000)
#define PAGEDIRECTORY_MAP (0xf0000000 + (PAGETABLE_MAP / (1024)))
@ -73,6 +75,8 @@ __inline LARGE_INTEGER PTE_TO_PAGE(ULONG npage)
}
#endif
extern ULONG Ke386CpuidFlags;
/* FUNCTIONS ***************************************************************/
PULONG
@ -260,6 +264,7 @@ static PULONG MmGetPageEntry(PVOID PAddress, BOOL CreatePde)
{
PULONG Pde, kePde;
PFN_TYPE Pfn;
ULONG Attributes;
NTSTATUS Status;
DPRINT("MmGetPageEntry(Address %x)\n", PAddress);
@ -281,7 +286,12 @@ static PULONG MmGetPageEntry(PVOID PAddress, BOOL CreatePde)
{
KEBUGCHECK(0);
}
if (0 == InterlockedCompareExchange(kePde, PFN_TO_PTE(Pfn) | PA_PRESENT | PA_READWRITE, 0))
Attributes = PA_PRESENT | PA_READWRITE;
if (Ke386CpuidFlags & X86_FEATURE_PGE)
{
Attributes |= PA_GLOBAL;
}
if (0 == InterlockedCompareExchange(kePde, PFN_TO_PTE(Pfn) | Attributes, 0))
{
Pfn = 0;
}
@ -808,6 +818,10 @@ MmCreateVirtualMappingForKernel(PVOID Address,
}
Attributes = ProtectToPTE(flProtect);
if (Ke386CpuidFlags & X86_FEATURE_PGE)
{
Attributes |= PA_GLOBAL;
}
Addr = Address;
for (i = 0; i < PageCount; i++, Addr += PAGE_SIZE)
{
@ -947,6 +961,10 @@ MmCreateVirtualMappingUnsafe(PEPROCESS Process,
if (Address >= (PVOID)KERNEL_BASE)
{
Attributes &= ~PA_USER;
if (Ke386CpuidFlags & X86_FEATURE_PGE)
{
Attributes |= PA_GLOBAL;
}
}
else
{
@ -1083,6 +1101,10 @@ MmSetPageProtect(PEPROCESS Process, PVOID Address, ULONG flProtect)
if (Address >= (PVOID)KERNEL_BASE)
{
Attributes &= ~PA_USER;
if (Ke386CpuidFlags & X86_FEATURE_PGE)
{
Attributes |= PA_GLOBAL;
}
}
else
{
@ -1162,6 +1184,10 @@ MmInitGlobalKernelPageDirectory(VOID)
0 == MmGlobalKernelPageDirectory[i] && 0 != CurrentPageDirectory[i])
{
MmGlobalKernelPageDirectory[i] = CurrentPageDirectory[i];
if (Ke386CpuidFlags & X86_FEATURE_PGE)
{
MmGlobalKernelPageDirectory[i] |= PA_GLOBAL;
}
}
}
}

View file

@ -1,4 +1,4 @@
/* $Id: kmap.c,v 1.33 2004/08/01 07:24:58 hbirr Exp $
/* $Id: kmap.c,v 1.34 2004/08/14 09:17:05 hbirr Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
@ -31,16 +31,13 @@ static RTL_BITMAP AllocMap;
static KSPIN_LOCK AllocMapLock;
static ULONG AllocMapHint = 0;
extern PVOID MiKernelMapStart;
extern ULONG MiKernelMapLength;
/* FUNCTIONS ***************************************************************/
VOID
ExUnmapPage(PVOID Addr)
{
KIRQL oldIrql;
ULONG Base = ((char*)Addr - (char*)MiKernelMapStart) / PAGE_SIZE;
ULONG Base = ((char*)Addr - (char*)MM_KERNEL_MAP_BASE) / PAGE_SIZE;
DPRINT("ExUnmapPage(Addr %x)\n",Addr);
@ -110,7 +107,7 @@ ExAllocatePageWithPhysPage(PFN_TYPE Page)
{
AllocMapHint = Base + 1;
KeReleaseSpinLock(&AllocMapLock, oldlvl);
Addr = (char*)MiKernelMapStart + Base * PAGE_SIZE;
Addr = (char*)MM_KERNEL_MAP_BASE + Base * PAGE_SIZE;
Status = MmCreateVirtualMapping(NULL,
Addr,
PAGE_READWRITE | PAGE_SYSTEM,
@ -139,7 +136,7 @@ VOID
MiFreeNonPagedPoolRegion(PVOID Addr, ULONG Count, BOOLEAN Free)
{
ULONG i;
ULONG Base = ((char*)Addr - (char*)MiKernelMapStart) / PAGE_SIZE;
ULONG Base = ((char*)Addr - (char*)MM_KERNEL_MAP_BASE) / PAGE_SIZE;
KIRQL oldlvl;
for (i = 0; i < Count; i++)
@ -178,7 +175,7 @@ MiAllocNonPagedPoolRegion(ULONG nr_pages)
}
KeReleaseSpinLock(&AllocMapLock, oldlvl);
//DPRINT("returning %x\n",NonPagedPoolBase + Base * PAGE_SIZE);
return (char*)MiKernelMapStart + Base * PAGE_SIZE;
return (char*)MM_KERNEL_MAP_BASE + Base * PAGE_SIZE;
}

View file

@ -1,4 +1,4 @@
/* $Id: mminit.c,v 1.64 2004/08/01 07:24:58 hbirr Exp $
/* $Id: mminit.c,v 1.65 2004/08/14 09:17:05 hbirr Exp $
*
* COPYRIGHT: See COPYING in the top directory
* PROJECT: ReactOS kernel
@ -56,8 +56,8 @@ PHYSICAL_ADDRESS MmSharedDataPagePhysicalAddress;
PVOID MiNonPagedPoolStart;
ULONG MiNonPagedPoolLength;
PVOID MiKernelMapStart;
ULONG MiKernelMapLength;
//PVOID MiKernelMapStart;
/* FUNCTIONS ****************************************************************/
@ -104,18 +104,12 @@ MmInitVirtualMemory(ULONG LastKernelAddress,
MmInitMemoryAreas();
/* Don't change the start of kernel map. Pte's must always exist for this region. */
MiKernelMapStart = (char*)LastKernelAddress + PAGE_SIZE;
MiKernelMapLength = MM_KERNEL_MAP_SIZE;
MiNonPagedPoolStart = (char*)MiKernelMapStart + MiKernelMapLength + PAGE_SIZE;
MiNonPagedPoolStart = (char*)LastKernelAddress + PAGE_SIZE;
MiNonPagedPoolLength = MM_NONPAGED_POOL_SIZE;
MmPagedPoolBase = (char*)MiNonPagedPoolStart + MiNonPagedPoolLength + PAGE_SIZE;
MmPagedPoolSize = MM_PAGED_POOL_SIZE;
MiInitKernelMap();
MiInitializeNonPagedPool();
/*
@ -129,7 +123,7 @@ MmInitVirtualMemory(ULONG LastKernelAddress,
0x400000,
0,
&kernel_map_desc,
FALSE,
TRUE,
FALSE,
BoundaryAddressMultiple);
@ -141,7 +135,7 @@ MmInitVirtualMemory(ULONG LastKernelAddress,
PAGE_SIZE * MAXIMUM_PROCESSORS,
0,
&kernel_kpcr_desc,
FALSE,
TRUE,
FALSE,
BoundaryAddressMultiple);
@ -153,7 +147,7 @@ MmInitVirtualMemory(ULONG LastKernelAddress,
0x20000,
0,
&kernel_mapped_vga_framebuffer_desc,
FALSE,
TRUE,
FALSE,
BoundaryAddressMultiple);
@ -172,7 +166,7 @@ MmInitVirtualMemory(ULONG LastKernelAddress,
Length,
0,
&kernel_text_desc,
FALSE,
TRUE,
FALSE,
BoundaryAddressMultiple);
@ -189,7 +183,7 @@ MmInitVirtualMemory(ULONG LastKernelAddress,
Length,
0,
&kernel_init_desc,
FALSE,
TRUE,
FALSE,
BoundaryAddressMultiple);
@ -211,7 +205,7 @@ MmInitVirtualMemory(ULONG LastKernelAddress,
Length,
0,
&kernel_data_desc,
FALSE,
TRUE,
FALSE,
BoundaryAddressMultiple);
@ -224,7 +218,7 @@ MmInitVirtualMemory(ULONG LastKernelAddress,
Length,
0,
&kernel_param_desc,
FALSE,
TRUE,
FALSE,
BoundaryAddressMultiple);
@ -236,19 +230,19 @@ MmInitVirtualMemory(ULONG LastKernelAddress,
MiNonPagedPoolLength,
0,
&kernel_pool_desc,
FALSE,
TRUE,
FALSE,
BoundaryAddressMultiple);
BaseAddress = MiKernelMapStart;
BaseAddress = (PVOID)MM_KERNEL_MAP_BASE;
Status = MmCreateMemoryArea(NULL,
MmGetKernelAddressSpace(),
MEMORY_AREA_SYSTEM,
&BaseAddress,
MiKernelMapLength,
MM_KERNEL_MAP_SIZE,
0,
&MiKernelMapDescriptor,
FALSE,
TRUE,
FALSE,
BoundaryAddressMultiple);
@ -260,7 +254,7 @@ MmInitVirtualMemory(ULONG LastKernelAddress,
MmPagedPoolSize,
0,
&MiPagedPoolDescriptor,
FALSE,
TRUE,
FALSE,
BoundaryAddressMultiple);
@ -278,7 +272,7 @@ MmInitVirtualMemory(ULONG LastKernelAddress,
Length,
0,
&kernel_shared_data_desc,
FALSE,
TRUE,
FALSE,
BoundaryAddressMultiple);
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Pfn);
@ -411,6 +405,9 @@ MmInit1(ULONG FirstKrnlPhysAddr,
MmStats.NrTotalPages += 16;
#endif
MiInitKernelMap();
DbgPrint("Used memory %dKb\n", (MmStats.NrTotalPages * PAGE_SIZE) / 1024);
LastKernelAddress = (ULONG)MmInitializePageList((PVOID)FirstKrnlPhysAddr,
@ -442,11 +439,10 @@ MmInit1(ULONG FirstKrnlPhysAddr,
MmRawDeleteVirtualMapping((PVOID)(i));
}
DPRINT("Invalidating between %x and %x\n",
0xd0100000, 0xd0400000);
for (i=0xd0100000; i<0xd0400000; i+=PAGE_SIZE)
extern unsigned int pagetable_start, pagetable_end;
for (i = (ULONG_PTR)&pagetable_start; i < (ULONG_PTR)&pagetable_end; i += PAGE_SIZE)
{
MmRawDeleteVirtualMapping((PVOID)(i));
MmDeleteVirtualMapping(NULL, (PVOID)i, FALSE, NULL, NULL);
}
DPRINT("Almost done MmInit()\n");