[NTOSKRNL]

Bring over all current amd64 specific code from the amd64 branch

svn path=/trunk/; revision=48222
This commit is contained in:
Timo Kreuzer 2010-07-24 01:12:13 +00:00
parent 869cdbce37
commit bfc2638590
23 changed files with 10352 additions and 1 deletions

View file

@ -0,0 +1,114 @@
/*
* PROJECT: ReactOS Kernel
* LICENSE: BSD - See COPYING.ARM in the top level directory
* FILE: ntoskrnl/ex/fastinterlck.c
* PURPOSE: Portable Ex*Interlocked and REGISTER routines for amd64
* PROGRAMMERS: ReactOS Portable Systems Group
* Timo Kreuzer
*/
/* INCLUDES *******************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
#undef ExInterlockedAddLargeInteger
#undef ExInterlockedAddUlong
#undef ExInterlockedExtendZone
#undef ExInterlockedInsertHeadList
#undef ExInterlockedInsertTailList
#undef ExInterlockedPopEntryList
#undef ExInterlockedPushEntryList
#undef ExInterlockedRemoveHeadList
#undef ExpInterlockedFlushSList
#undef ExpInterlockedPopEntrySList
#undef ExpInterlockedPushEntrySList
/* FUNCTIONS ******************************************************************/
/*
 * Atomically adds Increment to the 64-bit value at Addend and returns the
 * value Addend held before the addition.  The spinlock parameter is kept
 * only for API compatibility; on amd64 the 64-bit interlocked add is
 * natively atomic, so the lock is never touched.
 */
LARGE_INTEGER
ExInterlockedAddLargeInteger(IN PLARGE_INTEGER Addend,
                             IN LARGE_INTEGER Increment,
                             IN PKSPIN_LOCK Lock)
{
    LARGE_INTEGER Previous;

    /* Lock-free: the intrinsic returns the pre-add value */
    Previous.QuadPart = _InterlockedExchangeAdd64(&Addend->QuadPart,
                                                  Increment.QuadPart);

    return Previous;
}
/*
 * Atomically adds Increment to the ULONG at Addend and returns the value
 * Addend held before the addition.  The spinlock is unused on amd64 (kept
 * for API compatibility only): the 32-bit interlocked add is atomic.
 */
ULONG
ExInterlockedAddUlong(IN PULONG Addend,
                      IN ULONG Increment,
                      PKSPIN_LOCK Lock)
{
    ULONG Previous;

    Previous = (ULONG)_InterlockedExchangeAdd((PLONG)Addend, Increment);

    return Previous;
}
/*
 * Inserts ListEntry at the head of the doubly-linked list ListHead while
 * holding Lock.
 *
 * Returns the entry that was at the head of the list before the insertion,
 * or NULL if the list was empty.
 */
PLIST_ENTRY
ExInterlockedInsertHeadList(IN PLIST_ENTRY ListHead,
                            IN PLIST_ENTRY ListEntry,
                            IN PKSPIN_LOCK Lock)
{
    KIRQL OldIrql;
    PLIST_ENTRY OldHead = NULL;

    KeAcquireSpinLock(Lock, &OldIrql);

    /* Capture the old first entry from the list HEAD before linking in the
       new entry.  (The previous code read ListEntry->Flink, which is not
       yet linked into the list at this point and therefore garbage.) */
    if (!IsListEmpty(ListHead)) OldHead = ListHead->Flink;

    InsertHeadList(ListHead, ListEntry);
    KeReleaseSpinLock(Lock, OldIrql);

    return OldHead;
}
/*
 * Inserts ListEntry at the tail of the doubly-linked list ListHead while
 * holding Lock.
 *
 * Returns the entry that was at the tail of the list before the insertion,
 * or NULL if the list was empty.
 */
PLIST_ENTRY
ExInterlockedInsertTailList(IN PLIST_ENTRY ListHead,
                            IN PLIST_ENTRY ListEntry,
                            IN PKSPIN_LOCK Lock)
{
    KIRQL OldIrql;
    PLIST_ENTRY OldTail = NULL;

    KeAcquireSpinLock(Lock, &OldIrql);

    /* Capture the old last entry from the list HEAD before linking in the
       new entry.  (The previous code read ListEntry->Blink, which is not
       yet linked into the list at this point and therefore garbage.) */
    if (!IsListEmpty(ListHead)) OldTail = ListHead->Blink;

    InsertTailList(ListHead, ListEntry);
    KeReleaseSpinLock(Lock, OldIrql);

    return OldTail;
}
/*
 * Removes and returns the first entry of the singly-linked list ListHead
 * while holding Lock.  Returns NULL if the list was empty.
 */
PSINGLE_LIST_ENTRY
ExInterlockedPopEntryList(IN PSINGLE_LIST_ENTRY ListHead,
                          IN PKSPIN_LOCK Lock)
{
    PSINGLE_LIST_ENTRY Entry;
    KIRQL OldIrql;

    /* Serialize against other Ex*Interlocked operations using this lock */
    KeAcquireSpinLock(Lock, &OldIrql);
    Entry = PopEntryList(ListHead);
    KeReleaseSpinLock(Lock, OldIrql);

    return Entry;
}
/*
 * Pushes ListEntry onto the front of the singly-linked list ListHead while
 * holding Lock.  Returns the previous first entry, or NULL if the list was
 * empty.
 */
PSINGLE_LIST_ENTRY
ExInterlockedPushEntryList(IN PSINGLE_LIST_ENTRY ListHead,
                           IN PSINGLE_LIST_ENTRY ListEntry,
                           IN PKSPIN_LOCK Lock)
{
    PSINGLE_LIST_ENTRY PreviousFirst;
    KIRQL OldIrql;

    KeAcquireSpinLock(Lock, &OldIrql);

    /* Remember the old first entry (NULL for an empty list), then link in
       the new one */
    PreviousFirst = ListHead->Next;
    PushEntryList(ListHead, ListEntry);

    KeReleaseSpinLock(Lock, OldIrql);

    return PreviousFirst;
}
/*
 * Removes and returns the first entry of the doubly-linked list ListHead
 * while holding Lock.  Returns NULL if the list was empty.
 */
PLIST_ENTRY
ExInterlockedRemoveHeadList(IN PLIST_ENTRY ListHead,
                            IN PKSPIN_LOCK Lock)
{
    PLIST_ENTRY Removed = NULL;
    KIRQL OldIrql;

    KeAcquireSpinLock(Lock, &OldIrql);

    /* Only unlink when the list actually has an entry; an empty list is
       reported by returning NULL */
    if (!IsListEmpty(ListHead)) Removed = RemoveHeadList(ListHead);

    KeReleaseSpinLock(Lock, OldIrql);

    return Removed;
}

View file

@ -0,0 +1,23 @@
/*
* PROJECT: ReactOS Kernel
* LICENSE: GPL - See COPYING in the top level directory
* FILE:            ntoskrnl/ke/i386/cpu.c
* PURPOSE:         Routines for CPU-level support
*                  NOTE(review): the FILE/PURPOSE lines above look copied
*                  from ke/i386/cpu.c, but this file only defines the amd64
*                  KdpGdbStubInit stub -- verify and fix the header.
* PROGRAMMERS: Timo Kreuzer (timo.kreuzer@reactos.org)
*/
/* INCLUDES ******************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
/* FUNCTIONS *****************************************************************/
/*
 * Initialization entry point for the GDB-stub kernel-debugger transport.
 *
 * WrapperTable - KD dispatch table the stub would populate with callbacks.
 * BootPhase    - kernel initialization phase being performed.
 *
 * Currently a stub on this architecture: it populates nothing and only
 * reports itself as unimplemented.
 */
VOID
NTAPI
KdpGdbStubInit(PKD_DISPATCH_TABLE WrapperTable,
               ULONG BootPhase)
{
    UNIMPLEMENTED;
}

View file

@ -0,0 +1,217 @@
/*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS Kernel
* FILE: ntoskrnl/kd/amd64/kdmemsup.c
* PURPOSE: Kernel Debugger Safe Memory Support
*
* PROGRAMMERS: arty
*/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
#define HIGH_PHYS_MASK 0x80000000
#define PAGE_TABLE_MASK 0x3ff
#define BIG_PAGE_SIZE (1<<22)
#define CR4_PAGE_SIZE_BIT 0x10
#define PDE_PRESENT_BIT 0x01
#define PDE_W_BIT 0x02
#define PDE_PWT_BIT 0x08
#define PDE_PCD_BIT 0x10
#define PDE_ACCESSED_BIT 0x20
#define PDE_DIRTY_BIT 0x40
#define PDE_PS_BIT 0x80
#define MI_KDBG_TMP_PAGE_1 (HYPER_SPACE + 0x400000 - PAGE_SIZE)
#define MI_KDBG_TMP_PAGE_0 (MI_KDBG_TMP_PAGE_1 - PAGE_SIZE)
/* VARIABLES ***************************************************************/
static BOOLEAN KdpPhysAccess = FALSE;
/*
 * Maps up to two physical pages covering [PhysAddr, PhysAddr + Len) into
 * the debugger's temporary virtual slots and returns a virtual address
 * through which the range can be accessed.
 *
 * PhysAddr - physical address to map.
 * Len      - number of bytes that will be accessed (may cross one page
 *            boundary at most).
 */
static
ULONG_PTR
KdpPhysMap(ULONG_PTR PhysAddr, LONG Len)
{
    MMPTE TempPte;
    PMMPTE PointerPte;
    ULONG_PTR VirtAddr;

    /* Build a present, writable, write-through, cache-disabled,
       accessed+dirty PTE; the page frame number is filled in below */
    TempPte.u.Long = PDE_PRESENT_BIT | PDE_W_BIT | PDE_PWT_BIT |
                     PDE_PCD_BIT | PDE_ACCESSED_BIT | PDE_DIRTY_BIT;

    /* If the requested range crosses a page boundary, also map the
       following physical page into the second temporary slot */
    if ((PhysAddr & (PAGE_SIZE - 1)) + Len > PAGE_SIZE)
    {
        TempPte.u.Hard.PageFrameNumber = (PhysAddr >> PAGE_SHIFT) + 1;
        PointerPte = MiAddressToPte((PVOID)MI_KDBG_TMP_PAGE_1);
        *PointerPte = TempPte;
        /* Recover the mapped virtual address from the self-map PTE
           address.  NOTE(review): the "<< 10" arithmetic is the 32-bit
           x86 recursive-mapping trick; it does not look correct for the
           amd64 4-level self-map -- verify against the amd64 layout. */
        VirtAddr = (ULONG_PTR)PointerPte << 10;
        KeInvalidateTlbEntry((PVOID)VirtAddr);
    }

    /* Map the first (or only) physical page into the first slot */
    TempPte.u.Hard.PageFrameNumber = PhysAddr >> PAGE_SHIFT;
    PointerPte = MiAddressToPte((PVOID)MI_KDBG_TMP_PAGE_0);
    *PointerPte = TempPte;
    VirtAddr = (ULONG_PTR)PointerPte << 10;
    KeInvalidateTlbEntry((PVOID)VirtAddr);

    /* Return the mapped address with the original page offset applied */
    return VirtAddr + (PhysAddr & (PAGE_SIZE - 1));
}
/*
 * Reads a 1/2/4/8 byte value from a physical address by temporarily
 * mapping it through KdpPhysMap.  Unsupported lengths read back as 0.
 */
static
ULONGLONG
KdpPhysRead(ULONG_PTR PhysAddr, LONG Len)
{
    ULONGLONG Data = 0;
    ULONG_PTR Mapped;

    /* Map the physical range into the temporary debugger pages */
    Mapped = KdpPhysMap(PhysAddr, Len);

    /* Access with a width that matches the requested length */
    if (Len == 1)
        Data = *(PUCHAR)Mapped;
    else if (Len == 2)
        Data = *(PUSHORT)Mapped;
    else if (Len == 4)
        Data = *(PULONG)Mapped;
    else if (Len == 8)
        Data = *(PULONGLONG)Mapped;

    return Data;
}
/*
 * Writes a 1/2/4/8 byte value to a physical address by temporarily
 * mapping it through KdpPhysMap.  Unsupported lengths write nothing.
 */
static
VOID
KdpPhysWrite(ULONG_PTR PhysAddr, LONG Len, ULONGLONG Value)
{
    ULONG_PTR Mapped;

    /* Map the physical range into the temporary debugger pages */
    Mapped = KdpPhysMap(PhysAddr, Len);

    /* Store with a width that matches the requested length */
    if (Len == 1)
        *(PUCHAR)Mapped = Value;
    else if (Len == 2)
        *(PUSHORT)Mapped = Value;
    else if (Len == 4)
        *(PULONG)Mapped = Value;
    else if (Len == 8)
        *(PULONGLONG)Mapped = Value;
}
/*
 * Translates a virtual address to a physical address by manually walking
 * the page tables rooted at the current CR3.
 *
 * Addr       - virtual address to translate.
 * ResultAddr - receives the physical address on success.
 *
 * Returns TRUE when the address is mapped, FALSE otherwise.
 *
 * NOTE(review): this walk uses the 32-bit two-level scheme (10/10/12 bit
 * split, 4-byte entries, 4MB large pages) even though the file lives in
 * the amd64 tree -- verify against the 4-level long-mode layout.
 */
BOOLEAN
NTAPI
KdpTranslateAddress(ULONG_PTR Addr, PULONG_PTR ResultAddr)
{
    ULONG_PTR CR3Value = __readcr3();
    ULONG_PTR CR4Value = __readcr4();

    /* Physical address of the page directory entry covering Addr */
    ULONG_PTR PageDirectory = (CR3Value & ~(PAGE_SIZE-1)) +
        ((Addr >> 22) * sizeof(ULONG));
    ULONG_PTR PageDirectoryEntry = KdpPhysRead(PageDirectory, sizeof(ULONG));

    /* Not present -> fail */
    if (!(PageDirectoryEntry & PDE_PRESENT_BIT))
    {
        return FALSE;
    }

    /* Big Page? (PS bit set in the PDE and large pages enabled in CR4) */
    if ((PageDirectoryEntry & PDE_PS_BIT) && (CR4Value & CR4_PAGE_SIZE_BIT))
    {
        /* Large page: base from the PDE plus the in-page offset */
        *ResultAddr = (PageDirectoryEntry & ~(BIG_PAGE_SIZE-1)) +
            (Addr & (BIG_PAGE_SIZE-1));
        return TRUE;
    }
    else
    {
        /* Second level: locate and read the page table entry */
        ULONG_PTR PageTableAddr =
            (PageDirectoryEntry & ~(PAGE_SIZE-1)) +
            ((Addr >> PAGE_SHIFT) & PAGE_TABLE_MASK) * sizeof(ULONG);
        ULONG_PTR PageTableEntry = KdpPhysRead(PageTableAddr, sizeof(ULONG));

        if (PageTableEntry & PDE_PRESENT_BIT)
        {
            /* Page frame from the PTE plus the in-page offset */
            *ResultAddr = (PageTableEntry & ~(PAGE_SIZE-1)) +
                (Addr & (PAGE_SIZE-1));
            return TRUE;
        }
    }
    return FALSE;
}
/*
 * Reads Len bytes (1/2/4/8) from virtual address Addr into Value without
 * risking a page fault once physical access is enabled.
 *
 * Returns FALSE only when the address cannot be translated; unsupported
 * lengths leave Value zeroed and still report success.
 */
BOOLEAN
NTAPI
KdpSafeReadMemory(ULONG_PTR Addr, LONG Len, PVOID Value)
{
    ULONG_PTR PhysicalAddr;

    /* Before physical access is enabled, trust the virtual address */
    if (!KdpPhysAccess)
    {
        memcpy(Value, (PVOID)Addr, Len);
        return TRUE;
    }

    /* Pre-clear the destination so unsupported lengths read back as 0 */
    memset(Value, 0, Len);

    /* An unmapped address is a failed read */
    if (!KdpTranslateAddress(Addr, &PhysicalAddr)) return FALSE;

    if (Len == 8)
        *(PULONGLONG)Value = KdpPhysRead(PhysicalAddr, Len);
    else if (Len == 4)
        *(PULONG)Value = KdpPhysRead(PhysicalAddr, Len);
    else if (Len == 2)
        *(PUSHORT)Value = KdpPhysRead(PhysicalAddr, Len);
    else if (Len == 1)
        *(PUCHAR)Value = KdpPhysRead(PhysicalAddr, Len);

    return TRUE;
}
/*
 * Writes Len bytes (1/2/4/8) of Value to virtual address Addr without
 * risking a page fault once physical access is enabled.
 *
 * Returns FALSE only when the address cannot be translated.
 */
BOOLEAN
NTAPI
KdpSafeWriteMemory(ULONG_PTR Addr, LONG Len, ULONGLONG Value)
{
    ULONG_PTR PhysicalAddr;

    /* Before physical access is enabled, trust the virtual address */
    if (!KdpPhysAccess)
    {
        memcpy((PVOID)Addr, &Value, Len);
        return TRUE;
    }

    /* An unmapped address is a failed write */
    if (!KdpTranslateAddress(Addr, &PhysicalAddr)) return FALSE;

    KdpPhysWrite(PhysicalAddr, Len, Value);
    return TRUE;
}
/*
 * Switches the debugger memory accessors (KdpSafeReadMemory /
 * KdpSafeWriteMemory) from direct virtual access to translated physical
 * access.  Presumably called once paging is sufficiently initialized for
 * the debugger to walk page tables -- confirm against the caller.
 */
VOID
NTAPI
KdpEnableSafeMem(VOID)
{
    KdpPhysAccess = TRUE;
}
/* EOF */

View file

@ -0,0 +1,330 @@
/* Interface between the opcode library and its callers.
Copyright 2001, 2002 Free Software Foundation, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Written by Cygnus Support, 1993.
The opcode library (libopcodes.a) provides instruction decoders for
a large variety of instruction sets, callable with an identical
interface, for making instruction-processing programs more independent
of the instruction set being processed. */
#ifndef DIS_ASM_H
#define DIS_ASM_H
#ifdef __cplusplus
extern "C" {
#endif
#if 0
/* #include <stdio.h> */
/* #include "bfd.h" */
#endif
typedef int (*fprintf_ftype) PARAMS((PTR, const char*, ...));
/* Classification of a decoded instruction; reported by decoders that
   support the insn_info fields of disassemble_info (see insn_info_valid). */
enum dis_insn_type {
  dis_noninsn,     /* Not a valid instruction */
  dis_nonbranch,   /* Not a branch instruction */
  dis_branch,      /* Unconditional branch */
  dis_condbranch,  /* Conditional branch */
  dis_jsr,         /* Jump to subroutine */
  dis_condjsr,     /* Conditional jump to subroutine */
  dis_dref,        /* Data reference instruction */
  dis_dref2        /* Two data references in instruction */
};
/* This struct is passed into the instruction decoding routine,
   and is passed back out into each callback.  The various fields are used
   for conveying information from your main routine into your callbacks,
   for passing information into the instruction decoders (such as the
   addresses of the callback functions), or for passing information
   back from the instruction decoders to their callers.
   It must be initialized before it is first passed; this can be done
   by hand, or using one of the initialization macros below.

   NOTE(review): several members (endian, section, symbols, num_symbols)
   are compiled out with "#if 0" in this trimmed copy, yet the
   INIT_DISASSEMBLE_INFO* macros at the bottom of this header still assign
   to them -- using those macros as-is will not compile. */
typedef struct disassemble_info {
  fprintf_ftype fprintf_func;
  PTR stream;
  PTR application_data;
  /* Target description.  We could replace this with a pointer to the bfd,
     but that would require one.  There currently isn't any such requirement
     so to avoid introducing one we record these explicitly.  */
  /* The bfd_flavour.  This can be bfd_target_unknown_flavour.  */
  enum bfd_flavour flavour;
  /* The bfd_arch value.  */
  enum bfd_architecture arch;
  /* The bfd_mach value.  */
  unsigned long mach;
#if 0
  enum bfd_endian endian;
#endif
  /* An arch/mach-specific bitmask of selected instruction subsets, mainly
     for processors with run-time-switchable instruction sets.  The default,
     zero, means that there is no constraint.  CGEN-based opcodes ports
     may use ISA_foo masks.  */
  unsigned long insn_sets;
#if 0
  /* Some targets need information about the current section to accurately
     display insns.  If this is NULL, the target disassembler function
     will have to make its best guess.  */
  asection *section;
  /* An array of pointers to symbols either at the location being disassembled
     or at the start of the function being disassembled.  The array is sorted
     so that the first symbol is intended to be the one used.  The others are
     present for any misc. purposes.  This is not set reliably, but if it is
     not NULL, it is correct.  */
  asymbol **symbols;
  /* Number of symbols in array.  */
  int num_symbols;
#endif
  /* For use by the disassembler.
     The top 16 bits are reserved for public use (and are documented here).
     The bottom 16 bits are for the internal use of the disassembler.  */
  unsigned long flags;
#define INSN_HAS_RELOC 0x80000000
  PTR private_data;
  /* Function used to get bytes to disassemble.  MEMADDR is the
     address of the stuff to be disassembled, MYADDR is the address to
     put the bytes in, and LENGTH is the number of bytes to read.
     INFO is a pointer to this struct.
     Returns an errno value or 0 for success.  */
  int (*read_memory_func)
    PARAMS ((bfd_vma memaddr, bfd_byte *myaddr, unsigned int length,
             struct disassemble_info *info));
  /* Function which should be called if we get an error that we can't
     recover from.  STATUS is the errno value from read_memory_func and
     MEMADDR is the address that we were trying to read.  INFO is a
     pointer to this struct.  */
  void (*memory_error_func)
    PARAMS ((int status, bfd_vma memaddr, struct disassemble_info *info));
  /* Function called to print ADDR.  */
  void (*print_address_func)
    PARAMS ((bfd_vma addr, struct disassemble_info *info));
  /* Function called to determine if there is a symbol at the given ADDR.
     If there is, the function returns 1, otherwise it returns 0.
     This is used by ports which support an overlay manager where
     the overlay number is held in the top part of an address.  In
     some circumstances we want to include the overlay number in the
     address, (normally because there is a symbol associated with
     that address), but sometimes we want to mask out the overlay bits.  */
  int (* symbol_at_address_func)
    PARAMS ((bfd_vma addr, struct disassemble_info * info));
  /* These are for buffer_read_memory.  */
  bfd_byte *buffer;
  bfd_vma buffer_vma;
  unsigned int buffer_length;
  /* This variable may be set by the instruction decoder.  It suggests
      the number of bytes objdump should display on a single line.  If
      the instruction decoder sets this, it should always set it to
      the same value in order to get reasonable looking output.  */
  int bytes_per_line;
  /* the next two variables control the way objdump displays the raw data */
  /* For example, if bytes_per_line is 8 and bytes_per_chunk is 4, the */
  /* output will look like this:
     00:   00000000 00000000
     with the chunks displayed according to "display_endian". */
  int bytes_per_chunk;
  enum bfd_endian display_endian;
  /* Number of octets per incremented target address
     Normally one, but some DSPs have byte sizes of 16 or 32 bits.  */
  unsigned int octets_per_byte;
  /* Results from instruction decoders.  Not all decoders yet support
     this information.  This info is set each time an instruction is
     decoded, and is only valid for the last such instruction.
     To determine whether this decoder supports this information, set
     insn_info_valid to 0, decode an instruction, then check it.  */
  char insn_info_valid;         /* Branch info has been set. */
  char branch_delay_insns;      /* How many sequential insn's will run before
                                   a branch takes effect.  (0 = normal) */
  char data_size;               /* Size of data reference in insn, in bytes */
  enum dis_insn_type insn_type; /* Type of instruction */
  bfd_vma target;               /* Target address of branch or dref, if known;
                                   zero if unknown.  */
  bfd_vma target2;              /* Second target address for dref2 */
  /* Command line options specific to the target disassembler.  */
  char * disassembler_options;
} disassemble_info;
/* Standard disassemblers. Disassemble one instruction at the given
target address. Return number of octets processed. */
typedef int (*disassembler_ftype)
PARAMS((bfd_vma, disassemble_info *));
extern int print_insn_big_mips PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_little_mips PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_i386 PARAMS ((bfd_vma, disassemble_info *));
extern int print_insn_i386_att PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_i386_intel PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_ia64 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_i370 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_m68hc11 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_m68hc12 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_m68k PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_z8001 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_z8002 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_h8300 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_h8300h PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_h8300s PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_h8500 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_alpha PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_big_arm PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_little_arm PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_sparc PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_big_a29k PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_little_a29k PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_avr PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_d10v PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_d30v PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_dlx PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_fr30 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_hppa PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_i860 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_i960 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_ip2k PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_m32r PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_m88k PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_mcore PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_mmix PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_mn10200 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_mn10300 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_msp430 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_ns32k PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_openrisc PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_big_or32 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_little_or32 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_pdp11 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_pj PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_big_powerpc PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_little_powerpc PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_rs6000 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_s390 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_sh PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_tic30 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_tic4x PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_tic54x PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_tic80 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_v850 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_vax PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_w65 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_xstormy16 PARAMS ((bfd_vma, disassemble_info*));
extern int print_insn_sh64 PARAMS ((bfd_vma, disassemble_info *));
extern int print_insn_sh64x_media PARAMS ((bfd_vma, disassemble_info *));
extern int print_insn_frv PARAMS ((bfd_vma, disassemble_info *));
extern int print_insn_iq2000 PARAMS ((bfd_vma, disassemble_info *));
extern disassembler_ftype arc_get_disassembler PARAMS ((void *));
extern disassembler_ftype cris_get_disassembler PARAMS ((bfd *));
extern void print_mips_disassembler_options PARAMS ((FILE *));
extern void print_ppc_disassembler_options PARAMS ((FILE *));
extern void print_arm_disassembler_options PARAMS ((FILE *));
extern void parse_arm_disassembler_option PARAMS ((char *));
extern int get_arm_regname_num_options PARAMS ((void));
extern int set_arm_regname_option PARAMS ((int));
extern int get_arm_regnames PARAMS ((int, const char **, const char **, const char ***));
/* Fetch the disassembler for a given BFD, if that support is available. */
extern disassembler_ftype disassembler PARAMS ((bfd *));
/* Document any target specific options available from the disassembler. */
extern void disassembler_usage PARAMS ((FILE *));
/* This block of definitions is for particular callers who read instructions
into a buffer before calling the instruction decoder. */
/* Here is a function which callers may wish to use for read_memory_func.
It gets bytes from a buffer. */
extern int buffer_read_memory
PARAMS ((bfd_vma, bfd_byte *, unsigned int, struct disassemble_info *));
/* This function goes with buffer_read_memory.
It prints a message using info->fprintf_func and info->stream. */
extern void perror_memory PARAMS ((int, bfd_vma, struct disassemble_info *));
/* Just print the address in hex. This is included for completeness even
though both GDB and objdump provide their own (to print symbolic
addresses). */
extern void generic_print_address
PARAMS ((bfd_vma, struct disassemble_info *));
/* Always true. */
extern int generic_symbol_at_address
PARAMS ((bfd_vma, struct disassemble_info *));
/* Macro to initialize a disassemble_info struct.  This should be called
   by all applications creating such a struct.

   NOTE: this trimmed copy of the header compiles out the "endian" member
   (see the #if 0 around it in the struct above), so the original
   assignment to (INFO).endian has been removed -- it would not compile
   against the struct as defined here.  */
#define INIT_DISASSEMBLE_INFO(INFO, STREAM, FPRINTF_FUNC) \
  (INFO).flavour = bfd_target_unknown_flavour, \
  (INFO).arch = bfd_arch_unknown, \
  (INFO).mach = 0, \
  (INFO).insn_sets = 0, \
  (INFO).octets_per_byte = 1, \
  INIT_DISASSEMBLE_INFO_NO_ARCH(INFO, STREAM, FPRINTF_FUNC)
/* Call this macro to initialize only the internal variables for the
   disassembler.  Architecture dependent things such as byte order, or machine
   variant are not touched by this macro.  This makes things much easier for
   GDB which must initialize these things separately.

   NOTE: the "section", "symbols" and "num_symbols" members are compiled
   out with #if 0 in this trimmed copy of the struct, so their
   initializers have been dropped -- the original macro would not compile
   against the struct as defined here.  */
#define INIT_DISASSEMBLE_INFO_NO_ARCH(INFO, STREAM, FPRINTF_FUNC) \
  (INFO).fprintf_func = (fprintf_ftype)(FPRINTF_FUNC), \
  (INFO).stream = (PTR)(STREAM), \
  (INFO).private_data = NULL, \
  (INFO).buffer = NULL, \
  (INFO).buffer_vma = 0, \
  (INFO).buffer_length = 0, \
  (INFO).read_memory_func = buffer_read_memory, \
  (INFO).memory_error_func = perror_memory, \
  (INFO).print_address_func = generic_print_address, \
  (INFO).symbol_at_address_func = generic_symbol_at_address, \
  (INFO).flags = 0, \
  (INFO).bytes_per_line = 0, \
  (INFO).bytes_per_chunk = 0, \
  (INFO).display_endian = BFD_ENDIAN_UNKNOWN, \
  (INFO).disassembler_options = NULL, \
  (INFO).insn_info_valid = 0
#ifdef __cplusplus
}
#endif
#endif /* ! defined (DIS_ASM_H) */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,111 @@
/*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/kdbg/amd64/kdb.c
* PURPOSE: Kernel Debugger
* PROGRAMMERS:
*/
/* INCLUDES ******************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
ULONG
NTAPI
KiEspFromTrapFrame(IN PKTRAP_FRAME TrapFrame)
{
return TrapFrame->Rsp;
}
/*
 * Stores a new stack pointer into a trap frame.  For kernel-mode frames
 * the stack pointer may never be lowered (that would let the debugger
 * corrupt the stack currently in use); violating this bugchecks.
 */
VOID
NTAPI
KiEspToTrapFrame(IN PKTRAP_FRAME TrapFrame,
                 IN ULONG_PTR Esp)
{
    KIRQL OldIrql;
    ULONG_PTR Previous;  /* was ULONG: truncated the 64-bit Rsp on amd64,
                            breaking the "Esp < Previous" check below */

    /* Raise to APC_LEVEL if needed */
    OldIrql = KeGetCurrentIrql();
    if (OldIrql < APC_LEVEL) KeRaiseIrql(APC_LEVEL, &OldIrql);

    /* Get the old stack pointer */
    Previous = KiEspFromTrapFrame(TrapFrame);

    /* Check if this is user-mode */
    if ((TrapFrame->SegCs & MODE_MASK))
    {
        /* Write it directly */
        TrapFrame->Rsp = Esp;
    }
    else
    {
        /* Don't allow the stack pointer to be lowered, this is illegal */
        if (Esp < Previous) KeBugCheckEx(SET_OF_INVALID_CONTEXT,
                                         Esp,
                                         Previous,
                                         (ULONG_PTR)TrapFrame,
                                         0);

        /* Create an edit frame, check if it was already edited.
           NOTE(review): FRAME_EDITED handling is inherited from i386;
           verify it is meaningful for amd64 trap frames. */
        if (!(TrapFrame->SegCs & FRAME_EDITED))
        {
            /* Update the value */
            TrapFrame->Rsp = Esp;
        }
        else
        {
            /* Check if the stack pointer changed */
            if (Previous != Esp)
            {
                /* Save CS */
                TrapFrame->SegCs &= ~FRAME_EDITED;

                /* Save the stack pointer */
                TrapFrame->Rsp = Esp;
            }
        }
    }

    /* Restore IRQL */
    if (OldIrql < APC_LEVEL) KeLowerIrql(OldIrql);
}
/*
 * Returns the stack-segment selector recorded in a trap frame.  User-mode
 * frames report the saved SS with the RPL forced to user; kernel-mode
 * frames report the TSS selector.
 */
ULONG
NTAPI
KiSsFromTrapFrame(IN PKTRAP_FRAME TrapFrame)
{
    /* Kernel mode uses a fixed selector on amd64 */
    if ((TrapFrame->SegCs & MODE_MASK) == 0)
    {
        return KGDT64_SYS_TSS;
    }

    /* User mode: return the saved SS with RPL 3 */
    return TrapFrame->SegSs | RPL_MASK;
}
/*
 * Stores a stack-segment selector into a trap frame.  Only user-mode
 * frames carry a meaningful SS; kernel-mode frames are left untouched.
 */
VOID
NTAPI
KiSsToTrapFrame(IN PKTRAP_FRAME TrapFrame,
                IN ULONG Ss)
{
    /* Only the 16-bit selector portion is meaningful */
    Ss &= 0xFFFF;

    /* Save only for user-mode frames, with the RPL forced to user */
    if (TrapFrame->SegCs & MODE_MASK)
    {
        TrapFrame->SegSs = Ss | RPL_MASK;
    }
}

View file

@ -0,0 +1,152 @@
#include <reactos/asm.h>
#include <ndk/amd64/asm.h>
.globl KdbEnter
KdbEnter:

    /*
     * Manual entry point into the kernel debugger (KDBG).  Builds a
     * KTRAP_FRAME on the current stack, calls
     * KdbEnterDebuggerException(NULL, KernelMode, NULL, TrapFrame,
     * FirstChance = 1), then unwinds and returns to the caller.
     */

    /* save flags */
    pushfq
    // .pushreg ?

    /* Make room for a KTRAP_FRAME */
    sub rsp, SIZE_KTRAP_FRAME
    // .allocstack SIZE_KTRAP_FRAME

    /* Save rbp */
    mov [rsp + KTRAP_FRAME_Rbp], rbp

    /* Save non-volatile registers */
    mov [rsp + KTRAP_FRAME_Rbx], rbx
    mov [rsp + KTRAP_FRAME_Rdi], rdi
    mov [rsp + KTRAP_FRAME_Rsi], rsi

    /* Save volatile registers */
    mov [rsp + KTRAP_FRAME_Rax], rax
    mov [rsp + KTRAP_FRAME_Rcx], rcx
    mov [rsp + KTRAP_FRAME_Rdx], rdx
    mov [rsp + KTRAP_FRAME_R8], r8
    mov [rsp + KTRAP_FRAME_R9], r9
    mov [rsp + KTRAP_FRAME_R10], r10
    mov [rsp + KTRAP_FRAME_R11], r11

    /* Save xmm registers.
       NOTE(review): the xmm registers are captured here but never restored
       on the way out -- confirm the debugger cannot clobber them. */
    movdqa [rsp + KTRAP_FRAME_Xmm0], xmm0
    movdqa [rsp + KTRAP_FRAME_Xmm1], xmm1
    movdqa [rsp + KTRAP_FRAME_Xmm2], xmm2
    movdqa [rsp + KTRAP_FRAME_Xmm3], xmm3
    movdqa [rsp + KTRAP_FRAME_Xmm4], xmm4
    movdqa [rsp + KTRAP_FRAME_Xmm5], xmm5

    /* Save cs and previous mode (low bit of cs distinguishes ring) */
    mov ax, cs
    mov [rsp + KTRAP_FRAME_SegCs], ax
    and ax, 1
    mov [rsp + KTRAP_FRAME_PreviousMode], al

    /* Save segment selectors */
    mov ax, ds
    mov [rsp + KTRAP_FRAME_SegDs], ax
    mov ax, es
    mov [rsp + KTRAP_FRAME_SegEs], ax
    mov ax, fs
    mov [rsp + KTRAP_FRAME_SegFs], ax
    mov ax, gs
    mov [rsp + KTRAP_FRAME_SegGs], ax

    /* Save previous irql (CR8 holds the TPR/IRQL on amd64) */
    mov rax, cr8
    mov [rsp + KTRAP_FRAME_PreviousIrql], al

    /* Save debug registers */
    mov rax, dr0
    mov [rsp + KTRAP_FRAME_Dr0], rax
    mov rax, dr1
    mov [rsp + KTRAP_FRAME_Dr1], rax
    mov rax, dr2
    mov [rsp + KTRAP_FRAME_Dr2], rax
    mov rax, dr3
    mov [rsp + KTRAP_FRAME_Dr3], rax
    mov rax, dr6
    mov [rsp + KTRAP_FRAME_Dr6], rax
    mov rax, dr7
    mov [rsp + KTRAP_FRAME_Dr7], rax

    /* Point rbp where rsp was before (at the pushed RFLAGS) and record
       that as the frame's stack pointer */
    lea rbp, [rsp + SIZE_KTRAP_FRAME]
    mov [rsp + KTRAP_FRAME_Rsp], rbp

    /* Store the EFLAGS we previously pushed on the stack */
    mov rax, [rbp + 8]
    mov [rsp + KTRAP_FRAME_EFlags], rax

    /* Get RIP from the stack (the caller's return address) */
    mov rax, [rbp + 16]
    mov [rsp + KTRAP_FRAME_Rip], rax

    /* Make sure the direction flag is cleared */
    cld

    /* Clear all breakpoint enables in dr7 so hardware breakpoints cannot
       fire while inside the debugger. */
    mov rax, dr7
    and rax, 0xFFFF0000
    mov dr7, rax

    /* Call KDB: KdbEnterDebuggerException(ExceptionRecord = NULL,
       PreviousMode = KernelMode, Context = NULL, TrapFrame, FirstChance) */
    mov byte ptr [rsp + KTRAP_FRAME_P5], 1 /* FirstChance */
    mov r9, rsp /* Pointer to the trap frame */
    mov r8, 0 /* Context */
    mov dl, 0 /* PreviousMode (KernelMode) */
    mov rcx, 0 /* ExceptionRecord */
    call KdbEnterDebuggerException

    /* Restore segment selectors.
       NOTE(review): ds/es/fs are reloaded but gs is not -- verify this is
       intentional (gs holds the PCR base on amd64). */
    mov ax, [rsp + KTRAP_FRAME_SegDs]
    mov ds, ax
    mov ax, [rsp + KTRAP_FRAME_SegEs]
    mov es, ax
    mov ax, [rsp + KTRAP_FRAME_SegFs]
    mov fs, ax

    /* Restore non-volatile registers */
    mov rbx, [rsp + KTRAP_FRAME_Rbx]
    mov rdi, [rsp + KTRAP_FRAME_Rdi]
    mov rsi, [rsp + KTRAP_FRAME_Rsi]

    /* Restore volatile registers */
    mov rax, [rsp + KTRAP_FRAME_Rax]
    mov rcx, [rsp + KTRAP_FRAME_Rcx]
    mov rdx, [rsp + KTRAP_FRAME_Rdx]
    mov r8, [rsp + KTRAP_FRAME_R8]
    mov r9, [rsp + KTRAP_FRAME_R9]
    mov r10, [rsp + KTRAP_FRAME_R10]
    mov r11, [rsp + KTRAP_FRAME_R11]

    /* Restore RSP: KTRAP_FRAME_Rsp points at the RFLAGS value pushed on
       entry, so the following popfq/ret unwind the whole frame */
    mov rsp, [rsp + KTRAP_FRAME_Rsp]

    /* Restore EFLAGS */
    popfq

    ret
.globl KdbpStackSwitchAndCall
KdbpStackSwitchAndCall:

    /*
     * KdbpStackSwitchAndCall(rcx = NewStack, rdx = Function)
     * Invokes Function on the stack given in rcx, then switches back to
     * the original stack and returns.
     */

    /* Save old stack pointer */
    mov rax, rsp

    /* Set new stack */
    mov rsp, rcx

    /* Save old stack on new stack */
    push rax

    /* Call function */
    call rdx

    /* Restore old stack */
    pop rax
    mov rsp, rax

    /* Return */
    ret

View file

@ -0,0 +1,64 @@
/*
* FILE:            ntoskrnl/ke/i386/boot.S (NOTE(review): path looks stale;
*                  this is amd64 startup code -- verify the header)
* COPYRIGHT: See COPYING in the top level directory
* PURPOSE: FreeLDR Wrapper Bootstrap Code and Bootstrap Trampoline
* PROGRAMMERs: Alex Ionescu (alex@relsoft.net)
* Thomas Weidenmueller <w3seek@reactos.org>
*/
/* INCLUDES ******************************************************************/
#include <reactos/asm.h>
#include <ndk/amd64/asm.h>
EXTERN KiInitializeKernelAndGotoIdleLoop:PROC
/* GLOBALS *******************************************************************/
/* FUNCTIONS *****************************************************************/
.code64
.text
/**
* VOID
* KiSetupStackAndInitializeKernel(
* IN PKPROCESS InitProcess, <rsp + 0x08, rcx>
* IN PKTHREAD InitThread, <rsp + 0x10, rdx>
* IN PVOID IdleStack, <rsp + 0x18, r8>
* IN PKPRCB Prcb, <rsp + 0x20, r9>
* IN CCHAR Number, <rsp + 0x28>
* IN PLOADER_PARAMETER_BLOCK LoaderBlock) <rsp + 0x30>
*/
PUBLIC KiSetupStackAndInitializeKernel
.PROC KiSetupStackAndInitializeKernel

    /* Save current stack (source for the parameter copy below) */
    mov rsi, rsp

    /* Setup the new stack: load the kernel data selector into ss and
       switch rsp onto the idle stack passed in r8 */
    mov ax, HEX(18)
    mov ss, ax
    mov rsp, r8
    sub rsp, HEX(300) // FIXME

    /* Reserve 7 quadwords (0x38 bytes) on the new stack for the copy
       below: the return address slot plus the 0x08..0x30 parameter area
       described in the comment above */
    sub rsp, HEX(38)
    .ENDPROLOG

    /* Copy the 7 quadwords from the old stack (rsi) to the new one (rdi) */
    mov rdi, rsp
    movsq
    movsq
    movsq
    movsq
    movsq
    movsq
    movsq

    /* Tail-jump: KiInitializeKernelAndGotoIdleLoop does not return */
    jmp KiInitializeKernelAndGotoIdleLoop
.ENDP KiSetupStackAndInitializeKernel
END

View file

@ -0,0 +1,250 @@
/*
* PROJECT: ReactOS Kernel
* LICENSE: GPL - See COPYING in the top level directory
* PURPOSE: CONTEXT related functions
* PROGRAMMERS: Timo Kreuzer (timo.kreuzer@reactos.org)
*/
/* INCLUDES ******************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
/* FUNCTIONS *****************************************************************/
/*
 * Copies the state selected by a CONTEXT record into a trap frame and its
 * companion exception frame (the exception frame holds the non-volatile
 * half of the state: R12-R15 and Xmm6-Xmm15).
 *
 * NOTE(review): the ContextFlags parameter is not used; the selection below
 * is driven by Context->ContextFlags instead -- verify this is intended.
 * NOTE(review): PreviousMode is also unused here; no sanitization of
 * user-supplied control/debug register values is performed yet.
 */
VOID
NTAPI
KeContextToTrapFrame(IN PCONTEXT Context,
                     IN OUT PKEXCEPTION_FRAME ExceptionFrame,
                     IN OUT PKTRAP_FRAME TrapFrame,
                     IN ULONG ContextFlags,
                     IN KPROCESSOR_MODE PreviousMode)
{
    KIRQL OldIrql;

    /* Do this at APC_LEVEL so the frames cannot change under us */
    OldIrql = KeGetCurrentIrql();
    if (OldIrql < APC_LEVEL) KeRaiseIrql(APC_LEVEL, &OldIrql);

    /* Handle integer registers: volatiles go to the trap frame,
       non-volatiles (R12-R15) to the exception frame */
    if ((Context->ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER)
    {
        TrapFrame->Rax = Context->Rax;
        TrapFrame->Rbx = Context->Rbx;
        TrapFrame->Rcx = Context->Rcx;
        TrapFrame->Rdx = Context->Rdx;
        TrapFrame->Rsi = Context->Rsi;
        TrapFrame->Rdi = Context->Rdi;
        TrapFrame->Rbp = Context->Rbp;
        TrapFrame->R8 = Context->R8;
        TrapFrame->R9 = Context->R9;
        TrapFrame->R10 = Context->R10;
        TrapFrame->R11 = Context->R11;
        ExceptionFrame->R12 = Context->R12;
        ExceptionFrame->R13 = Context->R13;
        ExceptionFrame->R14 = Context->R14;
        ExceptionFrame->R15 = Context->R15;
    }

    /* Handle floating point registers; only done for frames whose CS
       indicates user mode (SegCs & MODE_MASK) */
    if (((Context->ContextFlags & CONTEXT_FLOATING_POINT) ==
        CONTEXT_FLOATING_POINT) && (Context->SegCs & MODE_MASK))
    {
        TrapFrame->Xmm0 = Context->Xmm0;
        TrapFrame->Xmm1 = Context->Xmm1;
        TrapFrame->Xmm2 = Context->Xmm2;
        TrapFrame->Xmm3 = Context->Xmm3;
        TrapFrame->Xmm4 = Context->Xmm4;
        TrapFrame->Xmm5 = Context->Xmm5;
        ExceptionFrame->Xmm6 = Context->Xmm6;
        ExceptionFrame->Xmm7 = Context->Xmm7;
        ExceptionFrame->Xmm8 = Context->Xmm8;
        ExceptionFrame->Xmm9 = Context->Xmm9;
        ExceptionFrame->Xmm10 = Context->Xmm10;
        ExceptionFrame->Xmm11 = Context->Xmm11;
        ExceptionFrame->Xmm12 = Context->Xmm12;
        ExceptionFrame->Xmm13 = Context->Xmm13;
        ExceptionFrame->Xmm14 = Context->Xmm14;
        ExceptionFrame->Xmm15 = Context->Xmm15;
    }

    /* Handle control registers */
    if ((Context->ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL)
    {
        /* Check if this was a Kernel Trap */
        if (Context->SegCs == KGDT64_R0_CODE)
        {
            /* Set valid selectors */
            TrapFrame->SegCs = KGDT64_R0_CODE;
            TrapFrame->SegSs = KGDT64_R0_DATA;
        }
        else
        {
            /* Copy selectors */
            TrapFrame->SegCs = Context->SegCs;
            TrapFrame->SegSs = Context->SegSs;
        }

        /* RIP, RSP, EFLAGS */
        TrapFrame->Rip = Context->Rip;
        TrapFrame->Rsp = Context->Rsp;
        TrapFrame->EFlags = Context->EFlags;
    }

    /* Handle segment selectors */
    if ((Context->ContextFlags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS)
    {
        /* Check if this was a Kernel Trap */
        if (Context->SegCs == KGDT64_R0_CODE)
        {
            /* Kernel frames always carry the flat user data selectors */
            TrapFrame->SegDs = KGDT64_R3_DATA | RPL_MASK;
            TrapFrame->SegEs = KGDT64_R3_DATA | RPL_MASK;
            TrapFrame->SegFs = KGDT64_R3_CMTEB | RPL_MASK;
            TrapFrame->SegGs = KGDT64_R3_DATA | RPL_MASK;
        }
        else
        {
            /* Copy selectors */
            TrapFrame->SegDs = Context->SegDs;
            TrapFrame->SegEs = Context->SegEs;
            TrapFrame->SegFs = Context->SegFs;
            TrapFrame->SegGs = Context->SegGs;
        }
    }

    /* Handle debug registers; copied verbatim, no validation */
    if ((Context->ContextFlags & CONTEXT_DEBUG_REGISTERS) ==
        CONTEXT_DEBUG_REGISTERS)
    {
        /* Copy the debug registers */
        TrapFrame->Dr0 = Context->Dr0;
        TrapFrame->Dr1 = Context->Dr1;
        TrapFrame->Dr2 = Context->Dr2;
        TrapFrame->Dr3 = Context->Dr3;
        TrapFrame->Dr6 = Context->Dr6;
        TrapFrame->Dr7 = Context->Dr7;
    }

    /* Restore IRQL */
    if (OldIrql < APC_LEVEL) KeLowerIrql(OldIrql);
}
/*
 * Builds a CONTEXT record from a trap frame / exception frame pair.
 * Context->ContextFlags selects which register groups the caller wants
 * (the inverse of KeContextToTrapFrame).
 *
 * NOTE(review): the legacy x87 FltSave area is not copied -- confirm
 * whether that is intentional for amd64.
 */
VOID
NTAPI
KeTrapFrameToContext(IN PKTRAP_FRAME TrapFrame,
                     IN PKEXCEPTION_FRAME ExceptionFrame,
                     IN OUT PCONTEXT Context)
{
    KIRQL OldIrql;

    /* Do this at APC_LEVEL so the frames cannot change under us */
    OldIrql = KeGetCurrentIrql();
    if (OldIrql < APC_LEVEL) KeRaiseIrql(APC_LEVEL, &OldIrql);

    /* Handle integer registers: volatiles come from the trap frame,
       non-volatiles (R12-R15) from the exception frame */
    if ((Context->ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER)
    {
        Context->Rax = TrapFrame->Rax;
        Context->Rbx = TrapFrame->Rbx;
        Context->Rcx = TrapFrame->Rcx;
        Context->Rdx = TrapFrame->Rdx;
        Context->Rsi = TrapFrame->Rsi;
        Context->Rdi = TrapFrame->Rdi;
        Context->Rbp = TrapFrame->Rbp;
        Context->R8 = TrapFrame->R8;
        Context->R9 = TrapFrame->R9;
        Context->R10 = TrapFrame->R10;
        Context->R11 = TrapFrame->R11;
        Context->R12 = ExceptionFrame->R12;
        Context->R13 = ExceptionFrame->R13;
        Context->R14 = ExceptionFrame->R14;
        Context->R15 = ExceptionFrame->R15;
    }

    /* Handle floating point registers; only for user-mode frames
       (trap frame CS has a ring bit set) */
    if (((Context->ContextFlags & CONTEXT_FLOATING_POINT) ==
        CONTEXT_FLOATING_POINT) && (TrapFrame->SegCs & MODE_MASK))
    {
        Context->Xmm0 = TrapFrame->Xmm0;
        Context->Xmm1 = TrapFrame->Xmm1;
        Context->Xmm2 = TrapFrame->Xmm2;
        Context->Xmm3 = TrapFrame->Xmm3;
        Context->Xmm4 = TrapFrame->Xmm4;
        Context->Xmm5 = TrapFrame->Xmm5;
        Context->Xmm6 = ExceptionFrame->Xmm6;
        Context->Xmm7 = ExceptionFrame->Xmm7;
        Context->Xmm8 = ExceptionFrame->Xmm8;
        Context->Xmm9 = ExceptionFrame->Xmm9;
        Context->Xmm10 = ExceptionFrame->Xmm10;
        Context->Xmm11 = ExceptionFrame->Xmm11;
        Context->Xmm12 = ExceptionFrame->Xmm12;
        Context->Xmm13 = ExceptionFrame->Xmm13;
        Context->Xmm14 = ExceptionFrame->Xmm14;
        Context->Xmm15 = ExceptionFrame->Xmm15;
    }

    /* Handle control registers */
    if ((Context->ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL)
    {
        /* Check if this was a Kernel Trap */
        if (TrapFrame->SegCs == KGDT64_R0_CODE)
        {
            /* Set valid selectors */
            Context->SegCs = KGDT64_R0_CODE;
            Context->SegSs = KGDT64_R0_DATA;
        }
        else
        {
            /* Copy selectors */
            Context->SegCs = TrapFrame->SegCs;
            Context->SegSs = TrapFrame->SegSs;
        }

        /* Copy RIP, RSP, EFLAGS */
        Context->Rip = TrapFrame->Rip;
        Context->Rsp = TrapFrame->Rsp;
        Context->EFlags = TrapFrame->EFlags;
    }

    /* Handle segment selectors */
    if ((Context->ContextFlags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS)
    {
        /* Check if this was a Kernel Trap */
        if (TrapFrame->SegCs == KGDT64_R0_CODE)
        {
            /* Kernel frames report the canonical flat user selectors */
            Context->SegDs = KGDT64_R3_DATA | RPL_MASK;
            Context->SegEs = KGDT64_R3_DATA | RPL_MASK;
            Context->SegFs = KGDT64_R3_CMTEB | RPL_MASK;
            Context->SegGs = KGDT64_R3_DATA | RPL_MASK;
        }
        else
        {
            /* Copy selectors */
            Context->SegDs = TrapFrame->SegDs;
            Context->SegEs = TrapFrame->SegEs;
            Context->SegFs = TrapFrame->SegFs;
            Context->SegGs = TrapFrame->SegGs;
        }
    }

    /* Handle debug registers */
    if ((Context->ContextFlags & CONTEXT_DEBUG_REGISTERS) ==
        CONTEXT_DEBUG_REGISTERS)
    {
        /* Copy the debug registers */
        Context->Dr0 = TrapFrame->Dr0;
        Context->Dr1 = TrapFrame->Dr1;
        Context->Dr2 = TrapFrame->Dr2;
        Context->Dr3 = TrapFrame->Dr3;
        Context->Dr6 = TrapFrame->Dr6;
        Context->Dr7 = TrapFrame->Dr7;
    }

    /* Restore IRQL */
    if (OldIrql < APC_LEVEL) KeLowerIrql(OldIrql);
}

View file

@ -0,0 +1,589 @@
/*
* PROJECT: ReactOS Kernel
* LICENSE: GPL - See COPYING in the top level directory
* FILE: ntoskrnl/ke/amd64/cpu.c
* PURPOSE: Routines for CPU-level support
* PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
* Timo Kreuzer (timo.kreuzer@reactos.org)
*/
/* INCLUDES *****************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
/* FIXME: Local EFLAGS defines not used anywhere else */
#define EFLAGS_IOPL 0x3000
#define EFLAGS_NF 0x4000
#define EFLAGS_RF 0x10000
#define EFLAGS_ID 0x200000
/* GLOBALS *******************************************************************/
/* The Boot TSS */
KTSS64 KiBootTss;

/* CPU Features and Flags */
ULONG KeI386CpuType;                    /* CPU type from CPUID */
ULONG KeI386CpuStep;                    /* CPU stepping from CPUID */
ULONG KeProcessorArchitecture;          /* PROCESSOR_ARCHITECTURE_* value */
ULONG KeProcessorLevel;
ULONG KeProcessorRevision;
ULONG KeFeatureBits;                    /* KF_* feature mask (KiGetFeatureBits) */
ULONG KeI386MachineType;
ULONG KeI386NpxPresent = 1;             /* FPU always present on amd64 */
ULONG KeLargestCacheLine = 0x40;        /* Default cache line size: 64 bytes */
ULONG KiDmaIoCoherency = 0;             /* Set via KeSetDmaIoCoherency */
CHAR KeNumberProcessors = 0;
KAFFINITY KeActiveProcessors = 1;
BOOLEAN KiI386PentiumLockErrataPresent;
BOOLEAN KiSMTProcessorsPresent;         /* Set when hyper-threading is detected */

/* Freeze data */
KIRQL KiOldIrql;
ULONG KiFreezeFlag;

/* Flush data: incremented on every full TB flush */
volatile LONG KiTbFlushTimeStamp;

/* CPU Signatures (CPUID leaf 0 vendor strings) */
static const CHAR CmpIntelID[]       = "GenuineIntel";
static const CHAR CmpAmdID[]         = "AuthenticAMD";
static const CHAR CmpCyrixID[]       = "CyrixInstead";
static const CHAR CmpTransmetaID[]   = "GenuineTMx86";
static const CHAR CmpCentaurID[]     = "CentaurHauls";
static const CHAR CmpRiseID[]        = "RiseRiseRise";
/* SUPPORT ROUTINES FOR MSVC COMPATIBILITY ***********************************/
/*
 * MSVC-compatibility wrapper around the CPUID instruction: executes the
 * requested leaf and scatters eax/ebx/ecx/edx into the caller's variables.
 */
VOID
NTAPI
CPUID(IN ULONG InfoType,
      OUT PULONG CpuInfoEax,
      OUT PULONG CpuInfoEbx,
      OUT PULONG CpuInfoEcx,
      OUT PULONG CpuInfoEdx)
{
    INT Registers[4];

    /* Execute the requested CPUID leaf */
    __cpuid(Registers, InfoType);

    /* Hand each register back individually */
    *CpuInfoEax = (ULONG)Registers[0];
    *CpuInfoEbx = (ULONG)Registers[1];
    *CpuInfoEcx = (ULONG)Registers[2];
    *CpuInfoEdx = (ULONG)Registers[3];
}
/* FUNCTIONS *****************************************************************/
/*
 * Reads CPUID leaf 1 and records the CPU type and stepping in the PRCB.
 */
VOID
NTAPI
KiSetProcessorType(VOID)
{
    ULONG64 EFlags;
    INT Reg[4];
    ULONG Stepping, Type;

    /* Start by assuming no CPUID data */
    KeGetCurrentPrcb()->CpuID = 0;

    /* Save EFlags */
    EFlags = __readeflags();

    /* Do CPUID 1 now */
    __cpuid(Reg, 1);

    /*
     * Get the Stepping and Type. The stepping contains both the
     * Model and the Step, while the Type contains the returned Type.
     * We ignore the family.
     *
     * For the stepping, we convert this: zzzzzzxy into this: x0y
     */
    Stepping = Reg[0] & 0xF0;       /* model nibble:    x0  */
    Stepping <<= 4;                 /* shifted:         x00 */
    Stepping += (Reg[0] & 0xFF);    /* add model+step:  x00 + xy */
    Stepping &= 0xF0F;              /* keep only:       x0y */
    Type = Reg[0] & 0xF00;          /* family byte */
    Type >>= 8;

    /* Save them in the PRCB */
    KeGetCurrentPrcb()->CpuID = TRUE;
    KeGetCurrentPrcb()->CpuType = (UCHAR)Type;
    KeGetCurrentPrcb()->CpuStep = (USHORT)Stepping;

    /* Restore EFLAGS */
    __writeeflags(EFlags);
}
/*
 * Reads the CPUID vendor string into the PRCB and maps it to a CPU_*
 * vendor constant. Returns 0 for unknown or not-fully-supported vendors.
 */
ULONG
NTAPI
KiGetCpuVendor(VOID)
{
    PKPRCB Prcb = KeGetCurrentPrcb();
    PCHAR VendorString;
    INT Regs[4];

    /* CPUID leaf 0: the vendor string comes back in ebx, edx, ecx */
    __cpuid(Regs, 0);

    /* Assemble and terminate the string in the PRCB */
    VendorString = (PCHAR)Prcb->VendorString;
    *(ULONG*)&VendorString[0] = (ULONG)Regs[1];     // ebx
    *(ULONG*)&VendorString[4] = (ULONG)Regs[3];     // edx
    *(ULONG*)&VendorString[8] = (ULONG)Regs[2];     // ecx
    *(ULONG*)&VendorString[12] = 0;

    /* Fully supported vendors */
    if (strcmp(VendorString, CmpIntelID) == 0) return CPU_INTEL;
    if (strcmp(VendorString, CmpAmdID) == 0) return CPU_AMD;

    /* Recognized but not fully supported vendors */
    if (strcmp(VendorString, CmpCyrixID) == 0)
    {
        DPRINT1("Cyrix CPUs not fully supported\n");
    }
    else if (strcmp(VendorString, CmpTransmetaID) == 0)
    {
        DPRINT1("Transmeta CPUs not fully supported\n");
    }
    else if (strcmp(VendorString, CmpCentaurID) == 0)
    {
        DPRINT1("VIA CPUs not fully supported\n");
    }
    else if (strcmp(VendorString, CmpRiseID) == 0)
    {
        DPRINT1("Rise CPUs not fully supported\n");
    }

    /* Unknown or unsupported vendor */
    return 0;
}
/*
 * Queries CPUID and converts the raw feature flags into the kernel's KF_*
 * feature bit mask. Also records the initial APIC id and the logical
 * processor count in the PRCB.
 */
ULONG
NTAPI
KiGetFeatureBits(VOID)
{
    PKPRCB Prcb = KeGetCurrentPrcb();
    ULONG Vendor;
    ULONG FeatureBits = KF_WORKING_PTE;
    INT Reg[4];
    ULONG CpuFeatures = 0;

    /* Get the Vendor ID */
    Vendor = KiGetCpuVendor();

    /* Make sure we got a valid vendor ID at least. */
    if (!Vendor) return FeatureBits;

    /* Get the CPUID Info. Features are in Reg[3] (edx). */
    __cpuid(Reg, 1);

    /* Set the initial APIC ID (bits 24-31 of ebx) */
    Prcb->InitialApicId = (UCHAR)(Reg[1] >> 24);

    /* Set the current features */
    CpuFeatures = Reg[3];

    /* Convert all CPUID Feature bits into our format */
    if (CpuFeatures & 0x00000002) FeatureBits |= KF_V86_VIS | KF_CR4;
    if (CpuFeatures & 0x00000008) FeatureBits |= KF_LARGE_PAGE | KF_CR4;
    if (CpuFeatures & 0x00000010) FeatureBits |= KF_RDTSC;
    if (CpuFeatures & 0x00000100) FeatureBits |= KF_CMPXCHG8B;
    if (CpuFeatures & 0x00000800) FeatureBits |= KF_FAST_SYSCALL;
    if (CpuFeatures & 0x00001000) FeatureBits |= KF_MTRR;
    if (CpuFeatures & 0x00002000) FeatureBits |= KF_GLOBAL_PAGE | KF_CR4;
    if (CpuFeatures & 0x00008000) FeatureBits |= KF_CMOV;
    if (CpuFeatures & 0x00010000) FeatureBits |= KF_PAT;
    if (CpuFeatures & 0x00200000) FeatureBits |= KF_DTS;
    if (CpuFeatures & 0x00800000) FeatureBits |= KF_MMX;
    if (CpuFeatures & 0x01000000) FeatureBits |= KF_FXSR;
    if (CpuFeatures & 0x02000000) FeatureBits |= KF_XMMI;
    if (CpuFeatures & 0x04000000) FeatureBits |= KF_XMMI64;

/* ECX (Reg[2]) features: disabled pending the KF_* definitions */
#if 0
    if (Reg[2] & 0x00000001) FeatureBits |= KF_SSE3NEW;
    if (Reg[2] & 0x00000008) FeatureBits |= KF_MONITOR;
    if (Reg[2] & 0x00000200) FeatureBits |= KF_SSE3SUP;
    if (Reg[2] & 0x00002000) FeatureBits |= KF_CMPXCHG16B;
    if (Reg[2] & 0x00080000) FeatureBits |= KF_SSE41;
    if (Reg[2] & 0x00800000) FeatureBits |= KF_POPCNT;
#endif

    /* Check if the CPU has hyper-threading (edx bit 28) */
    if (CpuFeatures & 0x10000000)
    {
        /* Set the number of logical CPUs (bits 16-23 of ebx) */
        Prcb->LogicalProcessorsPerPhysicalProcessor = (UCHAR)(Reg[1] >> 16);
        if (Prcb->LogicalProcessorsPerPhysicalProcessor > 1)
        {
            /* We're on dual-core */
            KiSMTProcessorsPresent = TRUE;
        }
    }
    else
    {
        /* We only have a single CPU */
        Prcb->LogicalProcessorsPerPhysicalProcessor = 1;
    }

    /* Check extended cpuid features: leaf 0x80000000 returns the highest
       supported extended leaf in eax */
    __cpuid(Reg, 0x80000000);
    if ((Reg[0] & 0xffffff00) == 0x80000000)
    {
        /* Check if CPUID 0x80000001 is supported */
        if (Reg[0] >= 0x80000001)
        {
            /* Check which extended features are available. */
            __cpuid(Reg, 0x80000001);

            /* Check if NX-bit is supported (edx bit 20) */
            if (Reg[3] & 0x00100000) FeatureBits |= KF_NX_BIT;

            /* Now handle each features for each CPU Vendor */
            switch (Vendor)
            {
                case CPU_AMD:
                    if (Reg[3] & 0x80000000) FeatureBits |= KF_3DNOW;
                    break;
            }
        }
    }

    /* Return the Feature Bits */
    return FeatureBits;
}
/*
 * Determines the second-level (L2) cache size and stores it in the PCR.
 * Intel: walks the CPUID leaf 2 cache descriptor bytes.
 * AMD:   reads the L2 size directly from CPUID leaf 0x80000006.
 */
VOID
NTAPI
KiGetCacheInformation(VOID)
{
    PKIPCR Pcr = (PKIPCR)KeGetPcr();
    ULONG Vendor;
    INT Data[4];
    ULONG CacheRequests = 0, i;
    ULONG CurrentRegister;
    UCHAR RegisterByte;
    BOOLEAN FirstPass = TRUE;

    /* Set default L2 size */
    Pcr->SecondLevelCacheSize = 0;

    /* Get the Vendor ID and make sure we support CPUID */
    Vendor = KiGetCpuVendor();
    if (!Vendor) return;

    /* Check the Vendor ID */
    switch (Vendor)
    {
        /* Handle Intel case */
        case CPU_INTEL:

            /*Check if we support CPUID 2 */
            __cpuid(Data, 0);
            if (Data[0] >= 2)
            {
                /* We need to loop for the number of times CPUID will tell us to */
                do
                {
                    /* Do the CPUID call */
                    __cpuid(Data, 2);

                    /* Check if it was the first call */
                    if (FirstPass)
                    {
                        /*
                         * The number of times to loop is the first byte. Read
                         * it and then destroy it so we don't get confused.
                         */
                        CacheRequests = Data[0] & 0xFF;
                        Data[0] &= 0xFFFFFF00;

                        /* Don't go over this again */
                        FirstPass = FALSE;
                    }

                    /* Loop all 4 registers (eax, ebx, ecx, edx) */
                    for (i = 0; i < 4; i++)
                    {
                        /* Get the current register */
                        CurrentRegister = Data[i];

                        /*
                         * If the upper bit is set, then this register should
                         * be skipped (it contains no valid descriptors).
                         */
                        if (CurrentRegister & 0x80000000) continue;

                        /* Keep looping for every byte inside this register */
                        while (CurrentRegister)
                        {
                            /* Read a byte, skip a byte. */
                            RegisterByte = (UCHAR)(CurrentRegister & 0xFF);
                            CurrentRegister >>= 8;
                            if (!RegisterByte) continue;

                            /*
                             * Valid values are from 0x40 (0 bytes) to 0x49
                             * (32MB), or from 0x80 to 0x89 (same size but
                             * 8-way associative.
                             */
                            if (((RegisterByte > 0x40) &&
                                 (RegisterByte <= 0x49)) ||
                                ((RegisterByte > 0x80) &&
                                 (RegisterByte <= 0x89)))
                            {
                                /* Mask out only the first nibble */
                                RegisterByte &= 0x0F;

                                /* Set the L2 Cache Size: 128KB << nibble */
                                Pcr->SecondLevelCacheSize = 0x10000 <<
                                                            RegisterByte;
                            }
                        }
                    }
                } while (--CacheRequests);
            }
            break;

        case CPU_AMD:

            /* Check if we support CPUID 0x80000006 */
            __cpuid(Data, 0x80000000);
            if (Data[0] >= 6)
            {
                /* Get 2nd level cache and tlb size */
                __cpuid(Data, 0x80000006);

                /* Set the L2 Cache Size: ecx bits 16-31 are the size in KB,
                   so (value >> 16) << 10 bytes == value >> 6 */
                Pcr->SecondLevelCacheSize = (Data[2] & 0xFFFF0000) >> 6;
            }
            break;
    }
}
/*
 * Initializes the 64-bit TSS, publishes it in the GDT and loads the task
 * register.
 *
 * @param Tss   The TSS to initialize.
 * @param Stack The ring-0 stack pointer to use on privilege transitions.
 */
VOID
FASTCALL
KiInitializeTss(IN PKTSS64 Tss,
                IN UINT64 Stack)
{
    PKGDTENTRY64 TssEntry;

    /* Get pointer to the GDT entry */
    TssEntry = KiGetGdtEntry(KeGetPcr()->GdtBase, KGDT64_SYS_TSS);

    /* Initialize the GDT entry */
    KiInitGdtEntry(TssEntry, (ULONG64)Tss, sizeof(KTSS64), AMD64_TSS, 0);

    /* Zero out the TSS */
    RtlZeroMemory(Tss, sizeof(KTSS64));

    /* FIXME: I/O Map? 0x68 == sizeof(KTSS64), i.e. no I/O bitmap present */
    Tss->IoMapBase = 0x68;

    /* Setup ring 0 stack pointer */
    Tss->Rsp0 = Stack;

    /* Setup a stack for Double Fault Traps */
    Tss->Ist[1] = (ULONG64)KiDoubleFaultStack;

    /* Setup a stack for CheckAbort Traps
       (currently shares the double fault stack) */
    Tss->Ist[2] = (ULONG64)KiDoubleFaultStack;

    /* Setup a stack for NMI Traps
       (currently shares the double fault stack) */
    Tss->Ist[3] = (ULONG64)KiDoubleFaultStack;

    /* Load the task register */
    __ltr(KGDT64_SYS_TSS);
}
/*
 * Flushes this processor's translation buffer (non-global entries).
 */
VOID
NTAPI
KeFlushCurrentTb(VOID)
{
    /* Reloading CR3 with its own value invalidates the TLB */
    ULONG64 Cr3 = __readcr3();
    __writecr3(Cr3);
}
/*
 * Restores the special (control, debug, descriptor-table and MSR) register
 * state previously captured by KiSaveProcessorControlState.
 * Commented-out lines mark state that is not restored yet.
 */
VOID
NTAPI
KiRestoreProcessorControlState(PKPROCESSOR_STATE ProcessorState)
{
    /* Restore the CR registers */
    __writecr0(ProcessorState->SpecialRegisters.Cr0);
//    __writecr2(ProcessorState->SpecialRegisters.Cr2);
    __writecr3(ProcessorState->SpecialRegisters.Cr3);
    __writecr4(ProcessorState->SpecialRegisters.Cr4);
    __writecr8(ProcessorState->SpecialRegisters.Cr8);

    /* Restore the DR registers */
    __writedr(0, ProcessorState->SpecialRegisters.KernelDr0);
    __writedr(1, ProcessorState->SpecialRegisters.KernelDr1);
    __writedr(2, ProcessorState->SpecialRegisters.KernelDr2);
    __writedr(3, ProcessorState->SpecialRegisters.KernelDr3);
    __writedr(6, ProcessorState->SpecialRegisters.KernelDr6);
    __writedr(7, ProcessorState->SpecialRegisters.KernelDr7);

    /* Restore GDT, IDT, LDT and TSS */
    __lgdt(&ProcessorState->SpecialRegisters.Gdtr.Limit);
//    __lldt(&ProcessorState->SpecialRegisters.Ldtr);
//    __ltr(&ProcessorState->SpecialRegisters.Tr);
    __lidt(&ProcessorState->SpecialRegisters.Idtr.Limit);

//    __ldmxcsr(&ProcessorState->SpecialRegisters.MxCsr); // FIXME
//    ProcessorState->SpecialRegisters.DebugControl
//    ProcessorState->SpecialRegisters.LastBranchToRip
//    ProcessorState->SpecialRegisters.LastBranchFromRip
//    ProcessorState->SpecialRegisters.LastExceptionToRip
//    ProcessorState->SpecialRegisters.LastExceptionFromRip

    /* Restore MSRs */
    __writemsr(X86_MSR_GSBASE, ProcessorState->SpecialRegisters.MsrGsBase);
    __writemsr(X86_MSR_KERNEL_GSBASE, ProcessorState->SpecialRegisters.MsrGsSwap);
    __writemsr(X86_MSR_STAR, ProcessorState->SpecialRegisters.MsrStar);
    __writemsr(X86_MSR_LSTAR, ProcessorState->SpecialRegisters.MsrLStar);
    __writemsr(X86_MSR_CSTAR, ProcessorState->SpecialRegisters.MsrCStar);
    __writemsr(X86_MSR_SFMASK, ProcessorState->SpecialRegisters.MsrSyscallMask);
}
/*
 * Captures the special (control, debug, descriptor-table and MSR) register
 * state of the current processor into the given KPROCESSOR_STATE.
 * Commented-out lines mark state that is not captured yet.
 */
VOID
NTAPI
KiSaveProcessorControlState(OUT PKPROCESSOR_STATE ProcessorState)
{
    /* Save the CR registers */
    ProcessorState->SpecialRegisters.Cr0 = __readcr0();
    ProcessorState->SpecialRegisters.Cr2 = __readcr2();
    ProcessorState->SpecialRegisters.Cr3 = __readcr3();
    ProcessorState->SpecialRegisters.Cr4 = __readcr4();
    ProcessorState->SpecialRegisters.Cr8 = __readcr8();

    /* Save the DR registers */
    ProcessorState->SpecialRegisters.KernelDr0 = __readdr(0);
    ProcessorState->SpecialRegisters.KernelDr1 = __readdr(1);
    ProcessorState->SpecialRegisters.KernelDr2 = __readdr(2);
    ProcessorState->SpecialRegisters.KernelDr3 = __readdr(3);
    ProcessorState->SpecialRegisters.KernelDr6 = __readdr(6);
    ProcessorState->SpecialRegisters.KernelDr7 = __readdr(7);

    /* Save GDT, IDT, LDT and TSS */
    __sgdt(&ProcessorState->SpecialRegisters.Gdtr.Limit);
    __sldt(&ProcessorState->SpecialRegisters.Ldtr);
    __str(&ProcessorState->SpecialRegisters.Tr);
    __sidt(&ProcessorState->SpecialRegisters.Idtr.Limit);

//    __stmxcsr(&ProcessorState->SpecialRegisters.MxCsr);
//    ProcessorState->SpecialRegisters.DebugControl =
//    ProcessorState->SpecialRegisters.LastBranchToRip =
//    ProcessorState->SpecialRegisters.LastBranchFromRip =
//    ProcessorState->SpecialRegisters.LastExceptionToRip =
//    ProcessorState->SpecialRegisters.LastExceptionFromRip =

    /* Save MSRs */
    ProcessorState->SpecialRegisters.MsrGsBase = __readmsr(X86_MSR_GSBASE);
    ProcessorState->SpecialRegisters.MsrGsSwap = __readmsr(X86_MSR_KERNEL_GSBASE);
    ProcessorState->SpecialRegisters.MsrStar = __readmsr(X86_MSR_STAR);
    ProcessorState->SpecialRegisters.MsrLStar = __readmsr(X86_MSR_LSTAR);
    ProcessorState->SpecialRegisters.MsrCStar = __readmsr(X86_MSR_CSTAR);
    ProcessorState->SpecialRegisters.MsrSyscallMask = __readmsr(X86_MSR_SFMASK);
}
/*
 * Flushes the translation buffer.
 * FIXME: halfplemented -- only flushes the CURRENT processor; the Invalid
 * and AllProcessors parameters are ignored (no IPI broadcast yet).
 */
VOID
NTAPI
KeFlushEntireTb(IN BOOLEAN Invalid,
                IN BOOLEAN AllProcessors)
{
    KIRQL OldIrql;

    // FIXME: halfplemented
    /* Raise the IRQL for the TB Flush */
    OldIrql = KeRaiseIrqlToSynchLevel();

    /* Flush the TB for the Current CPU, and update the flush stamp */
    KeFlushCurrentTb();

    /* Update the flush stamp and return to original IRQL */
    InterlockedExchangeAdd(&KiTbFlushTimeStamp, 1);
    KeLowerIrql(OldIrql);
}
/*
 * Returns the affinity mask of all active processors.
 */
KAFFINITY
NTAPI
KeQueryActiveProcessors(VOID)
{
    KAFFINITY ActiveMap;

    PAGED_CODE();

    /* Hand back the global active-processor bitmask */
    ActiveMap = KeActiveProcessors;
    return ActiveMap;
}
/*
 * Saves the floating point state for the caller.
 * Not yet implemented on amd64; always fails.
 */
NTSTATUS
NTAPI
KeSaveFloatingPointState(OUT PKFLOATING_SAVE Save)
{
    /* Stub: report failure until implemented */
    UNIMPLEMENTED;
    return STATUS_UNSUCCESSFUL;
}
/*
 * Restores floating point state saved by KeSaveFloatingPointState.
 * Not yet implemented on amd64; always fails.
 */
NTSTATUS
NTAPI
KeRestoreFloatingPointState(IN PKFLOATING_SAVE Save)
{
    /* Stub: report failure until implemented */
    UNIMPLEMENTED;
    return STATUS_UNSUCCESSFUL;
}
/*
 * Writes back and invalidates all processor caches.
 */
BOOLEAN
NTAPI
KeInvalidateAllCaches(VOID)
{
    /* WBINVD flushes and invalidates the whole cache hierarchy */
    __wbinvd();

    /* Always succeeds on this architecture */
    return TRUE;
}
/*
 * @implemented
 */
ULONG
NTAPI
KeGetRecommendedSharedDataAlignment(VOID)
{
    /* The largest cache line size observed is the recommended alignment */
    ULONG Alignment = KeLargestCacheLine;
    return Alignment;
}
/*
 * @implemented
 *
 * Captures the full processor state (register context plus special
 * registers) for hibernation.
 */
VOID
__cdecl
KeSaveStateForHibernate(IN PKPROCESSOR_STATE State)
{
    /* Capture the context (general purpose register state) first... */
    RtlCaptureContext(&State->ContextFrame);

    /* ...then the control state (CRx/DRx/descriptor tables/MSRs) */
    KiSaveProcessorControlState(State);
}
/*
 * @implemented
 */
VOID
NTAPI
KeSetDmaIoCoherency(IN ULONG Coherency)
{
    /* Record the platform's DMA coherency setting in the kernel global */
    KiDmaIoCoherency = Coherency;
}

View file

@ -0,0 +1,199 @@
/*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/ke/amd64/ctxswitch.S
* PURPOSE: Thread Context Switching
*
* PROGRAMMER: Timo kreuzer (timo.kreuzer@reactos.org)
*/
/* INCLUDES ******************************************************************/
#include <reactos/asm.h>
#include <ndk/amd64/asm.h>
/* FUNCTIONS ****************************************************************/
.code64
/*++
* KiThreadStartup
*
* The KiThreadStartup routine is the beginning of any thread.
*
* Params:
* SystemRoutine - Pointer to the System Startup Routine. Either
* PspUserThreadStartup or PspSystemThreadStartup
*
* StartRoutine - For Kernel Threads only, specifies the starting execution
* point of the new thread.
*
* StartContext - For Kernel Threads only, specifies a pointer to variable
* context data to be sent to the StartRoutine above.
*
* UserThread - Indicates whether or not this is a user thread. This tells
* us if the thread has a context or not.
*
* TrapFrame - Pointer to the KTHREAD to which the caller wishes to
* switch from.
*
* Returns:
* Should never return for a system thread. Returns through the System Call
* Exit Dispatcher for a user thread.
*
* Remarks:
* If a return from a system thread is detected, a bug check will occur.
*
*--*/
PUBLIC KiThreadStartup
KiThreadStartup:

    /*
     * Clear all the non-volatile registers, so the thread won't be tempted to
     * expect any static data (like some badly coded usermode/win9x apps do)
     */
    xor rbx, rbx
    xor rsi, rsi
    xor rdi, rdi
    xor rbp, rbp
    xor r10, r10
    xor r11, r11
    xor r12, r12
    xor r13, r13
    xor r14, r14
    xor r15, r15

    /* It's now safe to go to APC */
    mov rax, APC_LEVEL
    mov cr8, rax

    /*
     * Call the System Routine which is right on our stack now.
     * After we pop the pointer, the Start Routine/Context is on the
     * stack, we pop it as parameters to the System Routine into rcx
     */
    pop rax
    pop rcx
    call rax

    /* The thread returned... was it a user-thread? */
    pop rcx
    or rcx, rcx
    jz BadThread

    /* Yes it was, set our trapframe for the System Call Exit Dispatcher.
       BUGFIX: use the 64-bit registers; "mov ebp, esp" zero-extended the
       low 32 bits of rsp into rbp and truncated the frame pointer. */
    mov rbp, rsp

    /* Exit back to user-mode */
    // jmp _KiServiceExit2
    UNIMPLEMENTED KiThreadStartup_KiServiceExit2

BadThread:

    /* A system thread returned...this is very bad! */
    int 3
/*++
* KiSwapContextInternal
*
* The KiSwapContextInternal routine switches context to another thread.
*
* Params:
* ESI - Pointer to the KTHREAD to which the caller wishes to
* switch to.
* EDI - Pointer to the KTHREAD to which the caller wishes to
* switch from.
*
* Returns:
* None.
*
* Remarks:
* Absolutely all registers except ESP can be trampled here for maximum code flexibility.
*
*--*/
PUBLIC KiSwapContextInternal
KiSwapContextInternal:
    /* TODO: the actual stack/address-space switch is not implemented yet;
       this stub only reports itself and returns to the caller */
    UNIMPLEMENTED KiSwapContextInternal
    ret
/**
* KiSwapContext
*
* \brief
* The KiSwapContext routine switches context to another thread.
*
* BOOLEAN
* KiSwapContext(PKTHREAD CurrentThread, PKTHREAD TargetThread);
*
* \param CurrentThread
* Pointer to the KTHREAD of the current thread.
*
* \param TargetThread
* Pointer to the KTHREAD to which the caller wishes to switch to.
*
* \returns
* The WaitStatus of the Target Thread.
*
* \remarks
* This is a wrapper around KiSwapContextInternal which will save all the
* non-volatile registers so that the Internal function can use all of
* them. It will also save the old current thread and set the new one.
*
* The calling thread does not return after KiSwapContextInternal until
* another thread switches to IT.
*
*--*/
PUBLIC KiSwapContext
KiSwapContext:

    /* Make room to save 10 registers */
    sub rsp, 10 * 8

    /* Save all the non-volatile ones (r10/r11 are volatile by the ABI but
       are preserved here for KiSwapContextInternal's benefit) */
    mov [rsp+72], r15
    mov [rsp+64], r14
    mov [rsp+56], r13
    mov [rsp+48], r12
    mov [rsp+40], r11
    mov [rsp+32], r10
    mov [rsp+24], rbx
    mov [rsp+16], rsi
    mov [rsp+8], rdi
    mov [rsp+0], rbp

    /* Get the PCR */
    mov rbx, gs:[KPCR_SELF]

    /* Get the current thread */
    mov rdi, rcx

    /* Get the New Thread */
    mov rsi, rdx

    /* Get the wait IRQL.
       BUGFIX: address with rdi, not edi -- a 32-bit base register is
       incorrect in long mode */
    movzx ecx, byte ptr [rdi+KTHREAD_WAIT_IRQL]

    /* Do the swap with the registers correctly setup */
    call KiSwapContextInternal

    /* Restore the registers */
    mov rbp, [rsp+0]
    mov rdi, [rsp+8]
    mov rsi, [rsp+16]
    mov rbx, [rsp+24]
    mov r10, [rsp+32]
    mov r11, [rsp+40]
    mov r12, [rsp+48]
    mov r13, [rsp+56]
    mov r14, [rsp+64]
    mov r15, [rsp+72]

    /* Clean stack.
       BUGFIX: adjust rsp, not esp -- "add esp" zero-extends into rsp */
    add rsp, 10 * 8
    ret
END

View file

@ -0,0 +1,299 @@
/*
* PROJECT: ReactOS Kernel
* LICENSE: GPL - See COPYING in the top level directory
* FILE: ntoskrnl/ke/amd64/except.c
* PURPOSE: Exception Dispatching for amd64
* PROGRAMMER: Timo Kreuzer (timo.kreuzer@reactos.org)
* Alex Ionescu (alex.ionescu@reactos.org)
*/
/* INCLUDES ******************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
extern ULONG64 InterruptDispatchTable[256];
/* GLOBALS *******************************************************************/
/* Table of IDT vectors that receive dedicated service routines. Entries
   must stay sorted by vector: KeInitExceptions merges this table with the
   generic dispatch stubs in a single ascending pass. Terminated by an
   all-zero entry. */
KIDT_INIT KiInterruptInitTable[] =
{
  /* Id,   Dpl,  IST,  ServiceRoutine */
    {0x00, 0x00, 0x00, KiDivideErrorFault},
    {0x01, 0x00, 0x00, KiDebugTrapOrFault},
    {0x02, 0x00, 0x03, KiNmiInterrupt},             /* NMI uses IST stack 3 */
    {0x03, 0x03, 0x00, KiBreakpointTrap},           /* user-callable (DPL 3) */
    {0x04, 0x03, 0x00, KiOverflowTrap},             /* user-callable (DPL 3) */
    {0x05, 0x00, 0x00, KiBoundFault},
    {0x06, 0x00, 0x00, KiInvalidOpcodeFault},
    {0x07, 0x00, 0x00, KiNpxNotAvailableFault},
    {0x08, 0x00, 0x01, KiDoubleFaultAbort},         /* double fault: IST 1 */
    {0x09, 0x00, 0x00, KiNpxSegmentOverrunAbort},
    {0x0A, 0x00, 0x00, KiInvalidTssFault},
    {0x0B, 0x00, 0x00, KiSegmentNotPresentFault},
    {0x0C, 0x00, 0x00, KiStackFault},
    {0x0D, 0x00, 0x00, KiGeneralProtectionFault},
    {0x0E, 0x00, 0x00, KiPageFault},
    {0x10, 0x00, 0x00, KiFloatingErrorFault},
    {0x11, 0x00, 0x00, KiAlignmentFault},
    {0x12, 0x00, 0x02, KiMcheckAbort},              /* machine check: IST 2 */
    {0x13, 0x00, 0x00, KiXmmException},
    {0x1F, 0x00, 0x00, KiApcInterrupt},
    {0x2C, 0x03, 0x00, KiRaiseAssertion},           /* user-callable (DPL 3) */
    {0x2D, 0x03, 0x00, KiDebugServiceTrap},         /* user-callable (DPL 3) */
    {0x2F, 0x00, 0x00, KiDpcInterrupt},
    {0xE1, 0x00, 0x00, KiIpiInterrupt},
    {0, 0, 0, 0}
};

/* The kernel's IDT, and the descriptor used to load it with lidt */
KIDTENTRY64 KiIdt[256];
KDESCRIPTOR KiIdtDescriptor = {{0}, sizeof(KiIdt) - 1, KiIdt};
/* FUNCTIONS *****************************************************************/
/*
 * Builds the 256-entry IDT: vectors listed in KiInterruptInitTable get
 * their dedicated service routine, DPL and IST; every other vector points
 * at the generic InterruptDispatchTable stub. Finally loads the IDT.
 */
VOID
INIT_FUNCTION
NTAPI
KeInitExceptions(VOID)
{
    int i, j;

    /* Initialize the Idt; j walks the (sorted) init table in parallel */
    for (j = i = 0; i < 256; i++)
    {
        ULONG64 Offset;

        if (KiInterruptInitTable[j].InterruptId == i)
        {
            /* This vector has a dedicated handler */
            Offset = (ULONG64)KiInterruptInitTable[j].ServiceRoutine;
            KiIdt[i].Dpl = KiInterruptInitTable[j].Dpl;
            KiIdt[i].IstIndex = KiInterruptInitTable[j].IstIndex;
            j++;
        }
        else
        {
            /* Generic dispatch stub for this vector */
            Offset = (ULONG64)&InterruptDispatchTable[i];
            KiIdt[i].Dpl = 0;
            KiIdt[i].IstIndex = 0;
        }
        /* Fill in the 64-bit interrupt gate (type 0x0e) */
        KiIdt[i].OffsetLow = Offset & 0xffff;
        KiIdt[i].Selector = KGDT64_R0_CODE;
        KiIdt[i].Type = 0x0e;
        KiIdt[i].Reserved0 = 0;
        KiIdt[i].Present = 1;
        KiIdt[i].OffsetMiddle = (Offset >> 16) & 0xffff;
        KiIdt[i].OffsetHigh = (Offset >> 32);
        KiIdt[i].Reserved1 = 0;
    }

    /* Publish and load the new IDT */
    KeGetPcr()->IdtBase = KiIdt;
    __lidt(&KiIdtDescriptor.Limit);
}
/*
 * Dispatches an exception to the debugger and/or the registered exception
 * handlers, then writes any context changes back into the trap frames.
 *
 * NOTE(review): user-mode dispatching is not implemented yet (asserts).
 */
VOID
NTAPI
KiDispatchException(IN PEXCEPTION_RECORD ExceptionRecord,
                    IN PKEXCEPTION_FRAME ExceptionFrame,
                    IN PKTRAP_FRAME TrapFrame,
                    IN KPROCESSOR_MODE PreviousMode,
                    IN BOOLEAN FirstChance)
{
    CONTEXT Context;

//    FrLdrDbgPrint("KiDispatchException(%p, %p, %p, %d, %d)\n",
//                  ExceptionRecord, ExceptionFrame, TrapFrame, PreviousMode, FirstChance);

    /* Increase number of Exception Dispatches */
    KeGetCurrentPrcb()->KeExceptionDispatchCount++;

    /* Set the context flags */
    Context.ContextFlags = CONTEXT_ALL;

    /* Get a Context */
    KeTrapFrameToContext(TrapFrame, ExceptionFrame, &Context);

    /* Look at our exception code */
    switch (ExceptionRecord->ExceptionCode)
    {
        /* Breakpoint */
        case STATUS_BREAKPOINT:

            /* Decrement RIP by one so it points back at the int3 itself */
            Context.Rip--;
            break;

        /* Internal exception */
        case KI_EXCEPTION_ACCESS_VIOLATION:

            /* Set correct code */
            ExceptionRecord->ExceptionCode = STATUS_ACCESS_VIOLATION;
            if (PreviousMode == UserMode)
            {
                /* FIXME: Handle no execute */
            }
            break;
    }

    /* Handle kernel-mode first, it's simpler */
    if (PreviousMode == KernelMode)
    {
        /* Check if this is a first-chance exception */
        if (FirstChance == TRUE)
        {
            /* Break into the debugger for the first time */
            if (KiDebugRoutine(TrapFrame,
                               ExceptionFrame,
                               ExceptionRecord,
                               &Context,
                               PreviousMode,
                               FALSE))
            {
                /* Exception was handled */
                goto Handled;
            }

            /* If the Debugger couldn't handle it, dispatch the exception */
            if (RtlDispatchException(ExceptionRecord, &Context)) goto Handled;
        }

        /* This is a second-chance exception, only for the debugger */
        if (KiDebugRoutine(TrapFrame,
                           ExceptionFrame,
                           ExceptionRecord,
                           &Context,
                           PreviousMode,
                           TRUE))
        {
            /* Exception was handled */
            goto Handled;
        }

        /* Third strike; you're out */
        KeBugCheckEx(KMODE_EXCEPTION_NOT_HANDLED,
                     ExceptionRecord->ExceptionCode,
                     (ULONG_PTR)ExceptionRecord->ExceptionAddress,
                     (ULONG_PTR)TrapFrame,
                     0);
    }
    else
    {
        /* FIXME: user-mode exception handling unimplemented */
        ASSERT(FALSE);
    }

Handled:
    /* Convert the context back into Trap/Exception Frames */
    KeContextToTrapFrame(&Context,
                         ExceptionFrame,
                         TrapFrame,
                         Context.ContextFlags,
                         PreviousMode);
    return;
}
/*
 * Raises an exception in user mode on return from the current system call.
 * Not yet implemented on amd64; always fails.
 */
NTSTATUS
NTAPI
KeRaiseUserException(IN NTSTATUS ExceptionCode)
{
    /* Stub: report failure until implemented */
    UNIMPLEMENTED;
    return STATUS_UNSUCCESSFUL;
}
/*
 * Terminal handler for unrecoverable CPU exceptions: brings the system
 * down with the faulting trap frame attached. Never returns.
 */
VOID
DECLSPEC_NORETURN
KiSystemFatalException(IN ULONG ExceptionCode,
                       IN PKTRAP_FRAME TrapFrame)
{
    /* Bugcheck, attaching the trap frame for the debugger */
    KeBugCheckWithTf(UNEXPECTED_KERNEL_MODE_TRAP,
                     ExceptionCode,
                     0,
                     0,
                     0,
                     TrapFrame);
}
/*
 * Second-level handler for device-not-available (#NM) faults.
 * Not implemented: bugchecks with the trap frame.
 */
NTSTATUS
NTAPI
KiNpxNotAvailableFaultHandler(
    IN PKTRAP_FRAME TrapFrame)
{
    UNIMPLEMENTED;

    /* Take the system down; no lazy FPU state switching exists yet */
    KeBugCheckWithTf(TRAP_CAUSE_UNKNOWN, 13, 0, 0, 1, TrapFrame);
    return -1;
}
/*
 * Second-level handler for general protection faults (#GP, vector 0x0D).
 * Repairs lazily-loaded segment selectors, recognizes a faulting
 * RDMSR/WRMSR as an access violation, and asserts on everything else.
 */
NTSTATUS
NTAPI
KiGeneralProtectionFaultHandler(
    IN PKTRAP_FRAME TrapFrame)
{
    PUCHAR Instructions;

    /* Check for user-mode GPF (ring bits set in CS) -- not dispatched yet */
    if (TrapFrame->SegCs & 3)
    {
        UNIMPLEMENTED;
        ASSERT(FALSE);
    }

    /* Check for lazy segment load: reload the flat user data selectors */
    if (TrapFrame->SegDs != (KGDT64_R3_DATA | RPL_MASK))
    {
        /* Fix it */
        TrapFrame->SegDs = (KGDT64_R3_DATA | RPL_MASK);
        return STATUS_SUCCESS;
    }
    else if (TrapFrame->SegEs != (KGDT64_R3_DATA | RPL_MASK))
    {
        /* Fix it */
        TrapFrame->SegEs = (KGDT64_R3_DATA | RPL_MASK);
        return STATUS_SUCCESS;
    }

    /* Check for a nested fault inside this handler.
       FIXME: both bounds are the handler's start address, so the range is
       empty and this can never trigger; the upper bound should be the end
       of the handler. */
    if ((TrapFrame->Rip >= (ULONG64)KiGeneralProtectionFaultHandler) &&
        (TrapFrame->Rip < (ULONG64)KiGeneralProtectionFaultHandler))
    {
        /* Not implemented */
        UNIMPLEMENTED;
        ASSERT(FALSE);
    }

    /* Get Instruction Pointer */
    Instructions = (PUCHAR)TrapFrame->Rip;

    /* Check for IRETQ (REX.W prefix 48 + IRET opcode CF) */
    if (Instructions[0] == 0x48 && Instructions[1] == 0xCF)
    {
        /* Not implemented */
        UNIMPLEMENTED;
        ASSERT(FALSE);
    }

    /* Check for WRMSR (0F 30) / RDMSR (0F 32).
       BUGFIX: the old check shifted a UCHAR right by 8 bits, which always
       yields 0, so the opcode comparison could never match. */
    if ((Instructions[0] == 0x0F) &&        // 2-byte opcode
        ((Instructions[1] == 0x30) ||       // WRMSR
         (Instructions[1] == 0x32)))        // RDMSR
    {
        /* Unknown CPU MSR, so raise an access violation */
        return STATUS_ACCESS_VIOLATION;
    }

    /* Anything else is unexpected */
    ASSERT(FALSE);
    return STATUS_UNSUCCESSFUL;
}
/*
 * Second-level handler for SIMD floating-point (#XF) exceptions.
 * Not implemented: bugchecks with the trap frame.
 */
NTSTATUS
NTAPI
KiXmmExceptionHandler(
    IN PKTRAP_FRAME TrapFrame)
{
    UNIMPLEMENTED;

    /* Take the system down; XMM exception dispatching is missing */
    KeBugCheckWithTf(TRAP_CAUSE_UNKNOWN, 13, 0, 0, 1, TrapFrame);
    return -1;
}

View file

@ -0,0 +1,62 @@
/*
* PROJECT: ReactOS Kernel
* LICENSE: GPL - See COPYING in the top level directory
 * FILE:            ntoskrnl/ke/amd64/irq.c
* PURPOSE: Manages the Kernel's IRQ support for external drivers,
* for the purpopses of connecting, disconnecting and setting
* up ISRs for drivers. The backend behind the Io* Interrupt
* routines.
* PROGRAMMERS: Timo Kreuzer (timo.kreuzer@web.de)
*/
/* INCLUDES *****************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
/* FUNCTIONS ****************************************************************/
BOOLEAN
NTAPI
KeDisableInterrupts(VOID)
{
    ULONG64 EFlags;

    /* Capture RFLAGS before touching the interrupt flag */
    EFlags = __readeflags();

    /* Mask interrupts on this processor */
    _disable();

    /* Report whether interrupts had been enabled beforehand */
    return (EFlags & EFLAGS_INTERRUPT_MASK) ? TRUE : FALSE;
}
BOOLEAN
NTAPI
KeDisconnectInterrupt(IN PKINTERRUPT Interrupt)
{
    /* Not implemented on amd64 yet; report failure so callers
       leave the interrupt object connected/untouched. */
    UNIMPLEMENTED;
    return FALSE;
}
/* Fills in a caller-allocated KINTERRUPT object — not implemented on amd64 yet. */
VOID
NTAPI
KeInitializeInterrupt(IN PKINTERRUPT Interrupt,
                      IN PKSERVICE_ROUTINE ServiceRoutine,
                      IN PVOID ServiceContext,
                      IN PKSPIN_LOCK SpinLock,
                      IN ULONG Vector,
                      IN KIRQL Irql,
                      IN KIRQL SynchronizeIrql,
                      IN KINTERRUPT_MODE InterruptMode,
                      IN BOOLEAN ShareVector,
                      IN CHAR ProcessorNumber,
                      IN BOOLEAN FloatingSave)
{
    UNIMPLEMENTED;
}

View file

@ -0,0 +1,45 @@
/*
* PROJECT: ReactOS Kernel
* LICENSE: GPL - See COPYING in the top level directory
* PURPOSE: Routines for IRQL support
* PROGRAMMERS: Timo Kreuzer
*/
/* INCLUDES *****************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
/* FUNCTIONS ****************************************************************/
NTKERNELAPI
KIRQL
KxGetCurrentIrql(VOID)
{
    /* Thin wrapper: hand back whatever the portable accessor reports */
    KIRQL CurrentIrql = KeGetCurrentIrql();
    return CurrentIrql;
}
NTKERNELAPI
VOID
KxLowerIrql(IN KIRQL NewIrql)
{
    /* Thin wrapper around the generic lowering routine */
    KeLowerIrql(NewIrql);
}
NTKERNELAPI
KIRQL
KxRaiseIrql(IN KIRQL NewIrql)
{
    /* Raise via the fastcall primitive; hand the previous level back */
    KIRQL PreviousIrql = KfRaiseIrql(NewIrql);
    return PreviousIrql;
}
NTKERNELAPI
KIRQL
KxRaiseIrqlToDpcLevel(VOID)
{
    /* Delegate to the generic raise routine; return the prior IRQL */
    KIRQL PreviousIrql = KeRaiseIrqlToDpcLevel();
    return PreviousIrql;
}
/* EOF */

View file

@ -0,0 +1,502 @@
/*
* PROJECT: ReactOS Kernel
* LICENSE: GPL - See COPYING in the top level directory
* FILE: ntoskrnl/ke/i386/kiinit.c
* PURPOSE: Kernel Initialization for x86 CPUs
* PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
* Timo Kreuzer (timo.kreuzer@reactos.org)
*/
/* INCLUDES *****************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
#define REQUIRED_FEATURE_BITS (KF_RDTSC|KF_CR4|KF_CMPXCHG8B|KF_XMMI|KF_XMMI64| \
KF_NX_BIT)
/* GLOBALS *******************************************************************/
/* Function pointer for early debug prints */
ULONG (*FrLdrDbgPrint)(const char *Format, ...);
/* Spinlocks used only on X86 */
KSPIN_LOCK KiFreezeExecutionLock;
/* BIOS Memory Map. Not NTLDR-compliant yet */
extern ULONG KeMemoryMapRangeCount;
extern ADDRESS_RANGE KeMemoryMap[64];
KIPCR KiInitialPcr;
/* Boot and double-fault/NMI/DPC stack */
UCHAR DECLSPEC_ALIGN(16) P0BootStackData[KERNEL_STACK_SIZE] = {0};
UCHAR DECLSPEC_ALIGN(16) KiDoubleFaultStackData[KERNEL_STACK_SIZE] = {0};
ULONG_PTR P0BootStack = (ULONG_PTR)&P0BootStackData[KERNEL_STACK_SIZE];
ULONG_PTR KiDoubleFaultStack = (ULONG_PTR)&KiDoubleFaultStackData[KERNEL_STACK_SIZE];
/* FUNCTIONS *****************************************************************/
/* Probes optional CPU features and warns about ones detected but not yet
   used; nothing here changes hardware state. */
VOID
NTAPI
KiInitMachineDependent(VOID)
{
    /* Check for large page support */
    if (KeFeatureBits & KF_LARGE_PAGE)
    {
        /* FIXME: Support this */
        DPRINT1("Large Page support detected but not yet taken advantage of!\n");
    }

    /* Check for global page support */
    if (KeFeatureBits & KF_GLOBAL_PAGE)
    {
        /* FIXME: Support this */
        DPRINT1("Global Page support detected but not yet taken advantage of!\n");
    }

    /* Check if we have MTRR */
    if (KeFeatureBits & KF_MTRR)
    {
        /* FIXME: Support this */
        DPRINT1("MTRR support detected but not yet taken advantage of!\n");
    }

    /* Check for PAT and/or MTRR support */
    if (KeFeatureBits & KF_PAT)
    {
        /* FIXME: Support this */
        DPRINT1("PAT support detected but not yet taken advantage of!\n");
    }
}
/* Initializes this processor's PCR/PRCB: self pointers, versions, processor
   mask, GDT/IDT/TSS bases read back from the CPU, DPC stack and idle thread.
   Runs very early; assumes the GDT/IDT/TR were already loaded. */
VOID
NTAPI
KiInitializePcr(IN PKIPCR Pcr,
                IN ULONG ProcessorNumber,
                IN PKTHREAD IdleThread,
                IN PVOID DpcStack)
{
    KDESCRIPTOR GdtDescriptor = {{0},0,0}, IdtDescriptor = {{0},0,0};
    PKGDTENTRY64 TssEntry;
    USHORT Tr = 0;

    /* Zero out the PCR */
    RtlZeroMemory(Pcr, PAGE_SIZE);

    /* Set pointers to ourselves */
    Pcr->Self = (PKPCR)Pcr;
    Pcr->CurrentPrcb = &Pcr->Prcb;

    /* Set the PCR Version */
    Pcr->MajorVersion = PCR_MAJOR_VERSION;
    Pcr->MinorVersion = PCR_MINOR_VERSION;

    /* Set the PRCB Version */
    Pcr->Prcb.MajorVersion = 1;
    Pcr->Prcb.MinorVersion = 1;

    /* Set the Build Type */
    Pcr->Prcb.BuildType = 0;
#ifndef CONFIG_SMP
    Pcr->Prcb.BuildType |= PRCB_BUILD_UNIPROCESSOR;
#endif
#ifdef DBG
    Pcr->Prcb.BuildType |= PRCB_BUILD_DEBUG;
#endif

    /* Set the Processor Number and current Processor Mask. Use a 64-bit
       wide shift: the previous "1 << ProcessorNumber" was an int shift,
       which truncates (and is undefined) for processors >= 32 on a
       64-bit affinity mask. */
    Pcr->Prcb.Number = (UCHAR)ProcessorNumber;
    Pcr->Prcb.SetMember = (KAFFINITY)1 << ProcessorNumber;

    /* Get GDT and IDT descriptors */
    __sgdt(&GdtDescriptor.Limit);
    __sidt(&IdtDescriptor.Limit);
    Pcr->GdtBase = (PVOID)GdtDescriptor.Base;
    Pcr->IdtBase = (PKIDTENTRY)IdtDescriptor.Base;

    /* Get TSS Selector */
    __str(&Tr);
    ASSERT(Tr == KGDT64_SYS_TSS);

    /* Get TSS Entry */
    TssEntry = KiGetGdtEntry(Pcr->GdtBase, Tr);

    /* Get the KTSS itself */
    Pcr->TssBase = KiGetGdtDescriptorBase(TssEntry);
    Pcr->Prcb.RspBase = Pcr->TssBase->Rsp0; // FIXME

    /* Set DPC Stack */
    Pcr->Prcb.DpcStack = DpcStack;

    /* Setup the processor set */
    Pcr->Prcb.MultiThreadProcessorSet = Pcr->Prcb.SetMember;

    /* Clear DR6/7 to cleanup bootloader debugging */
    Pcr->Prcb.ProcessorState.SpecialRegisters.KernelDr6 = 0;
    Pcr->Prcb.ProcessorState.SpecialRegisters.KernelDr7 = 0;

    /* Set the Current Thread */
    Pcr->Prcb.CurrentThread = IdleThread;

    /* Start us out at PASSIVE_LEVEL */
    Pcr->Irql = PASSIVE_LEVEL;
    KeSetCurrentIrql(PASSIVE_LEVEL);
}
/* Verifies required CPU features are present (bugchecks otherwise) and
   programs CR0/CR4 for FXSAVE, XMM exceptions and write protection. */
VOID
NTAPI
KiInitializeCpuFeatures(ULONG Cpu)
{
    ULONG FeatureBits;

    /* Get the processor features for this CPU */
    FeatureBits = KiGetFeatureBits();

    /* Check if we support all needed features */
    if ((FeatureBits & REQUIRED_FEATURE_BITS) != REQUIRED_FEATURE_BITS)
    {
        /* If not, bugcheck system.
           NOTE(review): 0 is not a defined stop code -- consider a real one. */
        FrLdrDbgPrint("CPU doesn't have needed features! Has: 0x%x, required: 0x%x\n",
                      FeatureBits, REQUIRED_FEATURE_BITS);
        KeBugCheck(0);
    }

    /* Set DEP to always on */
    SharedUserData->NXSupportPolicy = NX_SUPPORT_POLICY_ALWAYSON;
    FeatureBits |= KF_NX_ENABLED;

    /* Save feature bits */
    KeGetCurrentPrcb()->FeatureBits = FeatureBits;

    /* Enable fx save restore support */
    __writecr4(__readcr4() | CR4_FXSR);

    /* Enable XMMI exceptions */
    __writecr4(__readcr4() | CR4_XMMEXCPT);

    /* Enable Write-Protection */
    __writecr0(__readcr0() | CR0_WP);

    /* Disable fpu monitoring */
    __writecr0(__readcr0() & ~CR0_MP);

    /* Disable x87 fpu exceptions */
    __writecr0(__readcr0() & ~CR0_NE);
}
/* Per-processor kernel initialization: CPU type/state capture, spinlocks,
   idle process/thread setup, executive startup, DPC stack allocation and
   final transition of this CPU into the scheduler's idle summary. */
VOID
NTAPI
KiInitializeKernel(IN PKPROCESS InitProcess,
                   IN PKTHREAD InitThread,
                   IN PVOID IdleStack,
                   IN PKPRCB Prcb,
                   IN CCHAR Number,
                   IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    ULONG PageDirectory[2];
    PVOID DpcStack;

    /* Detect and set the CPU Type */
    KiSetProcessorType();

    /* Initialize the Power Management Support for this PRCB */
//  PoInitializePrcb(Prcb);

    /* Save CPU state */
    KiSaveProcessorControlState(&Prcb->ProcessorState);

    /* Get cache line information for this CPU */
    KiGetCacheInformation();

    /* Initialize spinlocks and DPC data */
    KiInitSpinLocks(Prcb, Number);

    /* Check if this is the Boot CPU */
    if (Number == 0)
    {
        /* Set Node Data */
        KeNodeBlock[0] = &KiNode0;
        Prcb->ParentNode = KeNodeBlock[0];
        KeNodeBlock[0]->ProcessorMask = Prcb->SetMember;

        /* Set boot-level flags */
        KeProcessorArchitecture = PROCESSOR_ARCHITECTURE_AMD64;
        KeProcessorLevel = (USHORT)Prcb->CpuType;
        if (Prcb->CpuID) KeProcessorRevision = Prcb->CpuStep;

        /* Set the current MP Master KPRCB to the Boot PRCB */
        Prcb->MultiThreadSetMaster = Prcb;

        /* Lower to APC_LEVEL */
        KeLowerIrql(APC_LEVEL);

        /* Initialize some spinlocks */
        KeInitializeSpinLock(&KiFreezeExecutionLock);

        /* Initialize portable parts of the OS */
        KiInitSystem();

        /* Initialize the Idle Process and the Process Listhead */
        InitializeListHead(&KiProcessListHead);
        PageDirectory[0] = 0;
        PageDirectory[1] = 0;
        KeInitializeProcess(InitProcess,
                            0,
                            0xFFFFFFFF,
                            PageDirectory,
                            FALSE);
        InitProcess->QuantumReset = MAXCHAR;
    }
    else
    {
        /* FIXME */
        DPRINT1("SMP Boot support not yet present\n");
    }

    /* HACK for MmUpdatePageDir */
    ((PETHREAD)InitThread)->ThreadsProcess = (PEPROCESS)InitProcess;

    /* Setup the Idle Thread */
    KeInitializeThread(InitProcess,
                       InitThread,
                       NULL,
                       NULL,
                       NULL,
                       NULL,
                       NULL,
                       IdleStack);
    InitThread->NextProcessor = Number;
    InitThread->Priority = HIGH_PRIORITY;
    InitThread->State = Running;

    /* Use 64-bit wide shifts for the affinity masks: the previous
       "1 << Number" was an int shift, which truncates (and is undefined)
       for processors >= 32 on amd64's 64-bit KAFFINITY. */
    InitThread->Affinity = (KAFFINITY)1 << Number;
    InitThread->WaitIrql = DISPATCH_LEVEL;
    InitProcess->ActiveProcessors = (KAFFINITY)1 << Number;

    /* Set basic CPU Features that user mode can read */
    SharedUserData->ProcessorFeatures[PF_MMX_INSTRUCTIONS_AVAILABLE] =
        (KeFeatureBits & KF_MMX) ? TRUE: FALSE;
    SharedUserData->ProcessorFeatures[PF_COMPARE_EXCHANGE_DOUBLE] =
        (KeFeatureBits & KF_CMPXCHG8B) ? TRUE: FALSE;
    SharedUserData->ProcessorFeatures[PF_XMMI_INSTRUCTIONS_AVAILABLE] =
        ((KeFeatureBits & KF_FXSR) && (KeFeatureBits & KF_XMMI)) ? TRUE: FALSE;
    SharedUserData->ProcessorFeatures[PF_XMMI64_INSTRUCTIONS_AVAILABLE] =
        ((KeFeatureBits & KF_FXSR) && (KeFeatureBits & KF_XMMI64)) ? TRUE: FALSE;
    SharedUserData->ProcessorFeatures[PF_3DNOW_INSTRUCTIONS_AVAILABLE] =
        (KeFeatureBits & KF_3DNOW) ? TRUE: FALSE;
    SharedUserData->ProcessorFeatures[PF_RDTSC_INSTRUCTION_AVAILABLE] =
        (KeFeatureBits & KF_RDTSC) ? TRUE: FALSE;

    /* Set up the thread-related fields in the PRCB */
    Prcb->CurrentThread = InitThread;
    Prcb->NextThread = NULL;
    Prcb->IdleThread = InitThread;

    /* Initialize the Kernel Executive */
    ExpInitializeExecutive(Number, LoaderBlock);

    /* Only do this on the boot CPU */
    if (Number == 0)
    {
        /* Calculate the time reciprocal */
        KiTimeIncrementReciprocal =
            KiComputeReciprocal(KeMaximumIncrement,
                                &KiTimeIncrementShiftCount);

        /* Update DPC Values in case they got updated by the executive */
        Prcb->MaximumDpcQueueDepth = KiMaximumDpcQueueDepth;
        Prcb->MinimumDpcRate = KiMinimumDpcRate;
        Prcb->AdjustDpcThreshold = KiAdjustDpcThreshold;

        /* Allocate the DPC Stack */
        DpcStack = MmCreateKernelStack(FALSE, 0);
        if (!DpcStack) KeBugCheckEx(NO_PAGES_AVAILABLE, 1, 0, 0, 0);
        Prcb->DpcStack = DpcStack;

        /* Allocate the IOPM save area. */
//      Ki386IopmSaveArea = ExAllocatePoolWithTag(PagedPool,
//                                                PAGE_SIZE * 2,
//                                                TAG('K', 'e', ' ', ' '));
//      if (!Ki386IopmSaveArea)
//      {
//          /* Bugcheck. We need this for V86/VDM support. */
//          KeBugCheckEx(NO_PAGES_AVAILABLE, 2, PAGE_SIZE * 2, 0, 0);
//      }
    }

    /* Raise to Dispatch */
    KfRaiseIrql(DISPATCH_LEVEL);

    /* Set the Idle Priority to 0. This will jump into Phase 1 */
    KeSetPriorityThread(InitThread, 0);

    /* If there's no thread scheduled, put this CPU in the Idle summary */
    KiAcquirePrcbLock(Prcb);
    if (!Prcb->NextThread) KiIdleSummary |= (KAFFINITY)1 << Number;
    KiReleasePrcbLock(Prcb);

    /* Raise back to HIGH_LEVEL and clear the PRCB for the loader block */
    KfRaiseIrql(HIGH_LEVEL);
    LoaderBlock->Prcb = 0;
}
/* Per-processor entry point called from the boot loader: sets up the loader
   block (CPU 0 only), GS base, PCR, CPU features, IDT/TSS and the debugger,
   then switches to the real kernel stack and continues initialization. */
VOID
NTAPI
KiSystemStartup(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    ULONG Cpu;
    PKTHREAD InitialThread;
    ULONG64 InitialStack;
    PKIPCR Pcr;

    /* HACK: freeldr passes its debug print routine via CommonDataArea */
    FrLdrDbgPrint = LoaderBlock->u.I386.CommonDataArea;
    FrLdrDbgPrint("Hello from KiSystemStartup!!!\n");

    /* HACK, because freeldr maps page 0 */
    MiAddressToPte((PVOID)0)->u.Hard.Valid = 0;

    /* Save the loader block */
    KeLoaderBlock = LoaderBlock;

    /* Get the current CPU number */
    Cpu = KeNumberProcessors++; // FIXME

    /* LoaderBlock initialization for Cpu 0 */
    if (Cpu == 0)
    {
        /* Set the initial stack, idle thread and process */
        LoaderBlock->KernelStack = (ULONG_PTR)P0BootStack;
        LoaderBlock->Thread = (ULONG_PTR)&KiInitialThread;
        LoaderBlock->Process = (ULONG_PTR)&KiInitialProcess.Pcb;
        LoaderBlock->Prcb = (ULONG_PTR)&KiInitialPcr.Prcb;
    }

    /* Get Pcr from loader block */
    Pcr = CONTAINING_RECORD(LoaderBlock->Prcb, KIPCR, Prcb);

    /* Set the PRCB for this Processor */
    KiProcessorBlock[Cpu] = &Pcr->Prcb;

    /* Set GS base */
    __writemsr(X86_MSR_GSBASE, (ULONG64)Pcr);
    __writemsr(X86_MSR_KERNEL_GSBASE, (ULONG64)Pcr);

    /* LDT is unused */
    __lldt(0);

    /* Align stack to 16 bytes */
    LoaderBlock->KernelStack &= ~(16 - 1);

    /* Save the initial thread and stack */
    InitialStack = LoaderBlock->KernelStack; // Checkme
    InitialThread = (PKTHREAD)LoaderBlock->Thread;

    /* Clean the APC List Head */
    InitializeListHead(&InitialThread->ApcState.ApcListHead[KernelMode]);

    /* Set us as the current process */
    InitialThread->ApcState.Process = (PVOID)LoaderBlock->Process;

    /* Initialize the PCR */
    KiInitializePcr(Pcr, Cpu, InitialThread, (PVOID)KiDoubleFaultStack);

    /* Initialize the CPU features */
    KiInitializeCpuFeatures(Cpu);

    /* Initial setup for the boot CPU */
    if (Cpu == 0)
    {
        /* Setup the TSS descriptors and entries */
        KiInitializeTss(Pcr->TssBase, InitialStack);

        /* Setup the IDT */
        KeInitExceptions();

        /* HACK: misuse this function to pass a function pointer to kdcom */
        KdDebuggerInitialize1((PVOID)FrLdrDbgPrint);

        /* Initialize debugging system */
        KdInitSystem(0, KeLoaderBlock);

        /* Check for break-in */
        if (KdPollBreakIn()) DbgBreakPointWithStatus(DBG_STATUS_CONTROL_C);

        /* Hack! Wait for the debugger! */
#ifdef _WINKD_
        while (!KdPollBreakIn());
        DbgBreakPointWithStatus(DBG_STATUS_CONTROL_C);
#endif
    }

    DPRINT("Pcr = %p, Gdt = %p, Idt = %p, Tss = %p\n",
           Pcr, Pcr->GdtBase, Pcr->IdtBase, Pcr->TssBase);

    /* Acquire lock */
    while (InterlockedBitTestAndSet64((PLONG64)&KiFreezeExecutionLock, 0))
    {
        /* Loop until lock is free */
        while ((*(volatile KSPIN_LOCK*)&KiFreezeExecutionLock) & 1);
    }

    /* Initialize the Processor with HAL */
    HalInitializeProcessor(Cpu, KeLoaderBlock);

    /* Set processor as active. Use a 64-bit wide shift: the previous
       "1 << Cpu" was an int shift, which truncates the mask (and is
       undefined) for processors >= 32. */
    KeActiveProcessors |= (KAFFINITY)1 << Cpu;

    /* Release lock */
    InterlockedAnd64((PLONG64)&KiFreezeExecutionLock, 0);

    /* Raise to HIGH_LEVEL */
    KfRaiseIrql(HIGH_LEVEL);

    /* Switch to new kernel stack and start kernel bootstrapping */
    KiSetupStackAndInitializeKernel(&KiInitialProcess.Pcb,
                                    InitialThread,
                                    (PVOID)InitialStack,
                                    &Pcr->Prcb,
                                    (CCHAR)Cpu,
                                    KeLoaderBlock);
}
/* Runs on the final kernel stack: finishes kernel initialization for this
   processor, then drops the idle thread's priority and enters the idle loop. */
VOID
NTAPI
KiInitializeKernelAndGotoIdleLoop(IN PKPROCESS InitProcess,
                                  IN PKTHREAD InitThread,
                                  IN PVOID IdleStack,
                                  IN PKPRCB Prcb,
                                  IN CCHAR Number,
                                  IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
//  DbgBreakPointWithStatus(0);

    /* Initialize kernel. Pass our LoaderBlock parameter through instead of
       reading the KeLoaderBlock global (same pointer at this point, but the
       parameter was previously ignored). */
    KiInitializeKernel(InitProcess,
                       InitThread,
                       IdleStack,
                       Prcb,
                       Number,
                       LoaderBlock);

    /* Set the priority of this thread to 0 */
    InitThread->Priority = 0;

    /* Force interrupts enabled and lower IRQL back to DISPATCH_LEVEL */
    _enable();
    KeLowerIrql(DISPATCH_LEVEL);

    /* Set the right wait IRQL */
    InitThread->WaitIrql = DISPATCH_LEVEL;

    /* Jump into the idle loop */
    KiIdleLoop();
}

View file

@ -0,0 +1,196 @@
/*
* PROJECT: ReactOS HAL
* LICENSE: GPL - See COPYING in the top level directory
* FILE: hal/halx86/up/spinlock.c
* PURPOSE: Spinlock and Queued Spinlock Support
* PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
*/
/* INCLUDES ******************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
#undef KeAcquireSpinLock
#undef KeReleaseSpinLock
/* FUNCTIONS *****************************************************************/
/*
 * @implemented
 *
 * On UP builds SYNCH_LEVEL equals DISPATCH_LEVEL and no spinning is needed,
 * so acquiring reduces to raising IRQL.
 */
KIRQL
KeAcquireSpinLockRaiseToSynch(PKSPIN_LOCK SpinLock)
{
#ifndef CONFIG_SMP
    KIRQL OldIrql;
    /* Simply raise to dispatch */
    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
    return OldIrql;
#else
    /* SMP spinning not implemented yet; return a defined value instead of
       falling off the end of the function (which was undefined behavior) */
    UNIMPLEMENTED;
    return PASSIVE_LEVEL;
#endif
}
/*
 * @implemented
 *
 * UP build: no lock bit to spin on, only the IRQL raise matters.
 */
KIRQL
NTAPI
KeAcquireSpinLockRaiseToDpc(PKSPIN_LOCK SpinLock)
{
#ifndef CONFIG_SMP
    KIRQL OldIrql;
    /* Simply raise to dispatch */
    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
    return OldIrql;
#else
    /* SMP path not implemented; return a defined value instead of
       falling off the end (undefined behavior before) */
    UNIMPLEMENTED;
    return PASSIVE_LEVEL;
#endif
}
/*
 * @implemented
 *
 * UP build: no lock bit to clear, just restore the caller's IRQL.
 */
VOID
NTAPI
KeReleaseSpinLock(PKSPIN_LOCK SpinLock,
                  KIRQL OldIrql)
{
#ifndef CONFIG_SMP
    /* Simply lower IRQL back */
    KeLowerIrql(OldIrql);
#else
    UNIMPLEMENTED;
#endif
}
/*
 * @implemented
 *
 * UP build: the queued lock itself is never contended, only raise IRQL.
 */
KIRQL
KeAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
{
#ifndef CONFIG_SMP
    KIRQL OldIrql;
    /* Simply raise to dispatch */
    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
    return OldIrql;
#else
    /* SMP queueing not implemented; return a defined value instead of
       falling off the end (undefined behavior before) */
    UNIMPLEMENTED;
    return PASSIVE_LEVEL;
#endif
}
/*
 * @implemented
 *
 * UP build: SYNCH_LEVEL == DISPATCH_LEVEL, so raising to dispatch suffices.
 */
KIRQL
KeAcquireQueuedSpinLockRaiseToSynch(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
{
#ifndef CONFIG_SMP
    KIRQL OldIrql;
    /* Simply raise to dispatch */
    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
    return OldIrql;
#else
    /* SMP queueing not implemented; return a defined value instead of
       falling off the end (undefined behavior before) */
    UNIMPLEMENTED;
    return PASSIVE_LEVEL;
#endif
}
/*
 * @implemented
 *
 * UP build: only record the previous IRQL in the caller's lock handle.
 */
VOID
KeAcquireInStackQueuedSpinLock(IN PKSPIN_LOCK SpinLock,
                               IN PKLOCK_QUEUE_HANDLE LockHandle)
{
#ifndef CONFIG_SMP
    /* Simply raise to dispatch */
    KeRaiseIrql(DISPATCH_LEVEL, &LockHandle->OldIrql);
#else
    UNIMPLEMENTED;
#endif
}
/*
 * @implemented
 *
 * UP build: raise to SYNCH_LEVEL, stashing the old IRQL in the handle.
 */
VOID
KeAcquireInStackQueuedSpinLockRaiseToSynch(IN PKSPIN_LOCK SpinLock,
                                           IN PKLOCK_QUEUE_HANDLE LockHandle)
{
#ifndef CONFIG_SMP
    /* Simply raise to synch */
    KeRaiseIrql(SYNCH_LEVEL, &LockHandle->OldIrql);
#else
    UNIMPLEMENTED;
#endif
}
/*
 * @implemented
 *
 * UP build: no queue entry to release, just restore the caller's IRQL.
 */
VOID
KeReleaseQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
                        IN KIRQL OldIrql)
{
#ifndef CONFIG_SMP
    /* Simply lower IRQL back */
    KeLowerIrql(OldIrql);
#else
    UNIMPLEMENTED;
#endif
}
/*
 * @implemented
 *
 * UP build: restore the IRQL that was saved into the lock handle on acquire.
 */
VOID
KeReleaseInStackQueuedSpinLock(IN PKLOCK_QUEUE_HANDLE LockHandle)
{
#ifndef CONFIG_SMP
    /* Simply lower IRQL back */
    KeLowerIrql(LockHandle->OldIrql);
#else
    UNIMPLEMENTED;
#endif
}
/*
 * @implemented
 *
 * UP build: the lock can never be held by another CPU, so the try always
 * succeeds after raising IRQL.
 */
BOOLEAN
KeTryToAcquireQueuedSpinLockRaiseToSynch(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
                                         IN PKIRQL OldIrql)
{
#ifndef CONFIG_SMP
    /* Simply raise to dispatch */
    KeRaiseIrql(DISPATCH_LEVEL, OldIrql);

    /* Always return true on UP Machines */
    return TRUE;
#else
    /* SMP path not implemented; report failure instead of falling off
       the end of the function (which was undefined behavior) */
    UNIMPLEMENTED;
    return FALSE;
#endif
}
/*
 * @implemented
 *
 * UP build: uncontended by construction, so the try always succeeds.
 */
LOGICAL
KeTryToAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber,
                             OUT PKIRQL OldIrql)
{
#ifndef CONFIG_SMP
    /* Simply raise to dispatch */
    KeRaiseIrql(DISPATCH_LEVEL, OldIrql);

    /* Always return true on UP Machines */
    return TRUE;
#else
    /* SMP path not implemented; report failure instead of falling off
       the end of the function (which was undefined behavior) */
    UNIMPLEMENTED;
    return FALSE;
#endif
}
/* EOF */

View file

@ -0,0 +1,188 @@
/*
* PROJECT: ReactOS Kernel
* LICENSE: GPL - See COPYING in the top level directory
* PURPOSE: stubs
* PROGRAMMERS: Timo Kreuzer (timo.kreuzer@reactos.org)
*/
/* INCLUDES ******************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
/* Zeroes Size bytes at Address; plain memory fill for now, an
   XMM-optimized path could be added later. */
VOID
FASTCALL
KeZeroPages(IN PVOID Address,
            IN ULONG Size)
{
    /* Not using XMMI in this routine */
    RtlZeroMemory(Address, Size);
}
/* System-service exit path (restore trap frame, deliver APCs) — not
   implemented on amd64 yet. */
VOID
FASTCALL
DECLSPEC_NORETURN
KiServiceExit(IN PKTRAP_FRAME TrapFrame,
              IN NTSTATUS Status)
{
    UNIMPLEMENTED;

    /* Declared noreturn: falling off the end would be undefined behavior,
       so spin forever until this is implemented */
    for (;;);
}
/* Alternate trap exit path — not implemented on amd64 yet. */
VOID
FASTCALL
DECLSPEC_NORETURN
KiServiceExit2(IN PKTRAP_FRAME TrapFrame)
{
    UNIMPLEMENTED;

    /* Declared noreturn: falling off the end would be undefined behavior,
       so spin forever until this is implemented */
    for (;;);
}
BOOLEAN
NTAPI
KeConnectInterrupt(IN PKINTERRUPT Interrupt)
{
    /* Not implemented yet; report failure so IoConnectInterrupt fails cleanly */
    UNIMPLEMENTED;
    return FALSE;
}
PVOID
NTAPI
KeSwitchKernelStack(PVOID StackBase, PVOID StackLimit)
{
    /* Not implemented yet; NULL signals that no stack switch happened */
    UNIMPLEMENTED;
    return NULL;
}
BOOLEAN
NTAPI
KeSynchronizeExecution(
    IN OUT PKINTERRUPT Interrupt,
    IN PKSYNCHRONIZE_ROUTINE SynchronizeRoutine,
    IN PVOID SynchronizeContext)
{
    /* Not implemented yet; FALSE tells the caller the routine did not run */
    UNIMPLEMENTED;
    return FALSE;
}
NTSTATUS
NTAPI
KeUserModeCallback(IN ULONG RoutineIndex,
                   IN PVOID Argument,
                   IN ULONG ArgumentLength,
                   OUT PVOID *Result,
                   OUT PULONG ResultLength)
{
    /* User-mode callout not implemented on amd64 yet */
    UNIMPLEMENTED;
    return STATUS_UNSUCCESSFUL;
}
/* Idle loop placeholder; currently just busy-spins.
   NOTE(review): declared without NTAPI/(VOID) unlike its siblings -- confirm
   the intended prototype before connecting more callers. */
VOID
KiIdleLoop()
{
    UNIMPLEMENTED;
    for(;;);
}
/* Builds the user-mode APC delivery frame — not implemented on amd64 yet. */
VOID
NTAPI
KiInitializeUserApc(IN PKEXCEPTION_FRAME ExceptionFrame,
                    IN PKTRAP_FRAME TrapFrame,
                    IN PKNORMAL_ROUTINE NormalRoutine,
                    IN PVOID NormalContext,
                    IN PVOID SystemArgument1,
                    IN PVOID SystemArgument2)
{
    UNIMPLEMENTED;
}
/* Address-space switch between processes — not implemented on amd64 yet. */
VOID
NTAPI
KiSwapProcess(IN PKPROCESS NewProcess,
              IN PKPROCESS OldProcess)
{
    UNIMPLEMENTED;
}
/* System-service dispatcher entry — not implemented on amd64 yet. */
VOID
KiSystemService(IN PKTHREAD Thread,
                IN PKTRAP_FRAME TrapFrame,
                IN ULONG Instruction)
{
    UNIMPLEMENTED;
}
/* Returns from a user-mode callback to the kernel — not implemented yet. */
NTSYSAPI
NTSTATUS
NTAPI
NtCallbackReturn
( IN PVOID Result OPTIONAL, IN ULONG ResultLength, IN NTSTATUS Status )
{
    UNIMPLEMENTED;
    return STATUS_UNSUCCESSFUL;
}
/* LDT manipulation; the LDT is unused on amd64, so this remains a stub. */
NTSTATUS
NTAPI
NtSetLdtEntries
(ULONG Selector1, LDT_ENTRY LdtEntry1, ULONG Selector2, LDT_ENTRY LdtEntry2)
{
    UNIMPLEMENTED;
    return STATUS_UNSUCCESSFUL;
}
/* VDM (Virtual DOS Machine) control; not applicable/implemented on amd64. */
NTSTATUS
NTAPI
NtVdmControl(IN ULONG ControlCode,
             IN PVOID ControlData)
{
    UNIMPLEMENTED;
    return STATUS_UNSUCCESSFUL;
}
/* Kernel-side helper for KeUserModeCallback — not implemented yet. */
NTSTATUS
NTAPI
KiCallUserMode(
    IN PVOID *OutputBuffer,
    IN PULONG OutputLength)
{
    UNIMPLEMENTED;
    return STATUS_UNSUCCESSFUL;
}
#undef ExQueryDepthSList
NTKERNELAPI
USHORT
ExQueryDepthSList(IN PSLIST_HEADER ListHead)
{
    /* The list depth is packed into the low 16 bits of the header;
       the USHORT cast performs the truncation */
    return (USHORT)ListHead->Alignment;
}
/* NOTE(review): module-scope counter; its consumer is not visible in this file */
ULONG ProcessCount;
#ifdef _MSC_VER
/* Stubs satisfying MSVC-generated references (GS stack cookie machinery,
   prefetcher flag, CRT entry point) until the real implementations exist. */
void
__GSHandlerCheck()
{
}
int __security_cookie;
void
__security_check_cookie()
{
}
BOOLEAN CcPfEnablePrefetcher;
unsigned long __readfsdword(const unsigned long Offset)
{
    /* NOTE(review): stub always returns 0; amd64 kernel data lives off gs,
       not fs -- confirm nothing relies on a real read here */
    return 0;
}
void main()
{
}
#endif

View file

@ -0,0 +1,193 @@
/*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/ke/i386/thread.c
* PURPOSE: i386 Thread Context Creation
* PROGRAMMER: Alex Ionescu (alex@relsoft.net)
*/
/* INCLUDES ******************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
/* Frame pushed/popped by the context-switch path; RetAddr is where the
   switch "returns" to (KiThreadStartup for a freshly created thread). */
typedef struct _KSWITCHFRAME
{
    PVOID ExceptionList;
    BOOLEAN ApcBypassDisable;
    PVOID RetAddr;
} KSWITCHFRAME, *PKSWITCHFRAME;

/* Arguments consumed by KiThreadStartup to invoke the thread's entry points */
typedef struct _KSTART_FRAME
{
    PKSYSTEM_ROUTINE SystemRoutine;
    PKSTART_ROUTINE StartRoutine;
    PVOID StartContext;
    BOOLEAN UserThread;
} KSTART_FRAME, *PKSTART_FRAME;

/* Initial stack layout for a user-mode thread: switch frame, start frame,
   then the trap frame used to enter user mode */
typedef struct _KUINIT_FRAME
{
    KSWITCHFRAME CtxSwitchFrame;
    KSTART_FRAME StartFrame;
    KTRAP_FRAME TrapFrame;
    //FX_SAVE_AREA FxSaveArea;
} KUINIT_FRAME, *PKUINIT_FRAME;

/* Initial stack layout for a kernel-only thread (no trap frame needed) */
typedef struct _KKINIT_FRAME
{
    KSWITCHFRAME CtxSwitchFrame;
    KSTART_FRAME StartFrame;
    //FX_SAVE_AREA FxSaveArea;
} KKINIT_FRAME, *PKKINIT_FRAME;
/* FUNCTIONS *****************************************************************/
/* Lays out the initial kernel stack of a new thread so that the first
   context switch "returns" into KiThreadStartup. User-mode threads
   additionally get a trap frame built from the supplied CONTEXT. The exact
   frame ordering below mirrors the K*INIT_FRAME structures above. */
VOID
NTAPI
KiInitializeContextThread(IN PKTHREAD Thread,
                          IN PKSYSTEM_ROUTINE SystemRoutine,
                          IN PKSTART_ROUTINE StartRoutine,
                          IN PVOID StartContext,
                          IN PCONTEXT ContextPointer)
{
    //PFX_SAVE_AREA FxSaveArea;
    //PFXSAVE_FORMAT FxSaveFormat;
    PKSTART_FRAME StartFrame;
    PKSWITCHFRAME CtxSwitchFrame;
    PKTRAP_FRAME TrapFrame;
    CONTEXT LocalContext;
    PCONTEXT Context = NULL;
    ULONG ContextFlags;

    /* Check if this is a With-Context Thread (i.e. a user-mode thread) */
    if (ContextPointer)
    {
        /* Set up the Initial Frame at the top of the kernel stack */
        PKUINIT_FRAME InitFrame;
        InitFrame = (PKUINIT_FRAME)((ULONG_PTR)Thread->InitialStack -
                                    sizeof(KUINIT_FRAME));

        /* Copy over the context we got (work on a local copy) */
        RtlCopyMemory(&LocalContext, ContextPointer, sizeof(CONTEXT));
        Context = &LocalContext;
        ContextFlags = CONTEXT_CONTROL;

        /* Zero out the trap frame and save area */
        RtlZeroMemory(&InitFrame->TrapFrame,
                      KTRAP_FRAME_LENGTH);

        /* Setup the Fx Area */
        //FxSaveArea = &InitFrame->FxSaveArea;

//        /* Get the FX Save Format Area */
//        FxSaveFormat = (PFXSAVE_FORMAT)Context->ExtendedRegisters;
//
//        /* Set an initial state */
//        FxSaveFormat->ControlWord = 0x27F;
//        FxSaveFormat->StatusWord = 0;
//        FxSaveFormat->TagWord = 0;
//        FxSaveFormat->ErrorOffset = 0;
//        FxSaveFormat->ErrorSelector = 0;
//        FxSaveFormat->DataOffset = 0;
//        FxSaveFormat->DataSelector = 0;
//        FxSaveFormat->MXCsr = 0x1F80;

        /* Set an initial NPX State */
        //Context->FloatSave.Cr0NpxState = 0;
        //FxSaveArea->Cr0NpxState = 0;
        //FxSaveArea->NpxSavedCpu = 0;

        /* Now set the context flags depending on XMM support */
        //ContextFlags |= (KeI386FxsrPresent) ? CONTEXT_EXTENDED_REGISTERS :
        //                                      CONTEXT_FLOATING_POINT;

        /* Set the Thread's NPX State */
        Thread->NpxState = 0xA;
        Thread->DispatcherHeader.NpxIrql = PASSIVE_LEVEL;

        /* Disable any debug registers */
        Context->ContextFlags &= ~CONTEXT_DEBUG_REGISTERS;

        /* Setup the Trap Frame */
        TrapFrame = &InitFrame->TrapFrame;

        /* Set up a trap frame from the context. */
        KeContextToTrapFrame(Context,
                             NULL,
                             TrapFrame,
                             Context->ContextFlags | ContextFlags,
                             UserMode);

        /* Set SS, DS, ES's RPL Mask properly (ring 3) */
        TrapFrame->SegSs |= RPL_MASK;
        TrapFrame->SegDs |= RPL_MASK;
        TrapFrame->SegEs |= RPL_MASK;
        TrapFrame->Dr7 = 0;

        /* Set the previous mode as user */
        TrapFrame->PreviousMode = UserMode;

        /* Terminate the Exception Handler List */
        TrapFrame->ExceptionFrame = 0;

        /* Setup the Stack for KiThreadStartup and Context Switching */
        StartFrame = &InitFrame->StartFrame;
        CtxSwitchFrame = &InitFrame->CtxSwitchFrame;

        /* Tell the thread it will run in User Mode */
        Thread->PreviousMode = UserMode;

        /* Tell KiThreadStartup of that too */
        StartFrame->UserThread = TRUE;
    }
    else
    {
        /* Set up the Initial Frame for the system thread */
        PKKINIT_FRAME InitFrame;
        InitFrame = (PKKINIT_FRAME)((ULONG_PTR)Thread->InitialStack -
                                    sizeof(KKINIT_FRAME));

        /* Setup the Fx Area */
        //FxSaveArea = &InitFrame->FxSaveArea;
        //RtlZeroMemory(FxSaveArea, sizeof(FX_SAVE_AREA));

        /* Check if we have Fxsr support */
        DPRINT1("FxsrPresent but did nothing\n");
//        /* Set the stub FX area */
//        FxSaveArea->U.FxArea.ControlWord = 0x27F;
//        FxSaveArea->U.FxArea.MXCsr = 0x1F80;

        /* No NPX State */
        Thread->NpxState = 0xA;

        /* Setup the Stack for KiThreadStartup and Context Switching */
        StartFrame = &InitFrame->StartFrame;
        CtxSwitchFrame = &InitFrame->CtxSwitchFrame;

        /* Tell the thread it will run in Kernel Mode */
        Thread->PreviousMode = KernelMode;

        /* Tell KiThreadStartup of that too */
        StartFrame->UserThread = FALSE;
    }

    /* Now setup the remaining data for KiThreadStartup */
    StartFrame->StartContext = StartContext;
    StartFrame->StartRoutine = StartRoutine;
    StartFrame->SystemRoutine = SystemRoutine;

    /* And set up the Context Switch Frame */
    CtxSwitchFrame->RetAddr = KiThreadStartup;
    CtxSwitchFrame->ApcBypassDisable = TRUE;
    CtxSwitchFrame->ExceptionList = EXCEPTION_CHAIN_END;;

    /* Save back the new value of the kernel stack. */
    Thread->KernelStack = (PVOID)CtxSwitchFrame;
}
/* EOF */

View file

@ -0,0 +1,948 @@
/*
* FILE: ntoskrnl/ke/amd64/trap.S
* COPYRIGHT: See COPYING in the top level directory
* PURPOSE: System Traps, Entrypoints and Exitpoints
* PROGRAMMER: Timo Kreuzer (timo.kreuzer@reactos.org)
*/
/* INCLUDES ******************************************************************/
#include <reactos/asm.h>
#include <ndk/amd64/asm.h>
EXTERN KiDispatchException:PROC
EXTERN FrLdrDbgPrint:DWORD
EXTERN KeBugCheckWithTf:PROC
EXTERN MmAccessFault:PROC
EXTERN KiSystemFatalException:PROC
EXTERN KiNpxNotAvailableFaultHandler:PROC
EXTERN KiGeneralProtectionFaultHandler:PROC
EXTERN KiXmmExceptionHandler:PROC
/* GLOBALS *******************************************************************/
.data

/* Format strings for early debug output via FrLdrDbgPrint */
PUBLIC MsgUnimplemented
MsgUnimplemented:
.asciz "WARNING: %s at %s:%d is UNIMPLEMENTED!\n"

MsgPageFault:
.asciz "Page fault! Code = 0x%x, RIP = %p, FaultingAddress = %p\n"

MsgGeneralProtFault:
.asciz "General protection fault at %p!\n"

MsgBreakpointTrap:
.asciz "BreakpointTrap at %p\n"

MsgUnexpectedInterrupt:
.asciz "UnexpectedInterrupt Vector=0x%02lx\n"

MsgInvalidOpcodeFault:
.asciz "Invalid opcode fault at %p!\n"

MsgDoubleFault:
.asciz "Double fault at %p, rbp=%p!\n"

MsgTrapInfo:
.asciz "Trap: %s at %p\n"
/* Debug helper: prints "Trap: <name> at <rip>" via FrLdrDbgPrint.
   Currently compiled out (#if 0).
   NOTE(review): 'lea rdx, 1b[rip]' references a numeric label, but the
   string lives at 'label1' -- confirm before enabling this block. */
MACRO(TRAPINFO, func)
LOCAL label1, label2
#if 0
    jmp label2
label1:  .asciz "\func"
label2:
    sub rsp, 32
    lea rcx, MsgTrapInfo[rip]
    lea rdx, 1b[rip]
    mov r8, [rbp + KTRAP_FRAME_Rip]
    call qword ptr FrLdrDbgPrint[rip]
    add rsp, 32
#endif
ENDM
/* Helper Macros *************************************************************/
#define TRAPFLAG_VOLATILES HEX(01)
#define TRAPFLAG_NONVOLATILES HEX(02)
#define TRAPFLAG_XMM HEX(04)
#define TRAPFLAG_SEGMENTS HEX(08)
#define TRAPFLAG_DEBUG HEX(10)
#define TRAPFLAG_HAS_ERRORCODE HEX(100)
#define TRAPFLAG_SYSTEMSERVICE (TRAPFLAG_VOLATILES|TRAPFLAG_DEBUG)
#define TRAPFLAG_ALL HEX(ff)
/*
* Stack Layout:
* |-------------------|
* | KTRAP_FRAME |
* |-------------------| <- rbp
* | EXCEPTION_RECORD |
* |-------------------|
* | KEXCEPTION_FRAME |
* |-------------------| <- rsp
*
*/
/*
 * ENTER_TRAP_FRAME - Allocate SIZE_KTRAP_FRAME and save registers to it.
 * On exit: rbp points at the KTRAP_FRAME, gs holds the kernel base if we
 * arrived from user mode, direction flag is clear. Which register groups
 * are saved is selected by the TRAPFLAG_* bits in Flags.
 */
MACRO(ENTER_TRAP_FRAME, Flags)
LOCAL dont_swap
    /* Save the trap flags for this trap */
    TRAPFLAGS = VAL(Flags)

    /* Size of hardware trap frame (CPU pushed an error code or not) */
    if (TRAPFLAGS AND TRAPFLAG_HAS_ERRORCODE)
        .pushframe code
        SIZE_INITIAL_FRAME = 6 * 8
    else
        .pushframe
        SIZE_INITIAL_FRAME = 5 * 8
    endif

    /* Make room for a KTRAP_FRAME */
    sub rsp, (SIZE_KTRAP_FRAME - SIZE_INITIAL_FRAME)
    .allocstack (SIZE_KTRAP_FRAME - SIZE_INITIAL_FRAME)
    .endprolog

    /* Save rbp */
    mov [rsp + KTRAP_FRAME_Rbp], rbp

    /* Point rbp to the KTRAP_FRAME */
    lea rbp, [rsp]

    if (TRAPFLAGS AND TRAPFLAG_NONVOLATILES)
        /* Save non-volatile registers */
        mov [rbp + KTRAP_FRAME_Rbx], rbx
        mov [rbp + KTRAP_FRAME_Rdi], rdi
        mov [rbp + KTRAP_FRAME_Rsi], rsi
    endif

    if (TRAPFLAGS AND TRAPFLAG_VOLATILES)
        /* Save volatile registers */
        mov [rbp + KTRAP_FRAME_Rax], rax
        mov [rbp + KTRAP_FRAME_Rcx], rcx
        mov [rbp + KTRAP_FRAME_Rdx], rdx
        mov [rbp + KTRAP_FRAME_R8], r8
        mov [rbp + KTRAP_FRAME_R9], r9
        mov [rbp + KTRAP_FRAME_R10], r10
        mov [rbp + KTRAP_FRAME_R11], r11
    endif

    if (TRAPFLAGS AND TRAPFLAG_XMM)
        /* Save the volatile xmm registers */
        movdqa [rbp + KTRAP_FRAME_Xmm0], xmm0
        movdqa [rbp + KTRAP_FRAME_Xmm1], xmm1
        movdqa [rbp + KTRAP_FRAME_Xmm2], xmm2
        movdqa [rbp + KTRAP_FRAME_Xmm3], xmm3
        movdqa [rbp + KTRAP_FRAME_Xmm4], xmm4
        movdqa [rbp + KTRAP_FRAME_Xmm5], xmm5
    endif

    if (TRAPFLAGS AND TRAPFLAG_SEGMENTS)
        /* Save segment selectors */
        mov ax, ds
        mov [rbp + KTRAP_FRAME_SegDs], ax
        mov ax, es
        mov [rbp + KTRAP_FRAME_SegEs], ax
        mov ax, fs
        mov [rbp + KTRAP_FRAME_SegFs], ax
        mov ax, gs
        mov [rbp + KTRAP_FRAME_SegGs], ax
    endif

    /* Save previous mode (low bit of the interrupted CS selector) and
       swap gs to the kernel base when we came from user mode */
    mov ax, [rbp + KTRAP_FRAME_SegCs]
    and al, 1
    mov [rbp + KTRAP_FRAME_PreviousMode], al
    jz dont_swap
    swapgs
dont_swap:

    /* Save previous irql (read from CR8) */
    mov rax, cr8
    mov [rbp + KTRAP_FRAME_PreviousIrql], al

// KTRAP_FRAME_FaultIndicator
// KTRAP_FRAME_ExceptionActive
// KTRAP_FRAME_MxCsr

    if (TRAPFLAGS AND TRAPFLAG_DEBUG)
        /* Save debug registers */
        mov rax, dr0
        mov [rbp + KTRAP_FRAME_Dr0], rax
        mov rax, dr1
        mov [rbp + KTRAP_FRAME_Dr1], rax
        mov rax, dr2
        mov [rbp + KTRAP_FRAME_Dr2], rax
        mov rax, dr3
        mov [rbp + KTRAP_FRAME_Dr3], rax
        mov rax, dr6
        mov [rbp + KTRAP_FRAME_Dr6], rax
        mov rax, dr7
        mov [rbp + KTRAP_FRAME_Dr7], rax
    endif

// KTRAP_FRAME_DebugControl
// KTRAP_FRAME_LastBranchToRip
// KTRAP_FRAME_LastBranchFromRip
// KTRAP_FRAME_LastExceptionToRip
// KTRAP_FRAME_LastExceptionFromRip
// KTRAP_FRAME_TrapFrame

    /* Make sure the direction flag is cleared */
    cld
ENDM
/*
 * LEAVE_TRAP_FRAME - Restore registers and free stack space.
 * Mirrors ENTER_TRAP_FRAME; leaves rsp pointing at the hardware frame
 * (offset KTRAP_FRAME_Rip) so the caller can iretq.
 * gs is swapped back via swapgs; debug registers are not restored here.
 */
MACRO(LEAVE_TRAP_FRAME)
LOCAL dont_swap_back
    if (TRAPFLAGS AND TRAPFLAG_SEGMENTS)
        /* Restore segment selectors (gs is handled by swapgs below) */
        mov ax, [rbp + KTRAP_FRAME_SegDs]
        mov ds, ax
        mov ax, [rbp + KTRAP_FRAME_SegEs]
        mov es, ax
        mov ax, [rbp + KTRAP_FRAME_SegFs]
        mov fs, ax
    endif

    /* Swap gs back if we entered from user mode */
    test byte ptr [rbp + KTRAP_FRAME_PreviousMode], 1
    jz dont_swap_back
    swapgs
dont_swap_back:

    if (TRAPFLAGS AND TRAPFLAG_NONVOLATILES)
        /* Restore non-volatile registers */
        mov rbx, [rbp + KTRAP_FRAME_Rbx]
        mov rdi, [rbp + KTRAP_FRAME_Rdi]
        mov rsi, [rbp + KTRAP_FRAME_Rsi]
    endif

    if (TRAPFLAGS AND TRAPFLAG_VOLATILES)
        /* Restore volatile registers */
        mov rax, [rbp + KTRAP_FRAME_Rax]
        mov rcx, [rbp + KTRAP_FRAME_Rcx]
        mov rdx, [rbp + KTRAP_FRAME_Rdx]
        mov r8, [rbp + KTRAP_FRAME_R8]
        mov r9, [rbp + KTRAP_FRAME_R9]
        mov r10, [rbp + KTRAP_FRAME_R10]
        mov r11, [rbp + KTRAP_FRAME_R11]
    endif

    if (TRAPFLAGS AND TRAPFLAG_XMM)
        /* Restore xmm registers */
        movdqa xmm0, [rbp + KTRAP_FRAME_Xmm0]
        movdqa xmm1, [rbp + KTRAP_FRAME_Xmm1]
        movdqa xmm2, [rbp + KTRAP_FRAME_Xmm2]
        movdqa xmm3, [rbp + KTRAP_FRAME_Xmm3]
        movdqa xmm4, [rbp + KTRAP_FRAME_Xmm4]
        movdqa xmm5, [rbp + KTRAP_FRAME_Xmm5]
    endif

    /* Restore rbp */
    mov rbp, [rbp + KTRAP_FRAME_Rbp]

    /* Adjust stack pointer to point at the hardware frame */
    add rsp, KTRAP_FRAME_Rip
ENDM
/* FUNCTIONS *****************************************************************/
.text
.code64

ALIGN 8
PUBLIC InterruptDispatchTable
/* 256 stubs, 8 bytes apart: each pushes its vector number and jumps
   to the common unexpected-interrupt handler */
InterruptDispatchTable:
Vector = 0
REPEAT 256
    push Vector
    jmp KiUnexpectedInterrupt
    ALIGN 8
    Vector = Vector+1
ENDR
// rbp = TrapFrame, eax = ExceptionCode, edx = NumParams, r9,r10,r11 = params
/* Builds an EXCEPTION_RECORD and a KEXCEPTION_FRAME on the stack and calls
   KiDispatchException (first chance). Preserves the non-volatile integer
   and xmm registers around the call. */
.PROC InternalDispatchException

    /* Allocate stack space for EXCEPTION_RECORD and KEXCEPTION_FRAME */
    sub rsp, SIZE_EXCEPTION_RECORD + SIZE_KEXCEPTION_FRAME
    .allocstack (SIZE_EXCEPTION_RECORD + SIZE_KEXCEPTION_FRAME)
    .endprolog

    /* Set up EXCEPTION_RECORD (lives above the exception frame) */
    lea rcx, [rsp + SIZE_KEXCEPTION_FRAME]
    mov [rcx + EXCEPTION_RECORD_ExceptionCode], eax
    xor rax, rax
    mov [rcx + EXCEPTION_RECORD_ExceptionFlags], eax
    mov [rcx + EXCEPTION_RECORD_ExceptionRecord], rax
    mov rax, [rbp + KTRAP_FRAME_Rip]
    mov [rcx + EXCEPTION_RECORD_ExceptionAddress], rax
    mov [rcx + EXCEPTION_RECORD_NumberParameters], edx
    mov [rcx + EXCEPTION_RECORD_ExceptionInformation + HEX(00)], r9
    mov [rcx + EXCEPTION_RECORD_ExceptionInformation + HEX(08)], r10
    mov [rcx + EXCEPTION_RECORD_ExceptionInformation + HEX(10)], r11

    /* Set up KEXCEPTION_FRAME with the caller's non-volatile state */
    mov rax, [rbp + KTRAP_FRAME_Rbp]
    mov [rsp + KEXCEPTION_FRAME_Rbp], rax
    mov [rsp + KEXCEPTION_FRAME_Rbx], rbx
    mov [rsp + KEXCEPTION_FRAME_Rdi], rdi
    mov [rsp + KEXCEPTION_FRAME_Rsi], rsi
    mov [rsp + KEXCEPTION_FRAME_R12], r12
    mov [rsp + KEXCEPTION_FRAME_R13], r13
    mov [rsp + KEXCEPTION_FRAME_R14], r14
    mov [rsp + KEXCEPTION_FRAME_R15], r15
    movdqa [rsp + KEXCEPTION_FRAME_Xmm6], xmm6
    movdqa [rsp + KEXCEPTION_FRAME_Xmm7], xmm7
    movdqa [rsp + KEXCEPTION_FRAME_Xmm8], xmm8
    movdqa [rsp + KEXCEPTION_FRAME_Xmm9], xmm9
    movdqa [rsp + KEXCEPTION_FRAME_Xmm10], xmm10
    movdqa [rsp + KEXCEPTION_FRAME_Xmm11], xmm11
    movdqa [rsp + KEXCEPTION_FRAME_Xmm12], xmm12
    movdqa [rsp + KEXCEPTION_FRAME_Xmm13], xmm13
    movdqa [rsp + KEXCEPTION_FRAME_Xmm14], xmm14
    movdqa [rsp + KEXCEPTION_FRAME_Xmm15], xmm15
    mov qword ptr [rsp + KEXCEPTION_FRAME_Return], 0

    /* Call KiDispatchException */
    // rcx already points to ExceptionRecord
    mov rdx, rsp                                // ExceptionFrame
    mov r8, rbp                                 // TrapFrame
    mov r9b, [r8 + KTRAP_FRAME_PreviousMode]    // PreviousMode
    mov byte ptr [rsp + KEXCEPTION_FRAME_P5], 1 // FirstChance
    call KiDispatchException

    /* Restore the non-volatile registers */
    mov r12, [rsp + KEXCEPTION_FRAME_R12]
    mov r13, [rsp + KEXCEPTION_FRAME_R13]
    mov r14, [rsp + KEXCEPTION_FRAME_R14]
    mov r15, [rsp + KEXCEPTION_FRAME_R15]
    movdqa xmm6, [rsp + KEXCEPTION_FRAME_Xmm6]
    movdqa xmm7, [rsp + KEXCEPTION_FRAME_Xmm7]
    movdqa xmm8, [rsp + KEXCEPTION_FRAME_Xmm8]
    movdqa xmm9, [rsp + KEXCEPTION_FRAME_Xmm9]
    movdqa xmm10, [rsp + KEXCEPTION_FRAME_Xmm10]
    movdqa xmm11, [rsp + KEXCEPTION_FRAME_Xmm11]
    movdqa xmm12, [rsp + KEXCEPTION_FRAME_Xmm12]
    movdqa xmm13, [rsp + KEXCEPTION_FRAME_Xmm13]
    movdqa xmm14, [rsp + KEXCEPTION_FRAME_Xmm14]
    movdqa xmm15, [rsp + KEXCEPTION_FRAME_Xmm15]
    add rsp, SIZE_EXCEPTION_RECORD + SIZE_KEXCEPTION_FRAME
    ret
.ENDP InternalDispatchException
/* SOFTWARE INTERRUPT SERVICES ***********************************************/
/* Trap 0 (#DE): divide error. No hardware error code. Dispatched as
 * STATUS_INTEGER_DIVIDE_BY_ZERO with no exception parameters. */
PUBLIC KiDivideErrorFault
.PROC KiDivideErrorFault
/* Push pseudo error code */
ENTER_TRAP_FRAME TRAPFLAG_ALL
/* Enable interrupts */
sti
/* Dispatch the exception */
mov eax, STATUS_INTEGER_DIVIDE_BY_ZERO
mov edx, 0
mov r9, 0
mov r10, 0
mov r11, 0
call InternalDispatchException
/* Return */
LEAVE_TRAP_FRAME
iretq
.ENDP KiDivideErrorFault
/* Trap 1 (#DB): debug trap/fault (single step, hardware breakpoints).
 * No hardware error code. Interrupts are only re-enabled for user-mode
 * frames; a kernel-mode #DB is dispatched with interrupts still off. */
PUBLIC KiDebugTrapOrFault
.PROC KiDebugTrapOrFault
/* Push pseudo error code */
ENTER_TRAP_FRAME TRAPFLAG_ALL
TRAPINFO KiDebugTrapOrFault
/* Check if the frame was from kernelmode */
test word ptr [rbp + KTRAP_FRAME_SegCs], 3
jz KiDebugTrapOrFaultKMode
/* Enable interrupts for user-mode */
sti
KiDebugTrapOrFaultKMode:
/* Dispatch the exception */
mov eax, STATUS_SINGLE_STEP
mov edx, 0
mov r9, 0
mov r10, 0
mov r11, 0
call InternalDispatchException
/* Return */
LEAVE_TRAP_FRAME
iretq
.ENDP KiDebugTrapOrFault
/* Trap 2: non-maskable interrupt. Not implemented yet: prints a message
 * and spins forever (the LEAVE_TRAP_FRAME/iretq below is unreachable). */
PUBLIC KiNmiInterrupt
.PROC KiNmiInterrupt
/* Push pseudo error code */
ENTER_TRAP_FRAME TRAPFLAG_ALL
UNIMPLEMENTED KiNmiInterrupt
/* Hang: jump-to-self */
jmp $
/* Return */
LEAVE_TRAP_FRAME
iretq
.ENDP KiNmiInterrupt
/* Trap 3 (#BP): INT3 breakpoint. No hardware error code. Dispatched as
 * STATUS_BREAKPOINT.
 * NOTE(review): NumberParameters is 3 but all three parameters are zero
 * here; compare KiDebugServiceTrap, which fills them in -- verify the
 * intended parameter count for a plain INT3. */
PUBLIC KiBreakpointTrap
.PROC KiBreakpointTrap
/* Push pseudo error code */
ENTER_TRAP_FRAME TRAPFLAG_ALL
TRAPINFO KiBreakpointTrap
// lea rcx, MsgBreakpointTrap[rip]
// mov rdx, rsp
// call qword ptr FrLdrDbgPrint[rip]
/* Dispatch the exception */
mov eax, STATUS_BREAKPOINT
mov edx, 3
mov r9, 0
mov r10, 0
mov r11, 0
call InternalDispatchException
/* Return */
LEAVE_TRAP_FRAME
iretq
.ENDP KiBreakpointTrap
/* Trap 4 (#OF): INTO overflow trap. No hardware error code. Dispatched as
 * STATUS_INTEGER_OVERFLOW.
 * NOTE(review): NumberParameters is 3 with all parameters zero -- confirm
 * whether 0 was intended, as in KiDivideErrorFault. */
PUBLIC KiOverflowTrap
.PROC KiOverflowTrap
/* Push pseudo error code */
ENTER_TRAP_FRAME TRAPFLAG_ALL
/* Enable interrupts */
sti
/* Dispatch the exception */
mov eax, STATUS_INTEGER_OVERFLOW
mov edx, 3
mov r9, 0
mov r10, 0
mov r11, 0
call InternalDispatchException
/* Return */
LEAVE_TRAP_FRAME
iretq
.ENDP KiOverflowTrap
/* Trap 5 (#BR): BOUND range exceeded. No hardware error code.
 * Fatal (bugcheck) when raised from kernel mode; for user mode it is
 * dispatched as an array-bounds exception with no parameters. */
PUBLIC KiBoundFault
.PROC KiBoundFault
    /* Push pseudo error code */
    ENTER_TRAP_FRAME TRAPFLAG_ALL

    /* Check if the frame was from kernelmode */
    test word ptr [rbp + KTRAP_FRAME_SegCs], 3
    jnz KiBoundFaltUserMode

    /* Bugcheck */
    mov ecx, EXCEPTION_BOUND_CHECK
    mov rdx, rbp
    call KiSystemFatalException

KiBoundFaltUserMode:
    /* Enable interrupts for user-mode */
    sti

    /* Dispatch the exception. A BOUND violation is an array-bounds
     * exception, not an integer overflow, and carries no parameters. */
    mov eax, STATUS_ARRAY_BOUNDS_EXCEEDED
    mov edx, 0
    mov r9, 0
    mov r10, 0
    mov r11, 0
    call InternalDispatchException

    /* Return */
    LEAVE_TRAP_FRAME
    iretq
.ENDP KiBoundFault
/* Trap 6 (#UD): invalid opcode. No hardware error code. Dispatched as
 * STATUS_ILLEGAL_INSTRUCTION. The kernel/user distinction is currently
 * cosmetic: both paths fall through to the same dispatch (see FIXME). */
PUBLIC KiInvalidOpcodeFault
.PROC KiInvalidOpcodeFault
/* Push pseudo error code */
ENTER_TRAP_FRAME TRAPFLAG_ALL
TRAPINFO KiInvalidOpcodeFault
/* Debug print of the faulting RIP (clobbers rcx/rdx, both volatile) */
mov rdx, [rbp + KTRAP_FRAME_Rip]
lea rcx, MsgInvalidOpcodeFault[rip]
call qword ptr FrLdrDbgPrint[rip]
/* Enable interrupts */
sti
/* Check if the frame was from kernelmode */
test word ptr [rbp + KTRAP_FRAME_SegCs], 3
jz KiInvalidOpcodeKernel
// FIXME: handle STATUS_INVALID_LOCK_SEQUENCE
KiInvalidOpcodeKernel:
/* Kernel mode fault */
/* Dispatch the exception */
mov eax, STATUS_ILLEGAL_INSTRUCTION
mov edx, 3
mov r9, 0
mov r10, 0
mov r11, 0
call InternalDispatchException
/* Return */
LEAVE_TRAP_FRAME
iretq
.ENDP KiInvalidOpcodeFault
/* Trap 7 (#NM): device (FPU) not available. No hardware error code.
 * Delegates to the C handler KiNpxNotAvailableFaultHandler(TrapFrame);
 * a zero return means the fault was handled (lazy FPU restore), a
 * nonzero return is dispatched as an exception. */
PUBLIC KiNpxNotAvailableFault
.PROC KiNpxNotAvailableFault
/* Push pseudo error code */
ENTER_TRAP_FRAME TRAPFLAG_ALL
/* Call the C handler */
mov rcx, rbp
call KiNpxNotAvailableFaultHandler
/* Check the return status code */
test eax, eax
jz KiNpxNotAvailableFaultExit
/* Dispatch the exception. eax still holds the handler's status, which
 * InternalDispatchException takes as the exception code. */
mov edx, 3
mov r9, 0
mov r10, 0
mov r11, 0
call InternalDispatchException
KiNpxNotAvailableFaultExit:
/* Return */
LEAVE_TRAP_FRAME
iretq
.ENDP KiNpxNotAvailableFault
/* Trap 8: double fault abort. Prints diagnostics, then bugchecks with
 * code 8 (EXCEPTION_DOUBLE_FAULT). Never returns.
 * NOTE(review): the CPU pushes an error code (always 0) for trap 8, but
 * ENTER_TRAP_FRAME is invoked without TRAPFLAG_HAS_ERRORCODE -- verify
 * the macro's stack accounting for this vector. */
PUBLIC KiDoubleFaultAbort
.PROC KiDoubleFaultAbort
/* Push pseudo error code */
ENTER_TRAP_FRAME TRAPFLAG_ALL
lea rcx, MsgDoubleFault[rip]
mov rdx, [rbp + KTRAP_FRAME_FaultAddress]
mov r8, rbp
call qword ptr FrLdrDbgPrint[rip]
/* Bugcheck */
mov ecx, 8 // EXCEPTION_DOUBLE_FAULT
mov rdx, rbp
call KiSystemFatalException
jmp $
.ENDP KiDoubleFaultAbort
/* Trap 9: coprocessor segment overrun (legacy, not raised on modern
 * CPUs). Always fatal: bugchecks and spins. */
PUBLIC KiNpxSegmentOverrunAbort
.PROC KiNpxSegmentOverrunAbort
/* Push pseudo error code */
ENTER_TRAP_FRAME TRAPFLAG_ALL
/* Bugcheck */
mov ecx, EXCEPTION_NPX_OVERRUN
mov rdx, rbp
call KiSystemFatalException
jmp $
.ENDP KiNpxSegmentOverrunAbort
/* Trap 10 (#TS): invalid TSS. Pushes a hardware error code (the bad
 * selector). Always fatal: bugchecks and spins. */
PUBLIC KiInvalidTssFault
.PROC KiInvalidTssFault
/* We have an error code */
ENTER_TRAP_FRAME (TRAPFLAG_HAS_ERRORCODE OR TRAPFLAG_ALL)
/* Bugcheck */
mov ecx, EXCEPTION_INVALID_TSS
mov rdx, rbp
call KiSystemFatalException
jmp $
.ENDP KiInvalidTssFault
/* Trap 11 (#NP): segment not present. Pushes a hardware error code (the
 * offending selector). Always fatal: bugchecks and spins. */
PUBLIC KiSegmentNotPresentFault
.PROC KiSegmentNotPresentFault
/* We have an error code */
ENTER_TRAP_FRAME (TRAPFLAG_HAS_ERRORCODE OR TRAPFLAG_ALL)
/* Bugcheck */
mov ecx, EXCEPTION_SEGMENT_NOT_PRESENT
mov rdx, rbp
call KiSystemFatalException
jmp $
.ENDP KiSegmentNotPresentFault
/* Trap 12 (#SS): stack-segment fault. Pushes a hardware error code.
 * Always fatal: bugchecks and spins. */
PUBLIC KiStackFault
.PROC KiStackFault
/* We have an error code */
ENTER_TRAP_FRAME (TRAPFLAG_HAS_ERRORCODE OR TRAPFLAG_ALL)
/* Bugcheck */
mov ecx, EXCEPTION_STACK_FAULT
mov rdx, rbp
call KiSystemFatalException
jmp $
.ENDP KiStackFault
/* Trap 13 (#GP): general protection fault. Pushes a hardware error code.
 * Delegates to the C handler KiGeneralProtectionFaultHandler(TrapFrame);
 * a non-negative return means the fault was fixed up (iretq), a negative
 * NTSTATUS is dispatched as an exception with that status as the code.
 * NOTE(review): after InternalDispatchException returns, control falls
 * through to KiGpfFatal and bugchecks -- confirm this fall-through is
 * intentional for unhandled kernel GPFs. */
PUBLIC KiGeneralProtectionFault
.PROC KiGeneralProtectionFault
    /* We have an error code */
    ENTER_TRAP_FRAME (TRAPFLAG_HAS_ERRORCODE OR TRAPFLAG_ALL)
    TRAPINFO KiGeneralProtectionFault

    /* Debug print of the faulting RIP (clobbers rcx/rdx) */
    mov rdx, [rbp + KTRAP_FRAME_Rip]
    lea rcx, MsgGeneralProtFault[rip]
    call qword ptr FrLdrDbgPrint[rip]

    /* Call the C handler. rcx was clobbered by the debug print above, so
     * it must be reloaded with the trap frame (the handler's argument),
     * as the other handler calls in this file do. */
    mov rcx, rbp
    call KiGeneralProtectionFaultHandler

    /* Check for success */
    test eax, eax
    jge KiGpfExit

    /* Dispatch the exception (eax = the handler's failure status) */
    mov edx, 3
    mov r9, 0
    mov r10, 0
    mov r11, 0
    call InternalDispatchException

KiGpfFatal:
    /* Bugcheck */
    mov ecx, UNEXPECTED_KERNEL_MODE_TRAP
    mov rdx, HEX(000D) // EXCEPTION_GP_FAULT
    xor r8, r8
    mov r9, [rbp + KTRAP_FRAME_ErrorCode] // error code
    sub rsp, 8
    mov [rsp + KTRAP_FRAME_P5+8], rbp // trap frame
    call KeBugCheckWithTf

KiGpfExit:
    /* Return */
    LEAVE_TRAP_FRAME
    iretq
.ENDP KiGeneralProtectionFault
/* Trap 14 (#PF): page fault. Pushes a hardware error code (bit 0 = write
 * access). The faulting address is read from CR2 and passed with the
 * access type and mode to MmAccessFault(StoreInstruction, Address, Mode,
 * TrapFrame). Non-negative return: resolved, iretq. Negative: translated
 * into an access-violation / guard-page / stack-overflow / in-page
 * exception and dispatched. */
PUBLIC KiPageFault
.PROC KiPageFault
/* We have an error code */
ENTER_TRAP_FRAME (TRAPFLAG_HAS_ERRORCODE OR TRAPFLAG_ALL)
TRAPINFO KiPageFault
#if 0
lea rcx, MsgPageFault[rip]
mov rdx, [rbp + KTRAP_FRAME_ErrorCode]
mov r8, [rbp + KTRAP_FRAME_Rip]
mov r9, [rbp + KTRAP_FRAME_FaultAddress]
call qword ptr FrLdrDbgPrint[rip]
#endif
/* Save page fault address */
mov rdx, cr2
mov [rbp + KTRAP_FRAME_FaultAddress], rdx
/* Call page fault handler */
mov ecx, [rbp + KTRAP_FRAME_ErrorCode] // StoreInstruction
and ecx, 1
// rdx == Address
/* Mode: low bit of CS distinguishes kernel (RPL 0) from user (RPL 3) */
mov r8b, [rbp + KTRAP_FRAME_SegCs] // Mode
and r8b, 1
mov r9, rbp // TrapInformation
call MmAccessFault
/* Check for success */
test eax, eax
jge PageFaultReturn
/* Set parameter 1 to error code */
mov r9d, [rbp + KTRAP_FRAME_ErrorCode]
/* Set parameter2 to faulting address */
mov r10, cr2 // Param2 = faulting address
cmp eax, STATUS_ACCESS_VIOLATION
je AccessViolation
cmp eax, STATUS_GUARD_PAGE_VIOLATION
je SpecialCode
cmp eax, STATUS_STACK_OVERFLOW
je SpecialCode
InPageException:
/* Dispatch in-page exception: the original status becomes parameter 3 */
mov r11d, eax // Param3 = Status
mov eax, STATUS_IN_PAGE_ERROR // ExceptionCode
mov edx, 3 // ParamCount
call InternalDispatchException
jmp PageFaultReturn
AccessViolation:
/* Use more proper status code */
mov eax, KI_EXCEPTION_ACCESS_VIOLATION
SpecialCode:
/* Setup a normal page fault exception (2 params: error code, address) */
mov edx, 2 // ParamCount
call InternalDispatchException
PageFaultReturn:
LEAVE_TRAP_FRAME
iretq
.ENDP KiPageFault
/* Trap 16 (#MF): x87 floating-point error. No hardware error code.
 * Not implemented: prints a message and spins forever. */
PUBLIC KiFloatingErrorFault
.PROC KiFloatingErrorFault
/* Push pseudo error code */
ENTER_TRAP_FRAME TRAPFLAG_ALL
UNIMPLEMENTED KiFloatingErrorFault
jmp $
.ENDP KiFloatingErrorFault
/* Trap 17 (#AC): alignment check. Pushes a hardware error code (always
 * zero). Currently always fatal: bugchecks and spins. */
PUBLIC KiAlignmentFault
.PROC KiAlignmentFault
/* We have an error code */
ENTER_TRAP_FRAME (TRAPFLAG_HAS_ERRORCODE OR TRAPFLAG_ALL)
/* Enable interrupts */
sti
/* Bugcheck */
mov ecx, EXCEPTION_ALIGNMENT_CHECK
mov rdx, rbp
call KiSystemFatalException
jmp $
.ENDP KiAlignmentFault
/* Trap 18 (#MC): machine check abort. No hardware error code. Always
 * fatal: bugchecks with the raw vector number (0x12) and spins. */
PUBLIC KiMcheckAbort
.PROC KiMcheckAbort
/* Push pseudo error code */
ENTER_TRAP_FRAME TRAPFLAG_ALL
/* Bugcheck (0x12 = machine-check exception number) */
mov ecx, HEX(12)
mov rdx, rbp
call KiSystemFatalException
jmp $
.ENDP KiMcheckAbort
/* Trap 19 (#XM): SIMD floating-point exception. No hardware error code.
 * Delegates to KiXmmExceptionHandler(TrapFrame); a non-negative return
 * means handled, a negative NTSTATUS is dispatched as the exception code. */
PUBLIC KiXmmException
.PROC KiXmmException
/* Push pseudo error code */
ENTER_TRAP_FRAME TRAPFLAG_ALL
/* Call the C handler */
mov rcx, rbp
call KiXmmExceptionHandler
/* Check for success */
test eax, eax
jge KiXmmExit
/* Dispatch the exception (eax = the handler's failure status) */
mov edx, 3
mov r9, 0
mov r10, 0
mov r11, 0
call InternalDispatchException
KiXmmExit:
LEAVE_TRAP_FRAME
iretq
.ENDP KiXmmException
/* APC software interrupt. Not implemented: prints a message and spins.
 * NOTE(review): TRAPFLAG_HAS_ERRORCODE on a software interrupt looks
 * suspect -- the CPU pushes no error code for this vector unless it is
 * entered through the push-vector stubs; verify against ENTER_TRAP_FRAME. */
PUBLIC KiApcInterrupt
.PROC KiApcInterrupt
/* We have an error code */
ENTER_TRAP_FRAME (TRAPFLAG_HAS_ERRORCODE OR TRAPFLAG_ALL)
UNIMPLEMENTED KiApcInterrupt
jmp $
.ENDP KiApcInterrupt
/* INT 2C: raise assertion (ASSERT failure from code). Rewinds RIP by two
 * bytes so the exception address points at the INT 2C instruction itself,
 * then dispatches STATUS_ASSERTION_FAILURE.
 * NOTE(review): TRAPFLAG_HAS_ERRORCODE on an INT-gate entry is suspect --
 * software interrupts push no error code; verify against ENTER_TRAP_FRAME. */
PUBLIC KiRaiseAssertion
.PROC KiRaiseAssertion
/* We have an error code */
ENTER_TRAP_FRAME (TRAPFLAG_HAS_ERRORCODE OR TRAPFLAG_ALL)
/* Decrement RIP to point to the INT2C instruction (2 bytes, not 1 like INT3) */
sub qword ptr [rbp + KTRAP_FRAME_Rip], 2
/* Dispatch the exception */
mov eax, STATUS_ASSERTION_FAILURE
mov edx, 0
mov r9, 0
mov r10, 0
mov r11, 0
call InternalDispatchException
LEAVE_TRAP_FRAME
iretq
.ENDP KiRaiseAssertion
/* INT 2D: debug service trap (DbgPrint etc. from user/kernel code).
 * Skips the INT3 that conventionally follows INT 2D, then dispatches
 * STATUS_BREAKPOINT with the service number, buffer and length taken
 * from the caller's rax/rcx/rdx as exception parameters. */
PUBLIC KiDebugServiceTrap
.PROC KiDebugServiceTrap
/* Push pseudo error code */
ENTER_TRAP_FRAME TRAPFLAG_ALL
TRAPINFO KiDebugServiceTrap
/* Increase Rip to skip the int3 */
inc qword ptr [rbp + KTRAP_FRAME_Rip]
/* Dispatch the exception */
mov eax, STATUS_BREAKPOINT
mov edx, 3
mov r9, [rbp+KTRAP_FRAME_Rax] // Service
mov r10, [rbp+KTRAP_FRAME_Rcx] // Buffer
mov r11, [rbp+KTRAP_FRAME_Rdx] // Length
call InternalDispatchException
LEAVE_TRAP_FRAME;
iretq
.ENDP KiDebugServiceTrap
/* DPC software interrupt. Not implemented: prints a message and spins.
 * NOTE(review): same TRAPFLAG_HAS_ERRORCODE concern as KiApcInterrupt. */
PUBLIC KiDpcInterrupt
.PROC KiDpcInterrupt
/* We have an error code */
ENTER_TRAP_FRAME (TRAPFLAG_HAS_ERRORCODE OR TRAPFLAG_ALL)
UNIMPLEMENTED KiDpcInterrupt
jmp $
.ENDP KiDpcInterrupt
/* Inter-processor interrupt. Not implemented: prints a message and spins.
 * NOTE(review): same TRAPFLAG_HAS_ERRORCODE concern as KiApcInterrupt. */
PUBLIC KiIpiInterrupt
.PROC KiIpiInterrupt
/* We have an error code */
ENTER_TRAP_FRAME (TRAPFLAG_HAS_ERRORCODE OR TRAPFLAG_ALL)
UNIMPLEMENTED KiIpiInterrupt
jmp $
.ENDP KiIpiInterrupt
/* Landing target of InterruptDispatchTable: an interrupt arrived on a
 * vector with no registered handler. The stub pushed its vector number,
 * which ENTER_TRAP_FRAME records as the error code. Bugchecks with
 * TRAP_CAUSE_UNKNOWN and the vector as the first parameter; never returns. */
PUBLIC KiUnexpectedInterrupt
.PROC KiUnexpectedInterrupt
    /* The error code is the vector */
    cli
    ENTER_TRAP_FRAME (TRAPFLAG_HAS_ERRORCODE OR TRAPFLAG_ALL)

    /* Set bugcheck parameters */
    mov ecx, TRAP_CAUSE_UNKNOWN
    mov rdx, [rbp + KTRAP_FRAME_ErrorCode] // the vector
    mov r8, 0 // The unknown floating-point exception
    mov r9, 0 // The enabled and asserted status bits
    sub rsp, 8
    /* The 5th argument (trap frame) goes in the caller-allocated stack
     * home slot, addressed off rsp -- writing it rbp-relative, as the
     * original did, would corrupt the trap frame itself (compare the
     * rsp-relative store in KiGpfFatal). */
    mov [rsp + KTRAP_FRAME_P5 + 8], rbp // trap frame
    call KeBugCheckWithTf
    jmp $
.ENDP KiUnexpectedInterrupt
#ifdef _MSC_VER
/* Descriptor-table access helpers for MSVC builds, which lack GCC inline
 * assembly. Each follows the Microsoft x64 calling convention: the single
 * pointer/selector argument arrives in rcx. */
//void __lgdt(void *Source);
PUBLIC __lgdt
__lgdt:
lgdt fword ptr [rcx]
ret
//void __sgdt(void *Destination);
PUBLIC __sgdt
__sgdt:
sgdt fword ptr [rcx]
ret
// void __lldt(unsigned short Value)
PUBLIC __lldt
__lldt:
lldt cx
ret
//void __sldt(void *Destination);
PUBLIC __sldt
__sldt:
sldt word ptr [rcx]
ret
//void __ltr(unsigned short Source);
PUBLIC __ltr
__ltr:
ltr cx
ret
//void __str(unsigned short *Destination);
PUBLIC __str
__str:
str word ptr [rcx]
ret
#endif
END

View file

@ -0,0 +1,884 @@
/*
* COPYRIGHT: GPL, See COPYING in the top level directory
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/amd64/init.c
* PURPOSE: Memory Manager Initialization for amd64
*
* PROGRAMMERS: Timo Kreuzer (timo.kreuzer@reactos.org)
* ReactOS Portable Systems Group
*/
/* INCLUDES ***************************************************************/
#include <ntoskrnl.h>
//#define NDEBUG
#include <debug.h>
#include "../ARM3/miarm.h"
#ifdef _WINKD_
extern PMMPTE MmDebugPte;
#endif
/* Forward declaration: HAL BIOS-emulation setup called during Mm init. */
VOID
NTAPI
HalInitializeBios(ULONG Unknown, PLOADER_PARAMETER_BLOCK LoaderBlock);
/* GLOBALS *****************************************************************/
/* Template PTE and PDE for a kernel page */
MMPTE ValidKernelPde = {.u.Hard.Valid = 1, .u.Hard.Write = 1, .u.Hard.Dirty = 1, .u.Hard.Accessed = 1};
MMPTE ValidKernelPte = {.u.Hard.Valid = 1, .u.Hard.Write = 1, .u.Hard.Dirty = 1, .u.Hard.Accessed = 1};
/* Demand-zero PDE / prototype PTE templates (software PTE formats) */
MMPDE DemandZeroPde = {.u.Long = (MM_READWRITE << MM_PTE_SOFTWARE_PROTECTION_BITS)};
MMPTE PrototypePte = {.u.Long = (MM_READWRITE << MM_PTE_SOFTWARE_PROTECTION_BITS) | PTE_PROTOTYPE | 0xFFFFF000};
/* Sizes */
///SIZE_T MmSessionSize = MI_SESSION_SIZE;
SIZE_T MmSessionViewSize = MI_SESSION_VIEW_SIZE;
SIZE_T MmSessionPoolSize = MI_SESSION_POOL_SIZE;
SIZE_T MmSessionImageSize = MI_SESSION_IMAGE_SIZE;
SIZE_T MmSystemViewSize = MI_SYSTEM_VIEW_SIZE;
SIZE_T MiNonPagedSystemSize;
/* Address ranges */
ULONG64 MmUserProbeAddress = 0x7FFFFFF0000ULL;
PVOID MmHighestUserAddress = (PVOID)0x7FFFFFEFFFFULL;
PVOID MmSystemRangeStart = (PVOID)0xFFFF080000000000ULL;
/* Session-space layout, computed in MiInitializeSessionSpace */
PVOID MmSessionBase; // FFFFF90000000000 = MiSessionPoolStart
PVOID MiSessionPoolStart; // FFFFF90000000000 = MiSessionPoolEnd - MmSessionPoolSize
PVOID MiSessionPoolEnd; // = MiSessionViewStart
PVOID MiSessionViewStart; // = MiSessionViewEnd - MmSessionViewSize
PVOID MiSessionViewEnd; // FFFFF97FFF000000
PVOID MiSessionImageStart; // ?FFFFF97FFF000000 = MiSessionImageEnd - MmSessionImageSize
PVOID MiSessionImageEnd; // FFFFF98000000000 = MiSessionSpaceEnd
PVOID MiSessionSpaceEnd = MI_SESSION_SPACE_END; // FFFFF98000000000
PVOID MmSystemCacheStart; // FFFFF98000000000
PVOID MmSystemCacheEnd; // FFFFFA8000000000
/// PVOID MmPagedPoolStart = MI_PAGED_POOL_START; // FFFFFA8000000000
PVOID MmPagedPoolEnd; // FFFFFAA000000000
PVOID MiSystemViewStart;
PVOID MmNonPagedSystemStart; // FFFFFAA000000000
PVOID MmNonPagedPoolStart;
PVOID MmNonPagedPoolExpansionStart;
///PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END; // 0xFFFFFAE000000000
PVOID MmHyperSpaceEnd = (PVOID)HYPER_SPACE_END;
MMSUPPORT MmSystemCacheWs;
/* PFN database size in bytes (set in MiPreparePfnDatabse) */
ULONG64 MxPfnSizeInBytes;
/* Largest free memory descriptor, used by the early page allocator;
 * MxOldFreeDescriptor keeps the original values for diagnostics */
PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;
ULONG MiNumberDescriptors = 0;
PFN_NUMBER MiSystemPages = 0;
BOOLEAN MiIncludeType[LoaderMaximum];
/* Cursor of the early sequential page allocator (see MiEarlyAllocPage) */
PFN_NUMBER MxFreePageBase;
ULONG64 MxFreePageCount = 0;
extern PFN_NUMBER MmSystemPageDirectory[PD_COUNT];
/* Flips to TRUE once MmAllocPage can be used instead of MiEarlyAllocPage */
BOOLEAN MiPfnsInitialized = FALSE;
/* FUNCTIONS *****************************************************************/
/* Debug-print stub that swallows all output; used to silence the
 * boot-loader's FrLdrDbgPrint hook. Always reports zero characters. */
ULONG
NoDbgPrint(const char *Format, ...)
{
    (void)Format; /* intentionally ignored */
    return 0;
}
/*
 * Scans the loader-provided memory descriptor list once and derives the
 * global memory picture: total/lowest/highest physical page, the number
 * of descriptors, the count of loader-used ("system") pages, and the
 * largest free descriptor, which seeds the early page allocator
 * (MxFreeDescriptor / MxFreePageBase / MxFreePageCount).
 */
VOID
NTAPI
MiEvaluateMemoryDescriptors(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
PMEMORY_ALLOCATION_DESCRIPTOR Descriptor;
PLIST_ENTRY ListEntry;
PFN_NUMBER LastPage;
ULONG i;
/* Get the size of the boot loader's image allocations, rounded up to 4 MB */
MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned * PAGE_SIZE;
MmBootImageSize = ROUND_UP(MmBootImageSize, 4 * 1024 * 1024);
/* Instantiate memory that we don't consider RAM/usable */
for (i = 0; i < LoaderMaximum; i++) MiIncludeType[i] = TRUE;
MiIncludeType[LoaderBad] = FALSE;
MiIncludeType[LoaderFirmwarePermanent] = FALSE;
MiIncludeType[LoaderSpecialMemory] = FALSE;
MiIncludeType[LoaderBBTMemory] = FALSE;
/* Loop the memory descriptors */
for (ListEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
ListEntry != &LoaderBlock->MemoryDescriptorListHead;
ListEntry = ListEntry->Flink)
{
/* Get the memory descriptor */
Descriptor = CONTAINING_RECORD(ListEntry,
MEMORY_ALLOCATION_DESCRIPTOR,
ListEntry);
/* Count it */
MiNumberDescriptors++;
/* Skip pages that are not part of the PFN database */
if (!MiIncludeType[Descriptor->MemoryType])
{
continue;
}
/* Add this to the total of pages */
MmNumberOfPhysicalPages += Descriptor->PageCount;
/* Check if this is the new lowest page */
if (Descriptor->BasePage < MmLowestPhysicalPage)
{
/* Update the lowest page */
MmLowestPhysicalPage = Descriptor->BasePage;
}
/* Check if this is the new highest page */
LastPage = Descriptor->BasePage + Descriptor->PageCount - 1;
if (LastPage > MmHighestPhysicalPage)
{
/* Update the highest page */
MmHighestPhysicalPage = LastPage;
}
/* Check if this is currently free memory */
if ((Descriptor->MemoryType == LoaderFree) ||
(Descriptor->MemoryType == LoaderLoadedProgram) ||
(Descriptor->MemoryType == LoaderFirmwareTemporary) ||
(Descriptor->MemoryType == LoaderOsloaderStack))
{
/* Check if this is the largest memory descriptor */
if (Descriptor->PageCount > MxFreePageCount)
{
/* For now, it is */
MxFreeDescriptor = Descriptor;
MxFreePageBase = Descriptor->BasePage;
MxFreePageCount = Descriptor->PageCount;
}
}
else
{
/* Add it to the amount of system used pages */
MiSystemPages += Descriptor->PageCount;
}
}
}
/*
 * Allocates one physical page frame during early boot.
 *
 * Before the PFN database is up (MiPfnsInitialized == FALSE), pages are
 * carved sequentially from the largest free memory descriptor found by
 * MiEvaluateMemoryDescriptors; running out is fatal (INSTALL_MORE_MEMORY
 * bugcheck). Afterwards this simply delegates to MmAllocPage(MC_SYSTEM).
 *
 * NOTE(review): declared with empty parens (unprototyped in C89 terms);
 * one call site passes a dummy argument -- should be (VOID) once the
 * call sites agree.
 */
PFN_NUMBER
NTAPI
MiEarlyAllocPage()
{
PFN_NUMBER Pfn;
if (MiPfnsInitialized)
{
return MmAllocPage(MC_SYSTEM);
}
/* Make sure we have enough pages */
if (!MxFreePageCount)
{
/* Crash the system */
KeBugCheckEx(INSTALL_MORE_MEMORY,
MmNumberOfPhysicalPages,
MxFreeDescriptor->PageCount,
MxOldFreeDescriptor.PageCount,
1);
}
/* Use our lowest usable free pages */
Pfn = MxFreePageBase;
MxFreePageBase++;
MxFreePageCount--;
return Pfn;
}
PMMPTE
NTAPI
MxGetPte(PVOID Address)
{
PMMPTE Pte;
MMPTE TmplPte;
/* Setup template pte */
TmplPte.u.Long = 0;
TmplPte.u.Flush.Valid = 1;
TmplPte.u.Flush.Write = 1;
/* Get a pointer to the PXE */
Pte = MiAddressToPxe(Address);
if (!Pte->u.Hard.Valid)
{
/* It's not valid, map it! */
TmplPte.u.Hard.PageFrameNumber = MiEarlyAllocPage();
*Pte = TmplPte;
/* Zero the page */
RtlZeroMemory(MiPteToAddress(Pte), PAGE_SIZE);
}
/* Get a pointer to the PPE */
Pte = MiAddressToPpe(Address);
if (!Pte->u.Hard.Valid)
{
/* It's not valid, map it! */
TmplPte.u.Hard.PageFrameNumber = MiEarlyAllocPage();
*Pte = TmplPte;
/* Zero the page */
RtlZeroMemory(MiPteToAddress(Pte), PAGE_SIZE);
}
/* Get a pointer to the PDE */
Pte = MiAddressToPde(Address);
if (!Pte->u.Hard.Valid)
{
/* It's not valid, map it! */
TmplPte.u.Hard.PageFrameNumber = MiEarlyAllocPage();
*Pte = TmplPte;
/* Zero the page */
RtlZeroMemory(MiPteToAddress(Pte), PAGE_SIZE);
}
/* Get a pointer to the PTE */
Pte = MiAddressToPte(Address);
return Pte;
}
/*
 * Maps one freshly-allocated physical page at the given virtual address,
 * creating intermediate page tables as required. The target PTE must not
 * already be valid.
 */
VOID
NTAPI
MxMapPage(PVOID Address)
{
    MMPTE NewPte;
    PMMPTE PointerPte;

    /* Build a valid, writable PTE backed by a new physical page */
    NewPte.u.Long = 0;
    NewPte.u.Flush.Valid = 1;
    NewPte.u.Flush.Write = 1;
    NewPte.u.Hard.PageFrameNumber = MiEarlyAllocPage();

    /* Locate the PTE, materializing the paging hierarchy on the way */
    PointerPte = MxGetPte(Address);
    ASSERT(PointerPte->u.Hard.Valid == 0);

    /* Install the mapping */
    *PointerPte = NewPte;
}
/*
 * Maps PageCount consecutive pages starting at Address, each backed by a
 * freshly-allocated physical page (see MxMapPage).
 */
VOID
MxMapPageRange(PVOID Address, ULONG64 PageCount)
{
    ULONG64 Index;
    PUCHAR Current = (PUCHAR)Address;

    for (Index = 0; Index < PageCount; Index++)
    {
        /* Map this page, then advance to the next one */
        MxMapPage(Current);
        Current += PAGE_SIZE;
    }
}
/*
 * Computes the PFN database size and maps the portions of it that cover
 * included (usable) memory descriptors, zeroing each mapped region.
 *
 * NOTE(review): the function name is missing an 'a' ("Databse") -- it is
 * the published identifier, so renaming needs a coordinated change at
 * the call site(s).
 */
VOID
NTAPI
MiPreparePfnDatabse(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
PMEMORY_ALLOCATION_DESCRIPTOR Descriptor;
PLIST_ENTRY ListEntry;
PUCHAR Page, FirstPage;
SIZE_T Size;
/* Calculate the size of the PFN database and convert to pages */
MxPfnSizeInBytes = ROUND_TO_PAGES((MmHighestPhysicalPage + 1) * sizeof(MMPFN));
MxPfnAllocation = MxPfnSizeInBytes >> PAGE_SHIFT;
/* Simply start at hardcoded address */
MmPfnDatabase = MI_PFN_DATABASE;
/* Loop the memory descriptors */
for (ListEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
ListEntry != &LoaderBlock->MemoryDescriptorListHead;
ListEntry = ListEntry->Flink)
{
/* Get the memory descriptor */
Descriptor = CONTAINING_RECORD(ListEntry,
MEMORY_ALLOCATION_DESCRIPTOR,
ListEntry);
/* Skip pages that are not part of the PFN database */
if (MiIncludeType[Descriptor->MemoryType])
{
/* Get the base and size of this pfn database entry */
FirstPage = PAGE_ALIGN(&MmPfnDatabase[Descriptor->BasePage]);
Size = ROUND_TO_PAGES(Descriptor->PageCount * sizeof(MMPFN));
/* Loop the pages of this Pfn database entry */
for (Page = FirstPage; Page < FirstPage + Size; Page += PAGE_SIZE)
{
/* Is the page already mapped? */
if (!MmIsAddressValid(Page))
{
/* It's not, map it now */
MxMapPage(Page);
RtlZeroMemory(Page, PAGE_SIZE);
}
}
/* Zero out the pages (re-zeroes pages that were already mapped,
 * since adjacent descriptors can share a PFN-database page) */
RtlZeroMemory(FirstPage, Size);
}
}
}
/*
 * Computes the session-space layout top-down from MI_SESSION_SPACE_END:
 * image area, then view area, then session pool; MmSessionBase ends up
 * at the bottom. System view space is then placed directly below the
 * session base. Only addresses are computed here -- nothing is mapped.
 */
VOID
NTAPI
MiInitializeSessionSpace(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
/* Set up session space */
MiSessionSpaceEnd = (PVOID)MI_SESSION_SPACE_END;
/* This is where we will load Win32k.sys and the video driver */
MiSessionImageEnd = MiSessionSpaceEnd;
MiSessionImageStart = (PCHAR)MiSessionImageEnd - MmSessionImageSize;
/* The view starts right below the session working set (itself below
* the image area) */
MiSessionViewEnd = MI_SESSION_VIEW_END;
MiSessionViewStart = (PCHAR)MiSessionViewEnd - MmSessionViewSize;
ASSERT(IS_PAGE_ALIGNED(MiSessionViewStart));
/* Session pool follows */
MiSessionPoolEnd = MiSessionViewStart;
MiSessionPoolStart = (PCHAR)MiSessionPoolEnd - MmSessionPoolSize;
ASSERT(IS_PAGE_ALIGNED(MiSessionPoolStart));
/* And it all begins here */
MmSessionBase = MiSessionPoolStart;
/* System view space ends at session space, so now that we know where
* this is, we can compute the base address of system view space itself. */
MiSystemViewStart = (PCHAR)MmSessionBase - MmSystemViewSize;
ASSERT(IS_PAGE_ALIGNED(MiSystemViewStart));
/* Sanity checks */
ASSERT(MiSessionViewEnd <= MiSessionImageStart);
ASSERT(MmSessionBase <= MiSessionPoolStart);
}
VOID
MiInitializePageTable()
{
ULONG64 PageFrameOffset;
MMPTE TmplPte, *Pte;
/* HACK: don't use freeldr debug print anymore */
//FrLdrDbgPrint = NoDbgPrint;
/* Get current directory base */
MmSystemPageDirectory[0] = ((PMMPTE)PXE_SELFMAP)->u.Hard.PageFrameNumber;
PageFrameOffset = MmSystemPageDirectory[0] << PAGE_SHIFT;
ASSERT(PageFrameOffset == __readcr3());
/* Set directory base for the system process */
PsGetCurrentProcess()->Pcb.DirectoryTableBase[0] = PageFrameOffset;
/* Enable global pages */
__writecr4(__readcr4() | CR4_PGE);
ASSERT(__readcr4() & CR4_PGE);
/* Enable no execute */
__writemsr(X86_MSR_EFER, __readmsr(X86_MSR_EFER) | EFER_NXE);
/* Loop the user mode PXEs */
for (Pte = MiAddressToPxe(0);
Pte <= MiAddressToPxe(MmHighestUserAddress);
Pte++)
{
/* Zero the PXE, clear all mappings */
Pte->u.Long = 0;
}
/* Flush the TLB */
KeFlushCurrentTb();
/* Set up a template PTE */
TmplPte.u.Long = 0;
TmplPte.u.Flush.Valid = 1;
TmplPte.u.Flush.Write = 1;
HyperTemplatePte = TmplPte;
/* Create PDPTs (72 KB) for shared system address space,
* skip page tables and hyperspace */
/* Loop the PXEs */
for (Pte = MiAddressToPxe((PVOID)(HYPER_SPACE_END + 1));
Pte <= MiAddressToPxe(MI_HIGHEST_SYSTEM_ADDRESS);
Pte++)
{
/* Is the PXE already valid? */
if (!Pte->u.Hard.Valid)
{
/* It's not Initialize it */
TmplPte.u.Flush.PageFrameNumber = MiEarlyAllocPage(0);
*Pte = TmplPte;
/* Zero the page. The PXE is the PTE for the PDPT. */
RtlZeroMemory(MiPteToAddress(Pte), PAGE_SIZE);
}
}
/* Setup the mapping PTEs */
MmFirstReservedMappingPte = MxGetPte((PVOID)MI_MAPPING_RANGE_START);
MmFirstReservedMappingPte->u.Hard.PageFrameNumber = MI_HYPERSPACE_PTES;
MmLastReservedMappingPte = MiAddressToPte((PVOID)MI_MAPPING_RANGE_END);
#ifdef _WINKD_
/* Setup debug mapping PTE */
MmDebugPte = MxGetPte(MI_DEBUG_MAPPING);
#endif
}
/*
 * Sizes and creates the nonpaged pool: computes initial and maximum pool
 * sizes from physical memory and registry overrides, clamps them to the
 * architectural limits, places the pool below MmNonPagedPoolEnd (pushing
 * it above the PFN database if they would collide), maps the initial
 * region, pre-creates page tables for the expansion region, and finally
 * initializes the ARM3 and executive pool allocators.
 */
VOID
NTAPI
MiBuildNonPagedPool(VOID)
{
PMMPTE Pte;
PFN_COUNT PageCount;
/* Check if this is a machine with less than 256MB of RAM, and no overide */
if ((MmNumberOfPhysicalPages <= MI_MIN_PAGES_FOR_NONPAGED_POOL_TUNING) &&
!(MmSizeOfNonPagedPoolInBytes))
{
/* Force the non paged pool to be 2MB so we can reduce RAM usage */
MmSizeOfNonPagedPoolInBytes = 2 * 1024 * 1024;
}
/* Check if the user gave a ridicuously large nonpaged pool RAM size */
if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) >
(MmNumberOfPhysicalPages * 7 / 8))
{
/* More than 7/8ths of RAM was dedicated to nonpaged pool, ignore! */
MmSizeOfNonPagedPoolInBytes = 0;
}
/* Check if no registry setting was set, or if the setting was too low */
if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize)
{
/* Start with the minimum (256 KB) and add 32 KB for each MB above 4 */
MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
MmSizeOfNonPagedPoolInBytes += (MmNumberOfPhysicalPages - 1024) /
256 * MmMinAdditionNonPagedPoolPerMb;
}
/* Check if the registy setting or our dynamic calculation was too high */
if (MmSizeOfNonPagedPoolInBytes > MI_MAX_INIT_NONPAGED_POOL_SIZE)
{
/* Set it to the maximum */
MmSizeOfNonPagedPoolInBytes = MI_MAX_INIT_NONPAGED_POOL_SIZE;
}
/* Check if a percentage cap was set through the registry */
if (MmMaximumNonPagedPoolPercent)
{
/* Don't feel like supporting this right now */
UNIMPLEMENTED;
}
/* Page-align the nonpaged pool size */
MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);
/* Now, check if there was a registry size for the maximum size */
if (!MmMaximumNonPagedPoolInBytes)
{
/* Start with the default (1MB) and add 400 KB for each MB above 4 */
MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;
MmMaximumNonPagedPoolInBytes += (MmNumberOfPhysicalPages - 1024) /
256 * MmMaxAdditionNonPagedPoolPerMb;
}
/* Don't let the maximum go too high */
if (MmMaximumNonPagedPoolInBytes > MI_MAX_NONPAGED_POOL_SIZE)
{
/* Set it to the upper limit */
MmMaximumNonPagedPoolInBytes = MI_MAX_NONPAGED_POOL_SIZE;
}
/* Put non paged pool to the end of the region */
MmNonPagedPoolStart = (PCHAR)MmNonPagedPoolEnd - MmMaximumNonPagedPoolInBytes;
/* Make sure it doesn't collide with the PFN database */
if ((PCHAR)MmNonPagedPoolStart < (PCHAR)MmPfnDatabase + MxPfnSizeInBytes)
{
/* Put non paged pool after the PFN database */
MmNonPagedPoolStart = (PCHAR)MmPfnDatabase + MxPfnSizeInBytes;
MmMaximumNonPagedPoolInBytes = (ULONG64)MmNonPagedPoolEnd -
(ULONG64)MmNonPagedPoolStart;
}
ASSERT(IS_PAGE_ALIGNED(MmNonPagedPoolStart));
/* Calculate the nonpaged pool expansion start region */
MmNonPagedPoolExpansionStart = (PCHAR)MmNonPagedPoolStart +
MmSizeOfNonPagedPoolInBytes;
ASSERT(IS_PAGE_ALIGNED(MmNonPagedPoolExpansionStart));
/* Map the nonpaged pool */
PageCount = (MmSizeOfNonPagedPoolInBytes + PAGE_SIZE - 1) / PAGE_SIZE;
MxMapPageRange(MmNonPagedPoolStart, PageCount);
/* Loop the non paged pool extension PTEs */
for (Pte = MiAddressToPte(MmNonPagedPoolExpansionStart);
Pte <= MiAddressToPte(MmNonPagedPoolEnd);
Pte++)
{
/* Create PXE, PPE, PDE and zero the PTE */
MxGetPte(MiPteToAddress(Pte))->u.Long = 0;
}
/* Initialize the ARM3 nonpaged pool */
MiInitializeNonPagedPool();
/* Initialize the nonpaged pool */
InitializePool(NonPagedPool, 0);
}
/*
 * Carves the system PTE region out of the space directly below the PFN
 * database (rounded down to a 2 MB boundary), shrinking the PTE count if
 * that would undershoot the architectural minimum start address. Then
 * pre-creates the paging hierarchy for the range, hands it to
 * MiInitializeSystemPtes, and reserves the zeroing PTEs.
 * NOTE(review): declared with empty parens -- should be (VOID).
 */
VOID
NTAPI
MiBuildSystemPteSpace()
{
PMMPTE Pte, StartPte, EndPte;
/* Use the default numer of system PTEs */
MmNumberOfSystemPtes = MI_NUMBER_SYSTEM_PTES;
/* System PTE pool is below the PFN database */
MiNonPagedSystemSize = (MmNumberOfSystemPtes + 1) * PAGE_SIZE;
MmNonPagedSystemStart = (PCHAR)MmPfnDatabase - MiNonPagedSystemSize;
MmNonPagedSystemStart = MM_ROUND_DOWN(MmNonPagedSystemStart, 512 * PAGE_SIZE);
/* Don't let it go below the minimum */
if (MmNonPagedSystemStart < (PVOID)MI_NON_PAGED_SYSTEM_START_MIN)
{
/* This is a hard-coded limit in the Windows NT address space */
MmNonPagedSystemStart = (PVOID)MI_NON_PAGED_SYSTEM_START_MIN;
/* Reduce the amount of system PTEs to reach this point */
MmNumberOfSystemPtes = ((ULONG64)MmPfnDatabase -
(ULONG64)MmNonPagedSystemStart) >>
PAGE_SHIFT;
MmNumberOfSystemPtes--;
ASSERT(MmNumberOfSystemPtes > 1000);
}
/* Set the range of system PTEs */
StartPte = MiAddressToPte(MI_SYSTEM_PTE_START);
EndPte = StartPte + MmNumberOfSystemPtes - 1;
/* Loop the system PTEs */
for (Pte = StartPte; Pte <= EndPte; Pte++)
{
/* Create PXE, PPE, PDE and zero the PTE */
MxGetPte(MiPteToAddress(Pte))->u.Long = 0;
}
/* Create the system PTE space */
Pte = MiAddressToPte(MI_SYSTEM_PTE_START);
MiInitializeSystemPtes(Pte, MmNumberOfSystemPtes, SystemPteSpace);
/* Reserve system PTEs for zeroing PTEs and clear them */
MiFirstReservedZeroingPte = MiReserveSystemPtes(MI_ZERO_PTES, SystemPteSpace);
RtlZeroMemory(MiFirstReservedZeroingPte, MI_ZERO_PTES * sizeof(MMPTE));
/* Set the counter to maximum */
MiFirstReservedZeroingPte->u.Hard.PageFrameNumber = MI_ZERO_PTES - 1;
}
/*
 * Builds MmPhysicalMemoryBlock (the run-length description of physical
 * RAM) and the MiPfnBitMap of valid PFNs from the loader's descriptor
 * list, coalescing adjacent descriptors into single runs. Allocation
 * failure is fatal (INSTALL_MORE_MEMORY bugcheck).
 *
 * NOTE(review): the bitmap buffer is sized as ROUND_UP(pages, 4) BYTES,
 * i.e. one byte per page where one bit would do -- functionally safe
 * (over-allocation), but ~8x larger than needed; verify intent.
 */
VOID
NTAPI
MiBuildPhysicalMemoryBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
PPHYSICAL_MEMORY_DESCRIPTOR Buffer;
PMEMORY_ALLOCATION_DESCRIPTOR Descriptor;
PLIST_ENTRY ListEntry;
/* -1 can never match a descriptor's BasePage, so the first descriptor
 * always opens a new run */
PFN_NUMBER NextPage = -1;
PULONG Bitmap;
ULONG Runs = 0;
ULONG Size;
/* Calculate size for the PFN bitmap */
Size = ROUND_UP(MmHighestPhysicalPage + 1, sizeof(ULONG));
/* Allocate the PFN bitmap */
Bitmap = ExAllocatePoolWithTag(NonPagedPool, Size, ' mM');
/* Allocate enough memory for the physical memory block */
Buffer = ExAllocatePoolWithTag(NonPagedPool,
sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
sizeof(PHYSICAL_MEMORY_RUN) *
(MiNumberDescriptors - 1),
'lMmM');
if (!Bitmap || !Buffer)
{
/* This is critical */
KeBugCheckEx(INSTALL_MORE_MEMORY,
MmNumberOfPhysicalPages,
MmLowestPhysicalPage,
MmHighestPhysicalPage,
0x101);
}
/* Initialize the bitmap and clear all bits */
RtlInitializeBitMap(&MiPfnBitMap, Bitmap, MmHighestPhysicalPage + 1);
RtlClearAllBits(&MiPfnBitMap);
/* Loop the memory descriptors */
for (ListEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
ListEntry != &LoaderBlock->MemoryDescriptorListHead;
ListEntry = ListEntry->Flink)
{
/* Get the memory descriptor */
Descriptor = CONTAINING_RECORD(ListEntry,
MEMORY_ALLOCATION_DESCRIPTOR,
ListEntry);
/* Skip pages that are not part of the PFN database */
if (!MiIncludeType[Descriptor->MemoryType])
{
continue;
}
/* Does the memory block begin where the last ended? */
if (Descriptor->BasePage == NextPage)
{
/* Add it to the current run */
Buffer->Run[Runs - 1].PageCount += Descriptor->PageCount;
}
else
{
/* Create a new run */
Runs++;
Buffer->Run[Runs - 1].BasePage = Descriptor->BasePage;
Buffer->Run[Runs - 1].PageCount = Descriptor->PageCount;
}
/* Set the bits in the PFN bitmap */
RtlSetBits(&MiPfnBitMap, Descriptor->BasePage, Descriptor->PageCount);
/* Set the next page */
NextPage = Descriptor->BasePage + Descriptor->PageCount;
}
// FIXME: allocate a buffer of better size
Buffer->NumberOfRuns = Runs;
Buffer->NumberOfPages = MmNumberOfPhysicalPages;
MmPhysicalMemoryBlock = Buffer;
}
/*
 * Sizes and creates the paged pool: defaults to 4x the nonpaged pool
 * maximum, clamped to the space between MmPagedPoolStart and the system
 * PTE region and aligned down to a PDE (2 MB) boundary. Maps the top
 * levels of the hierarchy and the first pool page, then sets up the two
 * tracking bitmaps (allocation map and end-of-allocation map) and the
 * executive paged pool.
 */
VOID
NTAPI
MiBuildPagedPool_x(VOID)
{
PMMPTE Pte;
MMPTE TmplPte;
ULONG Size, BitMapSize;
/* Default size for paged pool is 4 times non paged pool */
MmSizeOfPagedPoolInBytes = 4 * MmMaximumNonPagedPoolInBytes;
/* Make sure it doesn't overflow */
if (MmSizeOfPagedPoolInBytes > ((ULONG64)MmNonPagedSystemStart -
(ULONG64)MmPagedPoolStart))
{
MmSizeOfPagedPoolInBytes = (ULONG64)MmNonPagedSystemStart -
(ULONG64)MmPagedPoolStart;
}
/* Make sure paged pool is big enough */
if (MmSizeOfPagedPoolInBytes < MI_MIN_INIT_PAGED_POOLSIZE)
{
MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE;
}
/* Align down to a PDE boundary */
MmSizeOfPagedPoolInBytes = ROUND_DOWN(MmSizeOfPagedPoolInBytes,
512 * PAGE_SIZE);
MmSizeOfPagedPoolInPages = MmSizeOfPagedPoolInBytes >> PAGE_SHIFT;
/* This is where paged pool ends */
MmPagedPoolEnd = (PCHAR)MmPagedPoolStart + MmSizeOfPagedPoolInBytes - 1;
/* Sanity check */
ASSERT(MmPagedPoolEnd < MmNonPagedSystemStart);
/* setup a template PTE */
TmplPte.u.Long = 0;
TmplPte.u.Flush.Valid = 1;
TmplPte.u.Flush.Write = 1;
/* Make sure the PXE is valid */
Pte = MiAddressToPxe(MmPagedPoolStart);
if (!Pte->u.Flush.Valid)
{
/* Map it! */
TmplPte.u.Flush.PageFrameNumber = MmAllocPage(MC_SYSTEM);
*Pte = TmplPte;
}
/* Map all page directories (max 128) */
for (Pte = MiAddressToPpe(MmPagedPoolStart);
Pte <= MiAddressToPpe(MmPagedPoolEnd);
Pte++)
{
if (!Pte->u.Flush.Valid)
{
/* Map it! */
TmplPte.u.Flush.PageFrameNumber = MiEarlyAllocPage();
*Pte = TmplPte;
}
}
/* Create and map the first PTE for paged pool */
Pte = MxGetPte(MmPagedPoolStart);
TmplPte.u.Flush.PageFrameNumber = MiEarlyAllocPage();
*Pte = TmplPte;
/* Save the first and last paged pool PTE */
MmPagedPoolInfo.FirstPteForPagedPool = MiAddressToPte(MmPagedPoolStart);
MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);
MmPagedPoolInfo.NextPdeForPagedPoolExpansion =
MiAddressToPde(MmPagedPoolStart) + 1;
// We keep track of each page via a bit, so check how big the bitmap will
// have to be (make sure to align our page count such that it fits nicely
// into a 4-byte aligned bitmap.
/* The size of the bitmap in bits is the size in pages */
BitMapSize = MmSizeOfPagedPoolInPages;
/* Calculate buffer size in bytes, aligned to 32 bits */
Size = sizeof(RTL_BITMAP) + ROUND_UP(BitMapSize, 32) / 8;
// Allocate the allocation bitmap, which tells us which regions have not yet
// been mapped into memory
MmPagedPoolInfo.PagedPoolAllocationMap =
ExAllocatePoolWithTag(NonPagedPool, Size, ' mM');
ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);
// Initialize it such that at first, only the first page's worth of PTEs is
// marked as allocated (incidentially, the first PDE we allocated earlier).
RtlInitializeBitMap(MmPagedPoolInfo.PagedPoolAllocationMap,
(PULONG)(MmPagedPoolInfo.PagedPoolAllocationMap + 1),
BitMapSize);
RtlSetAllBits(MmPagedPoolInfo.PagedPoolAllocationMap);
RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, 0, 512);
// We have a second bitmap, which keeps track of where allocations end.
// Given the allocation bitmap and a base address, we can therefore figure
// out which page is the last page of that allocation, and thus how big the
// entire allocation is.
MmPagedPoolInfo.EndOfPagedPoolBitmap =
ExAllocatePoolWithTag(NonPagedPool, Size, ' mM');
ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
/* Initialize the bitmap */
RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
(PULONG)(MmPagedPoolInfo.EndOfPagedPoolBitmap + 1),
BitMapSize);
/* No allocations, no allocation ends; clear all bits. */
RtlClearAllBits(MmPagedPoolInfo.EndOfPagedPoolBitmap);
/* Initialize the paged pool mutex */
KeInitializeGuardedMutex(&MmPagedPoolMutex);
/* Initialize the paged pool */
InitializePool(PagedPool, 0);
}
/*
 * Phase-driven memory manager initialization for amd64.
 *
 * Phase 0: evaluates loader memory descriptors, reserves the PFN database
 * range, session space and initial page tables, then shrinks the free
 * descriptor so used pages never enter the PFN database.
 * Phase 1: restores the free descriptor, then brings up non-paged pool,
 * system PTE space and the physical memory block.
 *
 * Returns STATUS_SUCCESS always; the callees ASSERT/bugcheck on failure.
 */
NTSTATUS
NTAPI
MmArmInitSystem_x(IN ULONG Phase,
                  IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    if (Phase == 0)
    {
        /* Size of the kernel + boot-loaded images, page-aligned */
        MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned * PAGE_SIZE;
        MmBootImageSize = ROUND_UP(MmBootImageSize, PAGE_SIZE);

        /* Parse memory descriptors, find free pages */
        MiEvaluateMemoryDescriptors(LoaderBlock);

        /* Start PFN database at hardcoded address */
        MmPfnDatabase = MI_PFN_DATABASE;

        /* Prepare PFN database mappings
           NOTE(review): symbol is spelled "Databse" at its definition site;
           renaming must happen there first. */
        MiPreparePfnDatabse(LoaderBlock);

        /* Initialize the session space */
        MiInitializeSessionSpace(LoaderBlock);

        /* Initialize some mappings */
        MiInitializePageTable();

        /* Update the memory descriptor, to make sure the pages we used
           won't get inserted into the PFN database */
        MxOldFreeDescriptor = *MxFreeDescriptor;
        MxFreeDescriptor->BasePage = MxFreePageBase;
        MxFreeDescriptor->PageCount = MxFreePageCount;
    }
    else if (Phase == 1)
    {
        /* The PFN database was created, restore the free descriptor */
        *MxFreeDescriptor = MxOldFreeDescriptor;

        /* The pfn database is ready now */
        MiPfnsInitialized = TRUE;

        /* Initialize the nonpaged pool */
        MiBuildNonPagedPool();

        /* Initialize system PTE handling */
        MiBuildSystemPteSpace();

        /* Build the physical memory block */
        MiBuildPhysicalMemoryBlock(LoaderBlock);

        /* Size up paged pool and build the shadow system page directory */
        //MiBuildPagedPool();

        // This is the old stuff:
        MmPagedPoolBase = (PVOID)((PCHAR)MmPagedPoolEnd + 1);
        MmPagedPoolSize = MM_PAGED_POOL_SIZE;
        ASSERT((PCHAR)MmPagedPoolBase + MmPagedPoolSize < (PCHAR)MmNonPagedSystemStart);

        /* Let the HAL map its BIOS/video memory now that MM is up */
        HalInitializeBios(0, LoaderBlock);
    }

    return STATUS_SUCCESS;
}
/*
 * Intentionally empty on amd64: the ARM3 and legacy-Mm address space
 * layouts need no synchronization here (unlike the i386 implementation).
 */
VOID
FASTCALL
MiSyncARM3WithROS(IN PVOID AddressStart,
                  IN PVOID AddressEnd)
{
}
/*
 * Machine-dependent MM initialization — not yet implemented on amd64.
 * Returns STATUS_NOT_IMPLEMENTED so callers can detect the stub.
 */
NTSTATUS
NTAPI
MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

View file

@ -0,0 +1,534 @@
/*
* COPYRIGHT: GPL, See COPYING in the top level directory
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/amd64/page.c
 * PURPOSE:         Low level memory management manipulation
*
* PROGRAMMER: Timo Kreuzer (timo.kreuzer@reactos.org)
*/
/* INCLUDES ***************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
#undef InterlockedExchangePte
/* Atomically replace *pte1 with pte2, returning the previous raw 64-bit
   PTE value. Both macro parameters are parenthesized so the macro stays
   correct when called with expression arguments (e.g. Pte + 1). */
#define InterlockedExchangePte(pte1, pte2) \
    InterlockedExchange64((LONG64*)&(pte1)->u.Long, (pte2).u.Long)
#define PAGE_EXECUTE_ANY (PAGE_EXECUTE|PAGE_EXECUTE_READ|PAGE_EXECUTE_READWRITE|PAGE_EXECUTE_WRITECOPY)
#define PAGE_WRITE_ANY (PAGE_EXECUTE_READWRITE|PAGE_READWRITE|PAGE_EXECUTE_WRITECOPY|PAGE_WRITECOPY)
#define PAGE_WRITECOPY_ANY (PAGE_EXECUTE_WRITECOPY|PAGE_WRITECOPY)
extern MMPTE HyperTemplatePte;
/* GLOBALS *****************************************************************/
/* PRIVATE FUNCTIONS *******************************************************/
/* Returns TRUE when Address falls inside the hyperspace mapping window
   [HYPER_SPACE, HYPER_SPACE_END]. */
BOOLEAN
FORCEINLINE
MiIsHyperspaceAddress(PVOID Address)
{
    ULONG64 Va = (ULONG64)Address;

    if (Va < HYPER_SPACE)
        return FALSE;
    return (Va <= HYPER_SPACE_END);
}
/* Invalidates the translation for Address. A PTE that was reached through
   a hyperspace mapping is only temporary, so in that case the hyperspace
   mapping itself is torn down instead of issuing an invlpg. */
VOID
MiFlushTlb(PMMPTE Pte, PVOID Address)
{
    if (!MiIsHyperspaceAddress(Pte))
    {
        /* Normal PTE: flush the stale TLB entry for this address */
        __invlpg(Address);
    }
    else
    {
        /* Hyperspace PTE: releasing the mapping also flushes it */
        MmDeleteHyperspaceMapping((PVOID)PAGE_ROUND_DOWN(Pte));
    }
}
/*
 * Returns the PTE mapping Address in the context of Process.
 *
 * With Create == TRUE, missing PXE/PPE/PDE levels are populated with a
 * valid+writable template entry; with Create == FALSE, NULL is returned
 * as soon as any level is not present.
 *
 * NOTE(review): cross-process lookup (hyperspace mapping of a foreign
 * address space) is unimplemented and returns NULL. The Create path's
 * PageFrameNumber assignments are commented out, so new table entries
 * would point at frame 0 — confirm before enabling the Create path.
 */
static
PMMPTE
MiGetPteForProcess(
    PEPROCESS Process,
    PVOID Address,
    BOOLEAN Create)
{
    MMPTE TmplPte, *Pte;

    /* Check if we need hyperspace mapping (user address of a process other
       than the current one) */
    if (Address < MmSystemRangeStart &&
        Process && Process != PsGetCurrentProcess())
    {
        UNIMPLEMENTED;
        return NULL;
    }
    else if (Create)
    {
        /* Template for a newly-created, writable table entry */
        TmplPte.u.Long = 0;
        TmplPte.u.Flush.Valid = 1;
        TmplPte.u.Flush.Write = 1;

        /* Get the PXE */
        Pte = MiAddressToPxe(Address);
        if (!Pte->u.Hard.Valid)
        {
//            TmplPte.u.Hard.PageFrameNumber = MiAllocPage(TRUE);
            InterlockedExchangePte(Pte, TmplPte);
        }

        /* Get the PPE */
        Pte = MiAddressToPpe(Address);
        if (!Pte->u.Hard.Valid)
        {
//            TmplPte.u.Hard.PageFrameNumber = MiAllocPage(TRUE);
            InterlockedExchangePte(Pte, TmplPte);
        }

        /* Get the PDE */
        Pte = MiAddressToPde(Address);
        if (!Pte->u.Hard.Valid)
        {
//            TmplPte.u.Hard.PageFrameNumber = MiAllocPage(TRUE);
            InterlockedExchangePte(Pte, TmplPte);
        }
    }
    else
    {
        /* Walk the levels read-only; bail out at the first missing one */

        /* Get the PXE */
        Pte = MiAddressToPxe(Address);
        if (!Pte->u.Hard.Valid)
            return NULL;

        /* Get the PPE */
        Pte = MiAddressToPpe(Address);
        if (!Pte->u.Hard.Valid)
            return NULL;

        /* Get the PDE */
        Pte = MiAddressToPde(Address);
        if (!Pte->u.Hard.Valid)
            return NULL;
    }

    return MiAddressToPte(Address);
}
/* Snapshots the raw 64-bit PTE value mapping Address in Process' context.
   Returns 0 when no PTE exists. Any temporary hyperspace mapping used to
   reach the PTE is released before returning. */
static
ULONG64
MiGetPteValueForProcess(
    PEPROCESS Process,
    PVOID Address)
{
    ULONG64 RawPte = 0;
    PMMPTE PointerPte;

    PointerPte = MiGetPteForProcess(Process, Address, FALSE);
    if (PointerPte != NULL)
    {
        RawPte = PointerPte->u.Long;
    }

    /* Drop the hyperspace mapping, if one was used (NULL is never in range) */
    if (MiIsHyperspaceAddress(PointerPte))
    {
        MmDeleteHyperspaceMapping((PVOID)PAGE_ROUND_DOWN(PointerPte));
    }

    return RawPte;
}
/* Translates the hardware bits of a PTE into Win32 PAGE_* protection
   flags. Cache attributes (PAGE_NOCACHE / PAGE_WRITETHROUGH) are OR'ed
   in regardless of the base protection, matching the original behavior
   — including for non-present (PAGE_NOACCESS) entries. */
ULONG
NTAPI
MiGetPteProtection(MMPTE Pte)
{
    ULONG Protect;

    if (!Pte.u.Flush.Valid)
    {
        /* Not present */
        Protect = PAGE_NOACCESS;
    }
    else if (Pte.u.Flush.CopyOnWrite)
    {
        Protect = Pte.u.Flush.NoExecute ? PAGE_WRITECOPY
                                        : PAGE_EXECUTE_WRITECOPY;
    }
    else if (Pte.u.Flush.Write)
    {
        Protect = Pte.u.Flush.NoExecute ? PAGE_READWRITE
                                        : PAGE_EXECUTE_READWRITE;
    }
    else
    {
        Protect = Pte.u.Flush.NoExecute ? PAGE_READONLY
                                        : PAGE_EXECUTE_READ;
    }

    /* Fold in the caching attributes */
    if (Pte.u.Flush.CacheDisable)
        Protect |= PAGE_NOCACHE;
    if (Pte.u.Flush.WriteThrough)
        Protect |= PAGE_WRITETHROUGH;

    // PAGE_GUARD ?
    return Protect;
}
/*
 * Applies Win32 PAGE_* protection flags to the hardware bits of a PTE.
 * Only the write/copy-on-write and caching bits are touched; the caller
 * is responsible for the Valid bit and the page frame number.
 *
 * NOTE(review): the NX bit is never set (see the FIXME below), so every
 * mapping created through this routine is executable.
 */
VOID
NTAPI
MiSetPteProtection(PMMPTE Pte, ULONG Protection)
{
    Pte->u.Flush.CopyOnWrite = (Protection & PAGE_WRITECOPY_ANY) ? 1 : 0;
    Pte->u.Flush.Write = (Protection & PAGE_WRITE_ANY) ? 1 : 0;
    Pte->u.Flush.CacheDisable = (Protection & PAGE_NOCACHE) ? 1 : 0;
    Pte->u.Flush.WriteThrough = (Protection & PAGE_WRITETHROUGH) ? 1 : 0;

    // FIXME: This doesn't work. Why?
//    Pte->u.Flush.NoExecute = (Protection & PAGE_EXECUTE_ANY) ? 0 : 1;
}
/* FUNCTIONS ***************************************************************/
/* Returns the physical page frame number backing Address in Process'
   address space, or 0 when the address is not mapped by a valid PTE. */
PFN_NUMBER
NTAPI
MmGetPfnForProcess(PEPROCESS Process,
                   PVOID Address)
{
    MMPTE Snapshot;

    Snapshot.u.Long = MiGetPteValueForProcess(Process, Address);
    if (!Snapshot.u.Hard.Valid)
    {
        return 0;
    }
    return Snapshot.u.Hard.PageFrameNumber;
}
/* Translates a (current/system context) virtual address to its physical
   address. Returns 0 when the address is not mapped by a valid PTE. */
PHYSICAL_ADDRESS
NTAPI
MmGetPhysicalAddress(PVOID Address)
{
    PHYSICAL_ADDRESS PhysAddr;
    MMPTE Snapshot;

    Snapshot.u.Long = MiGetPteValueForProcess(NULL, Address);
    if (!Snapshot.u.Hard.Valid)
    {
        /* Not mapped */
        PhysAddr.QuadPart = 0;
        return PhysAddr;
    }

    /* Page base plus the offset within the page */
    PhysAddr.QuadPart = Snapshot.u.Hard.PageFrameNumber * PAGE_SIZE;
    PhysAddr.u.LowPart |= (ULONG_PTR)Address & (PAGE_SIZE - 1);
    return PhysAddr;
}
/* Returns TRUE when Address is backed by a hardware-valid PTE in
   Process' address space. */
BOOLEAN
NTAPI
MmIsPagePresent(PEPROCESS Process, PVOID Address)
{
    MMPTE Snapshot;

    Snapshot.u.Long = MiGetPteValueForProcess(Process, Address);
    return Snapshot.u.Hard.Valid ? TRUE : FALSE;
}
/* Returns TRUE when the PTE for Address encodes a swap (pagefile) entry.
   A swap entry is a software PTE: the hardware Valid bit is clear and the
   transition marker is set. The previous code required Valid to be set,
   which can never be true for a swapped-out page, so it always returned
   FALSE for real swap entries.
   NOTE(review): swap entries are not produced yet on this architecture
   (MmCreatePageFileMapping is unimplemented); re-verify the encoding when
   that routine is written. */
BOOLEAN
NTAPI
MmIsPageSwapEntry(PEPROCESS Process, PVOID Address)
{
    MMPTE Pte;

    Pte.u.Long = MiGetPteValueForProcess(Process, Address);
    return !Pte.u.Hard.Valid && Pte.u.Soft.Transition;
}
/* Returns TRUE when Address maps a valid page whose hardware dirty bit
   is set in Process' address space. */
BOOLEAN
NTAPI
MmIsDirtyPage(PEPROCESS Process, PVOID Address)
{
    MMPTE Snapshot;

    Snapshot.u.Long = MiGetPteValueForProcess(Process, Address);
    if (!Snapshot.u.Hard.Valid)
    {
        return FALSE;
    }
    return Snapshot.u.Hard.Dirty ? TRUE : FALSE;
}
/* Returns the PAGE_* protection flags for Address in Process' address
   space, derived from a snapshot of its PTE. */
ULONG
NTAPI
MmGetPageProtect(PEPROCESS Process, PVOID Address)
{
    MMPTE Snapshot;

    /* Snapshot the PTE, then translate its hardware bits */
    Snapshot.u.Long = MiGetPteValueForProcess(Process, Address);
    return MiGetPteProtection(Snapshot);
}
/*
 * Changes the protection of an existing mapping at Address to flProtect.
 * The mapping must already exist (asserted); the new PTE is published
 * atomically and the old translation is flushed.
 */
VOID
NTAPI
MmSetPageProtect(PEPROCESS Process, PVOID Address, ULONG flProtect)
{
    PMMPTE Pte;
    MMPTE NewPte;

    /* Look up the existing PTE; do not create table levels */
    Pte = MiGetPteForProcess(Process, Address, FALSE);
    ASSERT(Pte != NULL);

    /* Copy the old PTE and rewrite its protection bits */
    NewPte = *Pte;
    MiSetPteProtection(&NewPte, flProtect);

    /* Publish atomically, then invalidate the stale translation */
    InterlockedExchangePte(Pte, NewPte);
    MiFlushTlb(Pte, Address);
}
/*
 * Clears the hardware dirty bit of the page mapping Address.
 * Bugchecks if the address has no PTE at all.
 */
VOID
NTAPI
MmSetCleanPage(PEPROCESS Process, PVOID Address)
{
    PMMPTE Pte;

    Pte = MiGetPteForProcess(Process, Address, FALSE);
    if (!Pte)
    {
        /* No PTE — the caller asked to clean an unmapped page */
        KeBugCheckEx(MEMORY_MANAGEMENT, 0x1234, (ULONG64)Address, 0, 0);
    }

    /* Clear the dirty bit (bit 6 of the raw PTE) atomically */
    if (InterlockedBitTestAndReset64((PVOID)Pte, 6))
    {
        /* The bit was actually set: invalidate the cached translation */
        if (!MiIsHyperspaceAddress(Pte))
            __invlpg(Address);
    }
    /* NOTE(review): for the non-hyperspace case this flushes a second
       time — looks redundant; confirm intent before simplifying */
    MiFlushTlb(Pte, Address);
}
/*
 * Sets the hardware dirty bit of the page mapping Address.
 * Bugchecks if the address has no PTE at all.
 */
VOID
NTAPI
MmSetDirtyPage(PEPROCESS Process, PVOID Address)
{
    PMMPTE Pte;

    Pte = MiGetPteForProcess(Process, Address, FALSE);
    if (!Pte)
    {
        /* No PTE — the caller asked to dirty an unmapped page */
        KeBugCheckEx(MEMORY_MANAGEMENT, 0x1234, (ULONG64)Address, 0, 0);
    }

    /* Set the dirty bit (bit 6 of the raw PTE) atomically; the original
       comment said "clear" but this routine sets it */
    if (InterlockedBitTestAndSet64((PVOID)Pte, 6))
    {
        /* The bit was already set: invalidate the cached translation */
        if (!MiIsHyperspaceAddress(Pte))
            __invlpg(Address);
    }
    /* NOTE(review): for the non-hyperspace case this flushes a second
       time — looks redundant; confirm intent before simplifying */
    MiFlushTlb(Pte, Address);
}
/*
 * Releases a process' memory-management structures — not implemented on
 * amd64 yet. Returns STATUS_UNSUCCESSFUL so callers see the failure.
 */
NTSTATUS
NTAPI
Mmi386ReleaseMmInfo(PEPROCESS Process)
{
    UNIMPLEMENTED;
    return STATUS_UNSUCCESSFUL;
}
/* Temporarily disables a virtual mapping — not implemented on amd64 yet. */
VOID
NTAPI
MmDisableVirtualMapping(PEPROCESS Process, PVOID Address, BOOLEAN* WasDirty, PPFN_NUMBER Page)
{
    UNIMPLEMENTED;
}
/* Deletes a mapping without PFN bookkeeping — not implemented on amd64 yet. */
VOID
NTAPI
MmRawDeleteVirtualMapping(PVOID Address)
{
    UNIMPLEMENTED;
}
/*
 * Removes the mapping at Address from Process' address space.
 *
 * The PTE is zeroed atomically; if it was valid and FreePage is TRUE the
 * backing physical page is returned to the memory balancer. On return,
 * *WasDirty (if supplied) reports the old dirty bit and *Page (if
 * supplied) the old page frame number (0 when nothing was mapped).
 */
VOID
NTAPI
MmDeleteVirtualMapping(
    PEPROCESS Process,
    PVOID Address,
    BOOLEAN FreePage,
    BOOLEAN* WasDirty,
    PPFN_NUMBER Page)
{
    PFN_NUMBER Pfn;
    PMMPTE Pte;
    MMPTE OldPte;

    /* Look up the PTE; do not create missing table levels */
    Pte = MiGetPteForProcess(Process, Address, FALSE);

    if (Pte)
    {
        /* Atomically set the entry to zero and get the old value. */
        OldPte.u.Long = InterlockedExchange64((LONG64*)&Pte->u.Long, 0);

        if (OldPte.u.Hard.Valid)
        {
            Pfn = OldPte.u.Hard.PageFrameNumber;

            /* Optionally hand the physical page back to the balancer */
            if (FreePage)
                MmReleasePageMemoryConsumer(MC_NPPOOL, Pfn);
        }
        else
            Pfn = 0;
    }
    else
    {
        /* No page table covers this address: nothing was mapped */
        OldPte.u.Long = 0;
        Pfn = 0;
    }

    /* Return information to the caller (stray ';;' removed) */
    if (WasDirty)
        *WasDirty = OldPte.u.Hard.Dirty;
    if (Page)
        *Page = Pfn;

    /* Invalidate the old translation / release a hyperspace mapping */
    MiFlushTlb(Pte, Address);
}
/* Removes a pagefile (swap-entry) mapping — not implemented on amd64 yet. */
VOID
NTAPI
MmDeletePageFileMapping(PEPROCESS Process, PVOID Address,
                        SWAPENTRY* SwapEntry)
{
    UNIMPLEMENTED;
}
/* Re-enables a previously disabled mapping — not implemented on amd64 yet. */
VOID
NTAPI
MmEnableVirtualMapping(PEPROCESS Process, PVOID Address)
{
    UNIMPLEMENTED;
}
/*
 * Installs a pagefile (swap-entry) PTE — not implemented on amd64 yet.
 * Returns STATUS_UNSUCCESSFUL so callers see the failure.
 */
NTSTATUS
NTAPI
MmCreatePageFileMapping(PEPROCESS Process,
                        PVOID Address,
                        SWAPENTRY SwapEntry)
{
    UNIMPLEMENTED;
    return STATUS_UNSUCCESSFUL;
}
/*
 * Maps PageCount physical pages (Pages[]) at Address with the given
 * PageProtection, without verifying the pages are tracked as in-use
 * (see MmCreateVirtualMapping for the checked variant).
 *
 * Missing page-table levels are created on demand. Always returns
 * STATUS_SUCCESS; range violations assert instead of failing.
 *
 * NOTE(review): the DPRINT1 below fires on every page mapped — very
 * noisy; consider downgrading once this code path is stable. It also
 * prints the raw PTE value with %p — works on amd64 (64-bit pointers)
 * but is not the matching specifier.
 */
NTSTATUS
NTAPI
MmCreateVirtualMappingUnsafe(
    PEPROCESS Process,
    PVOID Address,
    ULONG PageProtection,
    PPFN_NUMBER Pages,
    ULONG PageCount)
{
    ULONG i;
    MMPTE TmplPte, *Pte;

    /* Check if the range is valid */
    if ((Process == NULL && Address < MmSystemRangeStart) ||
        (Process != NULL && Address > MmHighestUserAddress))
    {
        DPRINT1("Address 0x%p is invalid for process %p\n", Address, Process);
        ASSERT(FALSE);
    }

    /* Template PTE: valid, with the requested protection bits */
    TmplPte.u.Long = 0;
    TmplPte.u.Hard.Valid = 1;
    MiSetPteProtection(&TmplPte, PageProtection);

//__debugbreak();

    for (i = 0; i < PageCount; i++)
    {
        /* Fill in this page's frame and publish the PTE */
        TmplPte.u.Hard.PageFrameNumber = Pages[i];

        Pte = MiGetPteForProcess(Process, Address, TRUE);

DPRINT1("MmCreateVirtualMappingUnsafe, Address=%p, TmplPte=%p, Pte=%p\n",
        Address, TmplPte.u.Long, Pte);

        /* A non-zero old value means we replaced a live translation */
        if (InterlockedExchangePte(Pte, TmplPte))
        {
            KeInvalidateTlbEntry(Address);
        }

        /* Release the temporary hyperspace mapping, if one was used */
        if (MiIsHyperspaceAddress(Pte))
            MmDeleteHyperspaceMapping((PVOID)PAGE_ROUND_DOWN(Pte));

        Address = (PVOID)((ULONG64)Address + PAGE_SIZE);
    }

    return STATUS_SUCCESS;
}
/* Checked variant of MmCreateVirtualMappingUnsafe: bugchecks if any of
   the supplied pages is not tracked as in-use, then delegates the actual
   mapping work. */
NTSTATUS
NTAPI
MmCreateVirtualMapping(PEPROCESS Process,
                       PVOID Address,
                       ULONG Protect,
                       PPFN_NUMBER Pages,
                       ULONG PageCount)
{
    ULONG Index;

    /* Validate every page before touching any PTE */
    for (Index = 0; Index < PageCount; Index++)
    {
        if (MmIsPageInUse(Pages[Index]))
            continue;

        DPRINT1("Page %x not in use\n", Pages[Index]);
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    /* All pages checked out; create the mappings */
    return MmCreateVirtualMappingUnsafe(Process, Address, Protect, Pages, PageCount);
}
/*
 * Initializes the address space of a hand-built (system/idle-style)
 * process: it shares the idle process' directory base rather than
 * getting page tables of its own. Always returns STATUS_SUCCESS.
 */
NTSTATUS
NTAPI
MmInitializeHandBuiltProcess(IN PEPROCESS Process,
                             IN PULONG_PTR DirectoryTableBase)
{
    /* Share the directory base with the idle process */
    DirectoryTableBase[0] = PsGetCurrentProcess()->Pcb.DirectoryTableBase[0];
    DirectoryTableBase[1] = PsGetCurrentProcess()->Pcb.DirectoryTableBase[1];

    /* Initialize the Address Space */
    KeInitializeGuardedMutex(&Process->AddressCreationLock);
    Process->Vm.WorkingSetExpansionLinks.Flink = NULL;
    ASSERT(Process->VadRoot.NumberGenericTableElements == 0);
    /* Empty VAD tree: the root's parent points at itself */
    Process->VadRoot.BalancedRoot.u1.Parent = &Process->VadRoot.BalancedRoot;

    /* The process now has an address space */
    Process->HasAddressSpace = TRUE;
    return STATUS_SUCCESS;
}
/*
 * Creates a fresh address space for a new process — not implemented on
 * amd64 yet. Returns FALSE (was a bare `0` for a BOOLEAN return) so
 * callers see the failure.
 */
BOOLEAN
NTAPI
MmCreateProcessAddressSpace(IN ULONG MinWs,
                            IN PEPROCESS Process,
                            IN PULONG_PTR DirectoryTableBase)
{
    UNIMPLEMENTED;
    return FALSE;
}
/* Returns TRUE when VirtualAddress is mapped: every one of the four
   paging levels (PXE, PPE, PDE, PTE) must be hardware-valid. */
BOOLEAN
NTAPI
_MmIsAddressValid(IN PVOID VirtualAddress)
{
    /* Walk the hierarchy top-down; bail out at the first missing level */
    if (!MiAddressToPxe(VirtualAddress)->u.Hard.Valid)
        return FALSE;
    if (!MiAddressToPpe(VirtualAddress)->u.Hard.Valid)
        return FALSE;
    if (!MiAddressToPde(VirtualAddress)->u.Hard.Valid)
        return FALSE;

    return (MiAddressToPte(VirtualAddress)->u.Hard.Valid != 0);
}
/* EOF */

View file

@ -84,6 +84,22 @@
<file>ctxhelp.S</file>
</directory>
</if>
<if property="ARCH" value="amd64">
<directory name="amd64">
<file first="true">boot.S</file>
<file>context.c</file>
<file>cpu.c</file>
<file>ctxswitch.S</file>
<file>except.c</file>
<file>interrupt.c</file>
<file>irql.c</file>
<file>kiinit.c</file>
<file>spinlock.c</file>
<file>stubs.c</file>
<file>thrdini.c</file>
<file>trap.S</file>
</directory>
</if>
<file>apc.c</file>
<file>balmgr.c</file>
<file>bug.c</file>
@ -93,7 +109,9 @@
<file>dpc.c</file>
<file>eventobj.c</file>
<file>except.c</file>
<file>freeldr.c</file>
<if property="ARCH" value="i386">
<file>freeldr.c</file>
</if>
<file>freeze.c</file>
<file>gate.c</file>
<file>gmutex.c</file>
@ -125,6 +143,11 @@
<file>cmhardwr.c</file>
</directory>
</if>
<if property="ARCH" value="amd64">
<directory name="i386">
<file>cmhardwr.c</file>
</directory>
</if>
<if property="ARCH" value="arm">
<directory name="arm">
<file>cmhardwr.c</file>
@ -178,6 +201,11 @@
<file>ioport.S</file>
</directory>
</if>
<if property="ARCH" value="amd64">
<directory name="amd64">
<file>fastinterlck.c</file>
</directory>
</if>
<file>atom.c</file>
<file>callback.c</file>
<file>dbgctrl.c</file>
@ -299,6 +327,17 @@
</if>
</directory>
</if>
<if property="ARCH" value="amd64">
<directory name="amd64">
<if property="KDBG" value="1">
<group>
<file>i386-dis.c</file>
<file>kdb_help.S</file>
<file>kdb.c</file>
</group>
</if>
</directory>
</if>
<if property="KDBG" value="1">
<file>kdb.c</file>
<file>kdb_cli.c</file>
@ -321,6 +360,12 @@
</if>
<file>kdbg.c</file>
</directory>
<if property="ARCH" value="amd64">
<directory name="amd64">
<file>kd.c</file>
<file>kdmemsup.c</file>
</directory>
</if>
<file>kdinit.c</file>
<file>kdio.c</file>
<file>kdmain.c</file>
@ -379,6 +424,12 @@
<file>page.c</file>
</directory>
</if>
<if property="ARCH" value="amd64">
<directory name="amd64">
<file>init.c</file>
<file>page.c</file>
</directory>
</if>
<directory name="ARM3">
<if property="ARCH" value="i386">
<directory name="i386">
@ -462,6 +513,11 @@
<file>psctx.c</file>
</directory>
</if>
<if property="ARCH" value="amd64">
<directory name="amd64">
<file>psctx.c</file>
</directory>
</if>
<file>debug.c</file>
<file>job.c</file>
<file>kill.c</file>

View file

@ -0,0 +1,95 @@
/*
* PROJECT: ReactOS Kernel
* LICENSE: GPL - See COPYING in the top level directory
 * FILE:            ntoskrnl/ps/amd64/psctx.c
 * PURPOSE:         Process Manager: Set/Get Context for amd64
* PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
* Timo Kreuzer (timo.kreuzer@reactos.org)
*/
/* INCLUDES *******************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
/* FUNCTIONS ******************************************************************/
/*
 * Fills *Context from the thread's trap frame.
 * NonVolatileContext is unused here — register state comes entirely from
 * the trap frame via KeTrapFrameToContext.
 */
VOID
NTAPI
PspGetContext(IN PKTRAP_FRAME TrapFrame,
              IN PVOID NonVolatileContext,
              IN OUT PCONTEXT Context)
{
    PAGED_CODE();

    /* Convert the trap frame to a context */
    KeTrapFrameToContext(TrapFrame, NULL, Context);
}
/*
 * Writes the register state from *Context into the thread's trap frame,
 * honoring Context->ContextFlags and validating against Mode (user-mode
 * callers cannot set privileged state).
 * NonVolatileContext is unused here — everything goes through
 * KeContextToTrapFrame.
 */
VOID
NTAPI
PspSetContext(OUT PKTRAP_FRAME TrapFrame,
              OUT PVOID NonVolatileContext,
              IN PCONTEXT Context,
              IN KPROCESSOR_MODE Mode)
{
    PAGED_CODE();

    /* Convert the context to a trap frame structure */
    KeContextToTrapFrame(Context, NULL, TrapFrame, Context->ContextFlags, Mode);
}
/*
 * APC kernel routine that performs NtGet/SetContextThread work in the
 * context of the target thread. SystemArgument1 selects the operation
 * (non-NULL = set, NULL = get); SystemArgument2 carries the target
 * PKTHREAD. Signals the requester's event when done.
 *
 * NOTE: the original in-body comments had "Get" and "Set" swapped
 * relative to the calls — corrected below.
 */
VOID
NTAPI
PspGetOrSetContextKernelRoutine(IN PKAPC Apc,
                                IN OUT PKNORMAL_ROUTINE* NormalRoutine,
                                IN OUT PVOID* NormalContext,
                                IN OUT PVOID* SystemArgument1,
                                IN OUT PVOID* SystemArgument2)
{
    PGET_SET_CTX_CONTEXT GetSetContext;
    PKEVENT Event;
    PCONTEXT Context;
    PKTHREAD Thread;
    KPROCESSOR_MODE Mode;
    PKTRAP_FRAME TrapFrame = NULL;
    PAGED_CODE();

    /* Get the Context Structure */
    GetSetContext = CONTAINING_RECORD(Apc, GET_SET_CTX_CONTEXT, Apc);
    Context = &GetSetContext->Context;
    Event = &GetSetContext->Event;
    Mode = GetSetContext->Mode;
    Thread = Apc->SystemArgument2;

    /* If this is a kernel-mode request, grab the saved trap frame */
    if (Mode == KernelMode) TrapFrame = Thread->TrapFrame;

    /* If we don't have one, grab it from the stack */
    if (!TrapFrame)
    {
        /* amd64 trap-frame location not worked out yet — see disabled code */
        DPRINT1("FIXME!!!!\n");

        /* Trap frame is right under our initial stack */
        // TrapFrame = (PKTRAP_FRAME)((ULONG_PTR)Thread->InitialStack -
        //                            ROUND_UP(sizeof(KTRAP_FRAME), KTRAP_FRAME_ALIGN) -
        //                            sizeof(FX_SAVE_AREA));
    }

    /* Check if it's a set or get */
    if (Apc->SystemArgument1)
    {
        /* Set the Context */
        PspSetContext(TrapFrame, NULL, Context, Mode);
    }
    else
    {
        /* Get the Context */
        PspGetContext(TrapFrame, NULL, Context);
    }

    /* Notify the Native API that we are done */
    KeSetEvent(Event, IO_NO_INCREMENT, FALSE);
}
/* EOF */