[NTOS]: Implement all V8086 Opcode emulation in C instead of ASM.

svn path=/trunk/; revision=45029
Sir Richard 2010-01-10 15:40:00 +00:00
parent ff8920ffc1
commit 15b66143d6
4 changed files with 474 additions and 642 deletions


@@ -84,6 +84,60 @@ typedef union _KTRAP_EXIT_SKIP_BITS
};
UCHAR Bits;
} KTRAP_EXIT_SKIP_BITS, *PKTRAP_EXIT_SKIP_BITS;
//
// Flags used by the VDM/V8086 emulation engine for determining instruction prefixes
//
#define PFX_FLAG_ES 0x00000100
#define PFX_FLAG_CS 0x00000200
#define PFX_FLAG_SS 0x00000400
#define PFX_FLAG_DS 0x00000800
#define PFX_FLAG_FS 0x00001000
#define PFX_FLAG_GS 0x00002000
#define PFX_FLAG_OPER32 0x00004000
#define PFX_FLAG_ADDR32 0x00008000
#define PFX_FLAG_LOCK 0x00010000
#define PFX_FLAG_REPNE 0x00020000
#define PFX_FLAG_REP 0x00040000
//
// VDM Helper Macros
//
// All VDM/V8086 opcode emulators have the same FASTCALL function definition.
// We only have 2 parameters available, while the original ASM implementation
// used 4: TrapFrame, PrefixFlags, Eip, InstructionSize.
//
// We pass the trap frame and the prefix flags in our two parameters.
//
// Since the smallest prefix flag is 0x100, the low byte of the flags is free
// to hold a count of up to 0xFF, so we OR the instruction size into the
// prefix flags.
//
// We also always have access to EIP from the trap frame, and if we want the
// *current instruction* EIP, we simply have to add the instruction size
// *MINUS ONE*; that gives us the EIP we should be looking at now, so we don't
// need to push this parameter on the stack either.
//
// We only care about the *current instruction* EIP in one location, so
// although it is slightly more expensive to re-calculate the EIP there, we
// avoid redefining ALL opcode handlers to take 3 parameters, which would
// force stack usage in every other scenario.
//
#define KiVdmSetVdmEFlags(x) InterlockedOr((PLONG)KiNtVdmState, (x));
#define KiVdmClearVdmEFlags(x) InterlockedAnd((PLONG)KiNtVdmState, ~(x))
#define KiCallVdmHandler(x) KiVdmOpcode##x(TrapFrame, Flags)
#define KiCallVdmPrefixHandler(x) KiVdmOpcodePrefix(TrapFrame, Flags | x)
#define KiVdmUnhandledOpcode(x) \
BOOLEAN \
FASTCALL \
KiVdmOpcode##x(IN PKTRAP_FRAME TrapFrame, \
IN ULONG Flags) \
{ \
/* Not yet handled */ \
UNIMPLEMENTED; \
while (TRUE); \
}
//
// Registers an interrupt handler with an IDT vector
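To make the packing scheme described in the comments above concrete, here is a minimal, stand-alone sketch (not part of the commit): the prefix bits live at 0x100 and above, the low byte of the same ULONG carries the number of instruction bytes decoded so far, and a handler recovers that count with Flags & 0xFF. Only the PFX_FLAG_OPER32 value is taken from the definitions above; FAKE_TRAP_FRAME and ShowFlags are hypothetical names used for illustration.

#include <stdio.h>

/* Same value as the PFX_FLAG_OPER32 definition above */
#define PFX_FLAG_OPER32 0x00004000

/* Hypothetical stand-in for the kernel trap frame */
typedef struct _FAKE_TRAP_FRAME
{
    unsigned long Eip;
} FAKE_TRAP_FRAME;

/* One ULONG-sized parameter: prefix bits in the high bits, byte count in the low byte */
static void ShowFlags(FAKE_TRAP_FRAME *TrapFrame, unsigned long Flags)
{
    unsigned long Size = Flags & 0xFF;                     /* instruction bytes decoded so far */
    unsigned long CurrentEip = TrapFrame->Eip + Size - 1;  /* EIP of the opcode byte being emulated */

    printf("prefixes=%#lx size=%lu current EIP=%#lx\n",
           Flags & ~0xFFUL, Size, CurrentEip);

    /* Skip the emulated instruction, exactly like TrapFrame->Eip += (Flags & 0xFF) */
    TrapFrame->Eip += Size;
}

int main(void)
{
    FAKE_TRAP_FRAME Frame = { 0x1000 };

    ShowFlags(&Frame, 1);                   /* plain one-byte opcode */
    ShowFlags(&Frame, PFX_FLAG_OPER32 | 2); /* 66h prefix followed by the opcode */
    return 0;
}

This is the same bookkeeping that lets KiVdmOpcodePrefix in the new C file below get away with a single Flags++ before re-dispatching, and lets every handler advance past the instruction with TrapFrame->Eip += (Flags & 0xFF).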


@@ -755,7 +755,8 @@ RaiseIrql:
sti
/* Handle the opcode */
call _Ki386HandleOpcodeV86@0
mov ecx, ebp
call @Ki386HandleOpcodeV86@4
/* Check if this was VDM */
test al, 0xFF


@@ -12,614 +12,6 @@
#include <internal/i386/asmmacro.S>
.intel_syntax noprefix
/* FIXME: Can we make a nice macro to generate V86 Opcode handlers? */
/* GLOBALS *******************************************************************/
//
// This table contains indexes into the OpcodeDispatchV86 Table for opcodes in
// Virtual-8086 Mode.
// There are 256 entries.
//
OpcodeIndex:
INVALID_V86_OPCODE 15 /* OP 00-0E: UNHANDLED */
.byte 1 /* OP 0F: 0F */
INVALID_V86_OPCODE 22 /* OP 10-25: UNHANDLED */
.byte 2 /* OP 26: ES Prefix */
INVALID_V86_OPCODE 7 /* OP 27-2D: UNHANDLED */
.byte 3 /* OP 2E: CS Prefix */
INVALID_V86_OPCODE 7 /* OP 2F-35: UNHANDLED */
.byte 4 /* OP 36: SS Prefix */
INVALID_V86_OPCODE 7 /* OP 37-3D: UNHANDLED */
.byte 5 /* OP 3E: DS Prefix */
INVALID_V86_OPCODE 37 /* OP 3F-63: UNHANDLED */
.byte 6 /* OP 64: FS Prefix */
.byte 7 /* OP 65: GS Prefix */
.byte 8 /* OP 66: OPER32 Prefix */
.byte 9 /* OP 67: ADDR32 Prefix */
INVALID_V86_OPCODE 4 /* OP 68-6B: UNHANDLED */
.byte 10 /* OP 6C: INSB */
.byte 11 /* OP 6D: INSW */
.byte 12 /* OP 6E: OUTSB */
.byte 13 /* OP 6F: OUTSW */
INVALID_V86_OPCODE 43 /* OP 70-9A: UNHANDLED */
.byte 19 /* OP 9B: NPX */
.byte 14 /* OP 9C: PUSHF */
.byte 15 /* OP 9D: POPF */
INVALID_V86_OPCODE 47 /* OP 9E-CC: UNHANDLED */
.byte 16 /* OP CD: INTnn */
.byte 17 /* OP CE: INTO */
.byte 18 /* OP CF: IRETD */
INVALID_V86_OPCODE 8 /* OP D0-D7: UNHANDLED */
.byte 19 /* OP D8: NPX */
.byte 19 /* OP D9: NPX */
.byte 19 /* OP DA: NPX */
.byte 19 /* OP DB: NPX */
.byte 19 /* OP DC: NPX */
.byte 19 /* OP DD: NPX */
.byte 19 /* OP DE: NPX */
.byte 19 /* OP DF: NPX */
INVALID_V86_OPCODE 4 /* OP E0-E3: UNHANDLED */
.byte 20 /* OP E4: INBimm */
.byte 21 /* OP E5: INWimm */
.byte 22 /* OP E6: OUTBimm */
.byte 23 /* OP E7: OUTWimm */
INVALID_V86_OPCODE 4 /* OP E8-EB: UNHANDLED */
.byte 24 /* OP EC: INB */
.byte 25 /* OP ED: INW */
.byte 26 /* OP EE: OUTB */
.byte 27 /* OP EF: OUTW */
.byte 28 /* OP F0: LOCK Prefix */
.byte 0 /* OP F1: UNHANDLED */
.byte 29 /* OP F2: REPNE Prefix */
.byte 30 /* OP F3: REP Prefix */
.byte 33 /* OP F4: HLT */
INVALID_V86_OPCODE 5 /* OP F5-F9: UNHANDLED */
.byte 31 /* OP FA: CLI */
.byte 32 /* OP FB: STI */
INVALID_V86_OPCODE 4 /* OP FC-FF: UNHANDLED */
//
// This table contains the emulation routines for
// Virtual-8086 Mode. There are 34 entries.
//
OpcodeDispatchV86:
.long _OpcodeInvalidV86
.long _Opcode0FV86
.long _OpcodeESPrefixV86
.long _OpcodeCSPrefixV86
.long _OpcodeSSPrefixV86
.long _OpcodeDSPrefixV86
.long _OpcodeFSPrefixV86
.long _OpcodeGSPrefixV86
.long _OpcodeOPER32PrefixV86
.long _OpcodeADDR32PrefixV86
.long _OpcodeINSBV86
.long _OpcodeINSWV86
.long _OpcodeOUTSBV86
.long _OpcodeOUTSWV86
.long _OpcodePUSHFV86
.long _OpcodePOPFV86
.long _OpcodeINTnnV86
.long _OpcodeINTOV86
.long _OpcodeIRETV86
.long _OpcodeNPXV86
.long _OpcodeINBimmV86
.long _OpcodeINWimmV86
.long _OpcodeOUTBimmV86
.long _OpcodeOUTWimmV86
.long _OpcodeINBV86
.long _OpcodeINWV86
.long _OpcodeOUTBV86
.long _OpcodeOUTWV86
.long _OpcodeLOCKPrefixV86
.long _OpcodeREPNEPrefixV86
.long _OpcodeREPPrefixV86
.long _OpcodeCLIV86
.long _OpcodeSTIV86
.long _OpcodeHLTV86
_ExVdmOpcodeDispatchCounts:
.rept 34
.long 0
.endr
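As an aside, the OpcodeIndex and OpcodeDispatchV86 tables above implement a classic two-level dispatch: a 256-entry byte table maps each opcode to a small index, and that index selects one of the 34 handler pointers. A compact C sketch of the same idea (hypothetical handler names, not part of the commit, assuming a C99 compiler for the designated initializers):

#include <stdio.h>

typedef int (*V86_HANDLER)(void);

/* Hypothetical handlers standing in for the _Opcode*V86 routines below */
static int HandlerInvalid(void) { puts("invalid opcode"); return 0; }
static int HandlerCli(void)     { puts("emulate CLI");    return 1; }
static int HandlerSti(void)     { puts("emulate STI");    return 1; }

/* Second level: one entry per handler (34 entries in the real OpcodeDispatchV86) */
static const V86_HANDLER DispatchTable[] = { HandlerInvalid, HandlerCli, HandlerSti };

/* First level: 256 one-byte indexes, one per opcode; 0 means "invalid" */
static const unsigned char OpcodeIndex[256] = { [0xFA] = 1, [0xFB] = 2 };

int main(void)
{
    unsigned char Opcode = 0xFA;                  /* CLI */
    return DispatchTable[OpcodeIndex[Opcode]]();  /* two lookups, then an indirect call */
}

The commit deletes this table pair and replaces it with the switch statement in KiVdmHandleOpcode, shown in the new C file further down.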
V86DebugMsg:
.asciz "Received V86 Emulation Opcode: %lx\n"
/* VIRTUAL-8086 MODE OPCODE HANDLERS *****************************************/
.func OpcodeInvalidV86
_OpcodeInvalidV86:
UNHANDLED_V86_OPCODE
.endfunc
.func Opcode0FV86
_Opcode0FV86:
UNHANDLED_V86_OPCODE
.endfunc
#undef LOCK
GENERATE_PREFIX_HANDLER ES
GENERATE_PREFIX_HANDLER CS
GENERATE_PREFIX_HANDLER DS
GENERATE_PREFIX_HANDLER FS
GENERATE_PREFIX_HANDLER GS
GENERATE_PREFIX_HANDLER SS
GENERATE_PREFIX_HANDLER OPER32
GENERATE_PREFIX_HANDLER ADDR32
GENERATE_PREFIX_HANDLER LOCK
GENERATE_PREFIX_HANDLER REP
GENERATE_PREFIX_HANDLER REPNE
#ifdef CONFIG_SMP
#define LOCK lock
#else
#define LOCK
#endif
.func OpcodeINSBV86
_OpcodeINSBV86:
UNHANDLED_V86_OPCODE
.endfunc
.func OpcodeINSWV86
_OpcodeINSWV86:
UNHANDLED_V86_OPCODE
.endfunc
.func OpcodeOUTSBV86
_OpcodeOUTSBV86:
UNHANDLED_V86_OPCODE
.endfunc
.func OpcodeOUTSWV86
_OpcodeOUTSWV86:
UNHANDLED_V86_OPCODE
.endfunc
.func OpcodePUSHFV86
_OpcodePUSHFV86:
/* Get VDM state */
mov eax, FIXED_NTVDMSTATE_LINEAR_PC_AT
mov eax, [eax]
/* Get EFLAGS and mask out IF */
mov edx, [ebp+KTRAP_FRAME_EFLAGS]
and eax, ~EFLAGS_INTERRUPT_MASK
/* Mask align check and interrupt mask */
and eax, EFLAGS_ALIGN_CHECK + EFLAGS_NESTED_TASK + EFLAGS_INTERRUPT_MASK
or eax, edx
/* Add IOPL Mask */
or eax, EFLAGS_IOPL
/* Get flat ESP */
movzx ecx, word ptr [ebp+KTRAP_FRAME_SS]
shl ecx, 4
movzx edx, word ptr [ebp+KTRAP_FRAME_ESP]
sub dx, 2
/* Check if there is an OPER32 prefix */
test ebx, PREFIX_FLAG_OPER32
jnz SkipPrefix
/* Push EFLAGS */
mov [ecx+edx], ax
UpdateFrame:
/* Update ESP and EIP */
mov [ebp+KTRAP_FRAME_ESP], dx
add [ebp+KTRAP_FRAME_EIP], edi
/* Return success */
mov eax, 1
ret
SkipPrefix:
/* Skip the prefix, push EFLAGS and jump back */
sub dx, 2
mov [edx+ecx], eax
jmp UpdateFrame
.endfunc
.func OpcodePOPFV86
_OpcodePOPFV86:
/* Get VDM state */
mov eax, FIXED_NTVDMSTATE_LINEAR_PC_AT
/* Get flat ESP */
mov ecx, [ebp+KTRAP_FRAME_SS]
shl ecx, 4
movzx edx, word ptr [ebp+KTRAP_FRAME_ESP]
/* Pop EFLAGS */
mov ecx, [ecx+edx]
add edx, 4
/* Check for OPER32 prefix */
test ebx, PREFIX_FLAG_OPER32
jnz NoPrefix
/* Skip 2 bytes */
and ecx, 0xFFFF
sub edx, 2
NoPrefix:
/* Set new ESP */
mov [ebp+KTRAP_FRAME_ESP], edx
/* Mask out EFLAGS */
and eax, ~EFLAGS_IOPL
mov ebx, ebx
and ebx, ~EFLAGS_NESTED_TASK
and ecx, EFLAGS_ALIGN_CHECK + EFLAGS_NESTED_TASK + EFLAGS_INTERRUPT_MASK
/* FIXME: Support VME */
/* Save VDM State pointer */
push eax
/* Set new EFLAGS, make sure to add IF and V86 */
or ebx, EFLAGS_INTERRUPT_MASK + EFLAGS_V86_MASK
push [ebp+KTRAP_FRAME_EFLAGS]
mov [ebp+KTRAP_FRAME_EFLAGS], ebx
/* Make sure we were in V86 mode */
test ebx, EFLAGS_V86_MASK
jnz CheckEspAdjust
int 3
CheckEspAdjust:
/* Check if we have to update ESP0 and fixup the stack from our push */
test dword ptr [ebp+KTRAP_FRAME_EFLAGS], EFLAGS_V86_MASK
lea esp, [esp+4]
jnz NoAdjustEsp0
/* Adjust it */
push ebp
call _Ki386AdjustEsp0@4
NoAdjustEsp0:
/* Restore VDM state */
pop eax
/* Update the flags in the VDM State */
LOCK and dword ptr [eax], ~(EFLAGS_ALIGN_CHECK + EFLAGS_NESTED_TASK + EFLAGS_INTERRUPT_MASK)
LOCK or [eax], ecx
/* Update EIP */
add [ebp+KTRAP_FRAME_EIP], edi
/* FIXME: Check for VDM Pending interrupts */
/* Return success */
mov eax, 1
ret
.endfunc
.func OpcodeINTnnV86
_OpcodeINTnnV86:
/* Get EFlags */
mov edx, [ebp+KTRAP_FRAME_EFLAGS]
/* Remove the flag in the VDM State */
mov eax, FIXED_NTVDMSTATE_LINEAR_PC_AT
mov ecx, [eax]
LOCK and dword ptr [eax], ~EFLAGS_INTERRUPT_MASK
/* Mask it out from EFLAGS too */
mov eax, edx
and eax, ~EFLAGS_INTERRUPT_MASK
/* Mask out the alignment check and IF flag from the VDM state */
and ecx, EFLAGS_ALIGN_CHECK + EFLAGS_INTERRUPT_MASK
/* FIXME: Support VME */
/* Now mask out VIF and TF */
or eax, ecx
and edx, ~(EFLAGS_VIF + EFLAGS_NESTED_TASK + EFLAGS_TF)
mov [ebp+KTRAP_FRAME_EFLAGS], edx
/* Set the IOPL Mask */
or eax, EFLAGS_IOPL
/* Get stack flat address */
movzx ecx, word ptr [ebp+KTRAP_FRAME_SS]
shl ecx, 4
movzx edx, word ptr [ebp+KTRAP_FRAME_ESP]
/* Push EFLAGS */
sub dx, 2
mov word ptr [ecx+edx], ax
/* Push CS */
mov ax, word ptr [ebp+KTRAP_FRAME_CS]
sub dx, 2
mov word ptr [ecx+edx], ax
/* Push IP */
movzx eax, word ptr [ebp+KTRAP_FRAME_EIP]
add eax, edi
inc eax
sub dx, 2
mov word ptr [ecx+edx], ax
/* Update ESP */
mov [ebp+KTRAP_FRAME_ESP], dx
/* Get the interrupt */
inc esi
movzx ecx, byte ptr [esi]
/* FIXME: Analyze and see if this is a hooked VDM (PM) Interrupt */
/* Get the entry in the IVT */
mov ebx, [ecx*4]
mov eax, ebx
shr eax, 16
/* Update EIP */
mov word ptr [ebp+KTRAP_FRAME_EIP], bx
/* Check if this was V86 mode */
test dword ptr [ebp+KTRAP_FRAME_EFLAGS], EFLAGS_V86_MASK
jnz SetCs
/* Check if it was a kernel CS */
or ax, RPL_MASK
cmp ax, KGDT_R0_CODE
jnb SetCs
/* Set user-mode CS */
mov ax, KGDT_R3_CODE + RPL_MASK
SetCs:
/* Set new CS */
mov [ebp+KTRAP_FRAME_CS], ax
/* Return success */
mov eax, 1
ret
.endfunc
.func OpcodeINTOV86
_OpcodeINTOV86:
UNHANDLED_V86_OPCODE
.endfunc
.func OpcodeIRETV86
_OpcodeIRETV86:
/* Get the VDM State */
mov eax, FIXED_NTVDMSTATE_LINEAR_PC_AT
/* Get flat ESP */
movzx ecx, word ptr [ebp+KTRAP_FRAME_SS]
shl ecx, 4
movzx edx, word ptr [ebp+KTRAP_FRAME_ESP]
add ecx, edx
/* Check for OPER32 prefix */
test ebx, PREFIX_FLAG_OPER32
jnz Iret32
/* Get flat IP */
movzx edi, word ptr [ecx]
mov [ebp+KTRAP_FRAME_EIP], edi
movzx esi, word ptr [ecx+2]
mov [ebp+KTRAP_FRAME_CS], esi
/* Update ESP */
add edx, 6
mov [ebp+KTRAP_FRAME_ESP], edx
/* Get EFLAGS */
movzx ebx, word ptr [ecx+4]
MaskEFlags:
/* Mask out EFLAGS */
and ebx, ~(EFLAGS_IOPL + EFLAGS_VIF + EFLAGS_NESTED_TASK + EFLAGS_VIP)
mov ecx, ebx
/* FIXME: Check for VME support */
/* Save VDM state */
push eax
/* Enable V86 and Interrupts */
or ebx, EFLAGS_V86_MASK + EFLAGS_INTERRUPT_MASK
/* Save old flags */
mov eax, [ebp+KTRAP_FRAME_EFLAGS]
push eax
/* Mask out VIP and set new eflags */
and eax, EFLAGS_VIP
or eax, ebx
mov [ebp+KTRAP_FRAME_EFLAGS], eax
/* Check if we have to update ESP0 */
pop ebx
test ebx, EFLAGS_V86_MASK
jnz NoEsp0Update
/* Save ECX and EDX */
push ecx
push edx
/* Update esp0 and restore registers */
push ebp
call _Ki386AdjustEsp0@4
pop edx
pop ecx
NoEsp0Update:
/* Put VDM state in EAX and update VDM EFlags */
pop eax
and ecx, EFLAGS_INTERRUPT_MASK
LOCK and dword ptr [eax], ~EFLAGS_INTERRUPT_MASK
LOCK or [eax], ecx
/* Get flat EIP and check if this is the BOP */
shl esi, 4
add esi, edi
mov ax, [esi]
cmp ax, 0xC4C4
jz IsBop
/* FIXME: Check for VDM interrupts */
/* Return success */
RetIret:
mov eax, 1
ret
IsBop:
/* Call the BOP handler */
push ebp
call _VdmDispatchBop@4
jmp RetIret
Iret32:
/* Get 32-bit flat EIP */
mov edi, [ecx]
mov [ebp+KTRAP_FRAME_EIP], edi
movzx esi, word ptr [ecx+4]
mov [ebp+KTRAP_FRAME_CS], esi
/* Set new ESP */
add edx, 12
mov [ebp+KTRAP_FRAME_ESP], edx
/* Get EFLAGS and continue */
mov ebx, [ecx+8]
jmp MaskEFlags
.endfunc
.func OpcodeNPXV86
_OpcodeNPXV86:
UNHANDLED_V86_OPCODE
.endfunc
.func OpcodeINBimmV86
_OpcodeINBimmV86:
UNHANDLED_V86_OPCODE
.endfunc
.func OpcodeINWimmV86
_OpcodeINWimmV86:
UNHANDLED_V86_OPCODE
.endfunc
.func OpcodeOUTBimmV86
_OpcodeOUTBimmV86:
UNHANDLED_V86_OPCODE
.endfunc
.func OpcodeOUTWimmV86
_OpcodeOUTWimmV86:
UNHANDLED_V86_OPCODE
.endfunc
.func OpcodeINBV86
_OpcodeINBV86:
UNHANDLED_V86_OPCODE
.endfunc
.func OpcodeINWV86
_OpcodeINWV86:
UNHANDLED_V86_OPCODE
.endfunc
.func OpcodeOUTBV86
_OpcodeOUTBV86:
UNHANDLED_V86_OPCODE
.endfunc
.func OpcodeOUTWV86
_OpcodeOUTWV86:
UNHANDLED_V86_OPCODE
.endfunc
.func OpcodeCLIV86
_OpcodeCLIV86:
/* Get VDM State */
mov eax, FIXED_NTVDMSTATE_LINEAR_PC_AT
/* FIXME: Support VME */
/* FIXME: Support VDM Interrupts */
/* Disable interrupts */
LOCK and dword ptr [eax], ~EFLAGS_INTERRUPT_MASK
/* Update EIP (remember EDI == instruction size) */
add [ebp+KTRAP_FRAME_EIP], edi
/* Return success */
mov eax, 1
ret
.endfunc
.func OpcodeSTIV86
_OpcodeSTIV86:
/* Get VDM State */
mov eax, FIXED_NTVDMSTATE_LINEAR_PC_AT
/* FIXME: Support VME */
/* Enable interrupts */
LOCK or dword ptr [eax], EFLAGS_INTERRUPT_MASK
/* Update EIP (remember EDI == instruction size) */
add [ebp+KTRAP_FRAME_EIP], edi
/* FIXME: Support VDM Interrupts */
/* Return success */
mov eax, 1
ret
.endfunc
.func OpcodeGenericPrefixV86
_OpcodeGenericPrefixV86:
/* Skip instruction */
inc esi
inc edi
/* Get the instruction */
movzx ecx, byte ptr [esi]
/* Get the opcode index */
movzx edx, byte ptr OpcodeIndex[ecx]
/* Dispatch it */
jmp OpcodeDispatchV86[edx*4]
.endfunc
.func OpcodeHLTV86
_OpcodeHLTV86:
UNHANDLED_V86_OPCODE
.endfunc
/* FUNCTIONS *****************************************************************/
.globl _Ki386SetupAndExitToV86Mode@4
@@ -800,27 +192,3 @@ _Ki386BiosCallReturnAddress:
pop ebp
ret 4
.endfunc
.globl _Ki386HandleOpcodeV86@0
.func Ki386HandleOpcodeV86@0
_Ki386HandleOpcodeV86@0:
/* Get flat EIP */
mov esi, [ebp+KTRAP_FRAME_CS]
shl esi, 4
add esi, [ebp+KTRAP_FRAME_EIP]
/* Get the opcode entry in the table */
movzx ecx, byte ptr [esi]
movzx edx, byte ptr OpcodeIndex[ecx]
/* Set instruction length and prefix flags */
mov edi, 1
xor ebx, ebx
/* Accounting statistics */
// inc dword ptr _ExVdmOpcodeDispatchCounts[edx*4] // FIXME: Generates protection fault
/* Handle the opcode */
jmp OpcodeDispatchV86[edx*4]
.endfunc


@@ -1,29 +1,438 @@
/*
* PROJECT: ReactOS Kernel
* LICENSE: GPL - See COPYING in the top level directory
* LICENSE: BSD - See COPYING.ARM in the top level directory
* FILE: ntoskrnl/ke/i386/v86vdm.c
* PURPOSE: Manages the Kernel's support for Virtual-8086 Mode (V86)
* used by Video Drivers to access ROM BIOS functions, as well
* as the kernel architecture part of generic VDM support.
* PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
* PURPOSE: V8086 and VDM Trap Emulation
* PROGRAMMERS: ReactOS Portable Systems Group
* Alex Ionescu (alex.ionescu@reactos.org)
*/
/* INCLUDES ******************************************************************/
/* INCLUDES *******************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
/* GLOBALS *******************************************************************/
/* GLOBALS ********************************************************************/
ULONG KeI386EFlagsAndMaskV86 = EFLAGS_USER_SANITIZE;
ULONG KeI386EFlagsOrMaskV86 = EFLAGS_INTERRUPT_MASK;
PVOID Ki386IopmSaveArea;
BOOLEAN KeI386VirtualIntExtensions = FALSE;
/* PRIVATE FUNCTIONS *********************************************************/
#if 1
const PULONG KiNtVdmState = (PULONG)FIXED_NTVDMSTATE_LINEAR_PC_AT;
/* PUBLIC FUNCTIONS **********************************************************/
/* UNHANDLED OPCODES **********************************************************/
KiVdmUnhandledOpcode(F);
KiVdmUnhandledOpcode(OUTSW);
KiVdmUnhandledOpcode(OUTSB);
KiVdmUnhandledOpcode(INSB);
KiVdmUnhandledOpcode(INSW);
KiVdmUnhandledOpcode(NPX);
KiVdmUnhandledOpcode(INBimm);
KiVdmUnhandledOpcode(INWimm);
KiVdmUnhandledOpcode(OUTBimm);
KiVdmUnhandledOpcode(OUTWimm);
KiVdmUnhandledOpcode(INB);
KiVdmUnhandledOpcode(INW);
KiVdmUnhandledOpcode(OUTB);
KiVdmUnhandledOpcode(OUTW);
KiVdmUnhandledOpcode(HLT);
KiVdmUnhandledOpcode(INTO);
KiVdmUnhandledOpcode(INV);
/* OPCODE HANDLERS ************************************************************/
BOOLEAN
FASTCALL
KiVdmOpcodePUSHF(IN PKTRAP_FRAME TrapFrame,
IN ULONG Flags)
{
ULONG Esp, V86EFlags, TrapEFlags;
/* Get current V8086 flags and mask out interrupt flag */
V86EFlags = *KiNtVdmState;
V86EFlags &= ~EFLAGS_INTERRUPT_MASK;
/* Get trap frame EFlags and leave only align, nested task and interrupt */
TrapEFlags = TrapFrame->EFlags;
TrapEFlags &= (EFLAGS_ALIGN_CHECK | EFLAGS_NESTED_TASK | EFLAGS_INTERRUPT_MASK);
/* Add in those flags if they exist, and add in the IOPL flag */
V86EFlags |= TrapEFlags;
V86EFlags |= EFLAGS_IOPL;
/* Build flat ESP */
Esp = (TrapFrame->HardwareSegSs << 4) + (USHORT)TrapFrame->HardwareEsp;
Esp -= 2;
/* Check for OPER32 */
if (Flags & PFX_FLAG_OPER32)
{
/* Save EFlags */
Esp -= 2;
*(PULONG)(Esp - 2) = V86EFlags;
}
else
{
/* Save EFlags */
*(PUSHORT)Esp = (USHORT)V86EFlags;
}
/* Set new ESP and EIP */
TrapFrame->HardwareEsp = (USHORT)Esp;
TrapFrame->Eip += (Flags & 0xFF);
/* We're done */
return TRUE;
}
BOOLEAN
FASTCALL
KiVdmOpcodePOPF(IN PKTRAP_FRAME TrapFrame,
IN ULONG Flags)
{
ULONG Esp, V86EFlags, EFlags, TrapEFlags;
/* Build flat ESP */
Esp = (TrapFrame->HardwareSegSs << 4) + (USHORT)TrapFrame->HardwareEsp;
/* Read EFlags */
EFlags = *(PULONG)Esp;
Esp += 4;
/* Check for OPER32 */
if (!(Flags & PFX_FLAG_OPER32))
{
/* Read correct flags and use correct stack address */
Esp -= 2;
EFlags &= 0xFFFF;
}
/* Set new ESP */
TrapFrame->HardwareEsp = Esp;
/* Mask out IOPL from the flags */
EFlags &= ~EFLAGS_IOPL;
/* Save the V86 flags, but mask out the nested task flag */
V86EFlags = EFlags & ~EFLAGS_NESTED_TASK;
/* Now leave only alignment, nested task and interrupt flag */
EFlags &= (EFLAGS_ALIGN_CHECK | EFLAGS_NESTED_TASK | EFLAGS_INTERRUPT_MASK);
/* FIXME: Check for VME support */
/* Add V86 and Interrupt flag */
V86EFlags |= EFLAGS_V86_MASK | EFLAGS_INTERRUPT_MASK;
/* Update EFlags in trap frame */
TrapEFlags = TrapFrame->EFlags;
TrapFrame->EFlags = (TrapFrame->EFlags & EFLAGS_VIP) | V86EFlags;
/* Check if ESP0 needs to be fixed up */
if (TrapEFlags & EFLAGS_V86_MASK) Ki386AdjustEsp0(TrapFrame);
/* Update the V8086 EFlags state */
KiVdmClearVdmEFlags(EFLAGS_ALIGN_CHECK | EFLAGS_NESTED_TASK | EFLAGS_INTERRUPT_MASK);
KiVdmSetVdmEFlags(EFlags);
/* FIXME: Check for VDM interrupts */
/* Update EIP */
TrapFrame->Eip += (Flags & 0xFF);
/* We're done */
return TRUE;
}
BOOLEAN
FASTCALL
KiVdmOpcodeINTnn(IN PKTRAP_FRAME TrapFrame,
IN ULONG Flags)
{
ULONG Esp, V86EFlags, TrapEFlags, Eip, Interrupt;
/* Read trap frame EFlags */
TrapEFlags = TrapFrame->EFlags;
/* Remove interrupt flag from V8086 EFlags */
V86EFlags = *KiNtVdmState;
KiVdmClearVdmEFlags(EFLAGS_INTERRUPT_MASK);
/* Keep only alignment and interrupt flag from the V8086 state */
V86EFlags &= (EFLAGS_ALIGN_CHECK | EFLAGS_INTERRUPT_MASK);
/* FIXME: Support VME */
/* Mask in the relevant V86 EFlags into the trap flags */
V86EFlags |= (TrapEFlags & ~EFLAGS_INTERRUPT_MASK);
/* And mask out the VIF, nested task and TF flag from the trap flags */
TrapFrame->EFlags = TrapEFlags &~ (EFLAGS_VIF | EFLAGS_NESTED_TASK | EFLAGS_TF);
/* Add the IOPL flag to the local trap flags */
V86EFlags |= EFLAGS_IOPL;
/* Build flat ESP */
Esp = (TrapFrame->HardwareSegSs << 4) + TrapFrame->HardwareEsp;
/* Push EFlags */
Esp -= 2;
*(PUSHORT)(Esp) = (USHORT)V86EFlags;
/* Push CS */
Esp -= 2;
*(PUSHORT)(Esp) = (USHORT)TrapFrame->SegCs;
/* Push IP */
Esp -= 2;
*(PUSHORT)(Esp) = (USHORT)TrapFrame->Eip + (Flags & 0xFF) + 1;
/* Update ESP */
TrapFrame->HardwareEsp = (USHORT)Esp;
/* Get flat EIP */
Eip = (TrapFrame->SegCs << 4) + TrapFrame->Eip;
/* Now get the *next* EIP address (current is original + the count - 1) */
Eip += (Flags & 0xFF);
/* Now read the interrupt number */
Interrupt = *(PUCHAR)Eip;
/* Read the EIP from its IVT entry */
Interrupt = *(PULONG)(Interrupt * 4);
TrapFrame->Eip = (USHORT)Interrupt;
/* Now get the CS segment */
Interrupt = (USHORT)(Interrupt >> 16);
/* Check if this was not a V8086 trap */
if (!(TrapFrame->EFlags & EFLAGS_V86_MASK))
{
/* Was it a kernel CS? */
Interrupt |= RPL_MASK;
if (TrapFrame->SegCs == KGDT_R0_CODE)
{
/* Add the RPL mask */
TrapFrame->SegCs = Interrupt;
}
else
{
/* Set user CS */
TrapFrame->SegCs = KGDT_R3_CODE | RPL_MASK;
}
}
else
{
/* Set IVT CS */
TrapFrame->SegCs = Interrupt;
}
/* We're done */
return TRUE;
}
BOOLEAN
FASTCALL
KiVdmOpcodeIRET(IN PKTRAP_FRAME TrapFrame,
IN ULONG Flags)
{
ULONG Esp, V86EFlags, EFlags, TrapEFlags, Eip;
/* Build flat ESP */
Esp = (TrapFrame->HardwareSegSs << 4) + TrapFrame->HardwareEsp;
/* Check for OPER32 */
if (Flags & PFX_FLAG_OPER32)
{
/* Build segmented EIP */
TrapFrame->Eip = *(PULONG)Esp;
TrapFrame->SegCs = *(PUSHORT)(Esp + 4);
/* Set new ESP */
TrapFrame->HardwareEsp += 12;
/* Get EFLAGS */
EFlags = *(PULONG)(Esp + 8);
}
else
{
/* Build segmented EIP */
TrapFrame->Eip = *(PUSHORT)Esp;
TrapFrame->SegCs = *(PUSHORT)(Esp + 2);
/* Set new ESP */
TrapFrame->HardwareEsp += 6;
/* Get EFLAGS */
EFlags = *(PUSHORT)(Esp + 4);
}
/* Mask out EFlags */
EFlags &= ~(EFLAGS_IOPL + EFLAGS_VIF + EFLAGS_NESTED_TASK + EFLAGS_VIP);
V86EFlags = EFlags;
/* FIXME: Check for VME support */
/* Add V86 and Interrupt flag */
EFlags |= EFLAGS_V86_MASK | EFLAGS_INTERRUPT_MASK;
/* Update EFlags in trap frame */
TrapEFlags = TrapFrame->EFlags;
TrapFrame->EFlags = (TrapFrame->EFlags & EFLAGS_VIP) | EFlags;
/* Check if ESP0 needs to be fixed up */
if (!(TrapEFlags & EFLAGS_V86_MASK)) Ki386AdjustEsp0(TrapFrame);
/* Update the V8086 EFlags state */
KiVdmClearVdmEFlags(EFLAGS_INTERRUPT_MASK);
KiVdmSetVdmEFlags(V86EFlags);
/* Build flat EIP and check if this is the BOP instruction */
Eip = (TrapFrame->SegCs << 4) + TrapFrame->Eip;
if (*(PUSHORT)Eip == 0xC4C4)
{
/* Dispatch the BOP */
VdmDispatchBop(TrapFrame);
}
else
{
/* FIXME: Check for VDM interrupts */
}
/* We're done */
return TRUE;
}
BOOLEAN
FASTCALL
KiVdmOpcodeCLI(IN PKTRAP_FRAME TrapFrame,
IN ULONG Flags)
{
/* FIXME: Support VME */
/* Disable interrupts */
KiVdmClearVdmEFlags(EFLAGS_INTERRUPT_MASK);
/* Skip instruction */
TrapFrame->Eip += (Flags & 0xFF);
/* Done */
return TRUE;
}
BOOLEAN
FASTCALL
KiVdmOpcodeSTI(IN PKTRAP_FRAME TrapFrame,
IN ULONG Flags)
{
/* FIXME: Support VME */
/* Enable interrupts */
KiVdmSetVdmEFlags(EFLAGS_INTERRUPT_MASK);
/* Skip instruction */
TrapFrame->Eip += (Flags & 0xFF);
/* Done */
return TRUE;
}
/* MASTER OPCODE HANDLER ******************************************************/
BOOLEAN
FASTCALL
KiVdmHandleOpcode(IN PKTRAP_FRAME TrapFrame,
IN ULONG Flags)
{
ULONG Eip;
/* Get flat EIP of the *current* instruction (not the original EIP) */
Eip = (TrapFrame->SegCs << 4) + TrapFrame->Eip;
Eip += (Flags & 0xFF) - 1;
/* Read the opcode entry */
switch (*(PUCHAR)Eip)
{
case 0xF: return KiCallVdmHandler(F);
case 0x26: return KiCallVdmPrefixHandler(PFX_FLAG_ES);
case 0x2E: return KiCallVdmPrefixHandler(PFX_FLAG_CS);
case 0x36: return KiCallVdmPrefixHandler(PFX_FLAG_SS);
case 0x3E: return KiCallVdmPrefixHandler(PFX_FLAG_DS);
case 0x64: return KiCallVdmPrefixHandler(PFX_FLAG_FS);
case 0x65: return KiCallVdmPrefixHandler(PFX_FLAG_GS);
case 0x66: return KiCallVdmPrefixHandler(PFX_FLAG_OPER32);
case 0x67: return KiCallVdmPrefixHandler(PFX_FLAG_ADDR32);
case 0xF0: return KiCallVdmPrefixHandler(PFX_FLAG_LOCK);
case 0xF2: return KiCallVdmPrefixHandler(PFX_FLAG_REPNE);
case 0xF3: return KiCallVdmPrefixHandler(PFX_FLAG_REP);
case 0x6C: return KiCallVdmHandler(INSB);
case 0x6D: return KiCallVdmHandler(INSW);
case 0x6E: return KiCallVdmHandler(OUTSB);
case 0x6F: return KiCallVdmHandler(OUTSW);
case 0x9B: return KiCallVdmHandler(NPX);
case 0xD8: return KiCallVdmHandler(NPX);
case 0xD9: return KiCallVdmHandler(NPX);
case 0xDA: return KiCallVdmHandler(NPX);
case 0xDB: return KiCallVdmHandler(NPX);
case 0xDC: return KiCallVdmHandler(NPX);
case 0xDD: return KiCallVdmHandler(NPX);
case 0xDE: return KiCallVdmHandler(NPX);
case 0xDF: return KiCallVdmHandler(NPX);
case 0x9C: return KiCallVdmHandler(PUSHF);
case 0x9D: return KiCallVdmHandler(POPF);
case 0xCD: return KiCallVdmHandler(INTnn);
case 0xCE: return KiCallVdmHandler(INTO);
case 0xCF: return KiCallVdmHandler(IRET);
case 0xE4: return KiCallVdmHandler(INBimm);
case 0xE5: return KiCallVdmHandler(INWimm);
case 0xE6: return KiCallVdmHandler(OUTBimm);
case 0xE7: return KiCallVdmHandler(OUTWimm);
case 0xEC: return KiCallVdmHandler(INB);
case 0xED: return KiCallVdmHandler(INW);
case 0xEE: return KiCallVdmHandler(OUTB);
case 0xEF: return KiCallVdmHandler(OUTW);
case 0xF4: return KiCallVdmHandler(HLT);
case 0xFA: return KiCallVdmHandler(CLI);
case 0xFB: return KiCallVdmHandler(STI);
default: return KiCallVdmHandler(INV);
}
}
/* PREFIX HANDLER *************************************************************/
BOOLEAN
FASTCALL
KiVdmOpcodePrefix(IN PKTRAP_FRAME TrapFrame,
IN ULONG Flags)
{
/* Increase instruction size */
Flags++;
/* Handle the next opcode */
return KiVdmHandleOpcode(TrapFrame, Flags);
}
/* TRAP HANDLER ***************************************************************/
BOOLEAN
FASTCALL
Ki386HandleOpcodeV86(IN PKTRAP_FRAME TrapFrame)
{
/* Clean up */
TrapFrame->Eip &= 0xFFFF;
TrapFrame->HardwareEsp &= 0xFFFF;
/* We start with only 1 byte per instruction */
return KiVdmHandleOpcode(TrapFrame, 1);
}
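One detail worth spelling out (an illustrative sketch, not part of the commit): every handler above builds "flat" linear addresses the way real mode does, as segment * 16 plus a 16-bit offset. That is what the recurring (TrapFrame->SegCs << 4) + TrapFrame->Eip and (TrapFrame->HardwareSegSs << 4) + TrapFrame->HardwareEsp expressions compute, and it is also why KiVdmOpcodeINTnn reads the IVT entry for interrupt n at linear address n * 4. The FlatAddress helper below is a hypothetical name.

#include <stdio.h>

/* Real-mode/V8086 linear address: the segment selects a 16-byte paragraph */
static unsigned long FlatAddress(unsigned short Segment, unsigned short Offset)
{
    return ((unsigned long)Segment << 4) + Offset;
}

int main(void)
{
    /* CS:IP = 1234:0100 maps to linear 0x12440 */
    printf("1234:0100 -> 0x%lX\n", FlatAddress(0x1234, 0x0100));

    /* The IVT entry for INT 10h is the dword at linear 0x10 * 4 = 0x40 */
    printf("IVT slot for INT 10h: 0x%lX\n", 0x10 * 4UL);
    return 0;
}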
/* PUBLIC FUNCTIONS ***********************************************************/
#endif
/*
* @implemented