[NETKVM] Import NetKVM network adapter driver by Red Hat

CORE-15841
This commit is contained in:
Nguyen Trung Khanh 2020-03-05 10:58:56 +07:00 committed by Victor Perevertkin
parent 52fb8c1a8d
commit 4c37757e81
No known key found for this signature in database
GPG key ID: C750B7222E9C7830
42 changed files with 13796 additions and 4 deletions

View file

@ -1,5 +1,6 @@
# Network adapter drivers; netkvm (virtio-net, imported from Red Hat) is new here.
add_subdirectory(e1000)
add_subdirectory(ne2000)
add_subdirectory(netkvm)
add_subdirectory(pcnet)
add_subdirectory(rtl8139)

View file

@ -0,0 +1,37 @@
include_directories(BEFORE common virtio)

add_definitions(
    -DNDIS_MINIPORT_DRIVER
    -DNDIS51_MINIPORT=1)

# Warning suppressions for the imported Red Hat sources.
# Kept BEFORE the target is created: directory-scoped compile flags are only
# guaranteed to apply to targets defined after the call, regardless of how
# add_compile_flags is implemented.
if(NOT MSVC)
    add_compile_flags("-Wno-unused-function")
    add_compile_flags("-Wno-old-style-declaration")
    add_compile_flags("-Wno-unknown-pragmas")
    add_compile_flags("-Wno-unused-but-set-variable")
    add_compile_flags("-Wno-pointer-sign")
    add_compile_flags("-Wno-pointer-to-int-cast")
    add_compile_flags("-Wno-int-to-pointer-cast")
    add_compile_flags("-Wno-attributes")
endif()

list(APPEND SOURCE
    common/ParaNdis-Common.c
    common/ParaNdis-Oid.c
    common/ParaNdis-VirtIO.c
    common/ParaNdis-Debug.c
    common/sw-offload.c
    virtio/VirtIOPCICommon.c
    virtio/VirtIOPCILegacy.c
    virtio/VirtIOPCIModern.c
    virtio/VirtIORing.c
    virtio/VirtIORing-Packed.c
    wxp/ParaNdis5-Driver.c
    wxp/ParaNdis5-Impl.c
    wxp/ParaNdis5-Oid.c)

add_library(netkvm MODULE ${SOURCE} wxp/parandis.rc)
set_module_type(netkvm kernelmodedriver)
add_importlibs(netkvm ndis ntoskrnl hal)
add_cd_file(TARGET netkvm DESTINATION reactos/system32/drivers FOR all)

View file

@ -0,0 +1,193 @@
/*
* This file contains definitions and data structures, common between
* NDIS driver and debugger helper unit, processing crash dump with built-in
* data provided by the driver.
*
* Included in NetKVM NDIS kernel driver for Windows.
* Included in NetKVMDumpParser application.
*
* Copyright (c) 2008-2017 Red Hat, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef PARANDIS_DEBUG_DATA_H
#define PARANDIS_DEBUG_DATA_H

/* Operation codes recorded in the driver's in-memory history log.
 * The per-entry comments document how each operation interprets the
 * four log parameters (pParam1, lParam2, lParam3, lParam4). */
typedef enum _etagHistoryLogOperation
{
    hopPowerOff,              // common::PowerOff, 1/0 - entry/exit (none, entry, none, none)
    hopPowerOn,               // common::PowerOn, 1/0 - entry/exit (none, entry, none, none)
    hopSysPause,              // ndis6::Pause, 1/0 - entry/completion
    hopSysResume,             // ndis6::Restart, 1/0 - entry/completion
    hopInternalSendPause,     // implementation, 1/0 - entry/completion
    hopInternalReceivePause,  // implementation, 1/0 - entry/completion
    hopInternalSendResume,    // implementation
    hopInternalReceiveResume, // implementation
    hopSysReset,              // implementation driver, 1/0 - entry/completion
    hopHalt,                  // implementation driver, 1/0 - entry/completion
    hopConnectIndication,     // implementation
    hopDPC,                   // common::DpcWorkBody (1, none, none, none) (0, left, free buffers, free desc)
    hopSend,                  // implementation, when Send requested (nbl, nof lists, nof bufs, nof bytes) (packet, 1, nof packets, none)
    hopSendNBLRequest,        // ndis6 implementation (nbl, nof packets, none, none)
    hopSendPacketRequest,     // not used
    hopSendPacketMapped,      // implementation, before the packet inserted into queue (nbl, which packet, nof frags, none)
    hopSubmittedPacket,       // implementation, when the packet submitted (nbl, which packet, result, flags)
    hopBufferSent,            // implementation, when the packet returned from VirtIO queue (nbl, packet no., free buf, free desc)
    hopReceiveStat,           // common: RX (none, retrieved, reported, ready rx buffers)
    hopBufferReturned,        // not used
    hopSendComplete,          // implementation, when the packet completed
    hopTxProcess,
    hopPacketReceived,        // implementation, when the packet prepared for indication (nbl, length, prio tag, type)
    hopOidRequest,            // implementation, none, OID, on entry(type, 1), on exit (status, 0), on complete (status, 2)
    hopPnpEvent               // common, none, event, 0, 0
}eHistoryLogOperation;
// {E51FCE18-B3E7-441e-B18C-D9E9B71616F3}
/* GUID tagging NetKVM's secondary crash-dump data so the dump parser
 * can find it among other callbacks' blobs. */
static const GUID ParaNdis_CrashGuid =
{ 0xe51fce18, 0xb3e7, 0x441e, { 0xb1, 0x8c, 0xd9, 0xe9, 0xb7, 0x16, 0x16, 0xf3 } };

/* This structure is NOT changeable: the external dump parser relies on its
 * exact layout. Pointers are stored widened to UINT64 so 32- and 64-bit
 * dumps share one format. */
typedef struct _tagBugCheckStaticDataHeader
{
    USHORT SizeOfPointer;      // sizeof(PVOID) on the crashed system
    USHORT StaticDataVersion;  // PARANDIS_DEBUG_STATIC_DATA_VERSION
    USHORT PerNicDataVersion;  // PARANDIS_DEBUG_PER_NIC_DATA_VERSION
    USHORT ulMaxContexts;      // number of PerNicData slots
    LARGE_INTEGER qCrashTime;  // system time captured at bugcheck
    UINT64 PerNicData;         // address of per-NIC data array
    UINT64 DataArea;           // address of static data content
    UINT64 DataAreaSize;       // size of static data content, bytes
}tBugCheckStaticDataHeader;

/* This structure is NOT changeable */
typedef struct _tagBugCheckDataLocation
{
    UINT64 Address; // address of the whole bugcheck data block
    UINT64 Size;    // size of the whole bugcheck data block, bytes
}tBugCheckDataLocation;

#define PARANDIS_DEBUG_STATIC_DATA_VERSION 0
#define PARANDIS_DEBUG_PER_NIC_DATA_VERSION 0
#define PARANDIS_DEBUG_HISTORY_DATA_VERSION 1

/* This structure is NOT changeable */
typedef struct _tagBugCheckStaticDataContent_V0
{
    ULONG SizeOfHistory;       // number of history entries
    ULONG SizeOfHistoryEntry;  // sizeof one history entry
    LONG CurrentHistoryIndex;  // monotonically incremented; entry index = (value - 1) % SizeOfHistory
    ULONG HistoryDataVersion;  // PARANDIS_DEBUG_HISTORY_DATA_VERSION
    ULONG64 HistoryData;       // address of history entry array
}tBugCheckStaticDataContent_V0;

/* When defined, each adapter context records the timestamp of its last
 * interrupt for post-mortem analysis. */
#define PARANDIS_DEBUG_INTERRUPTS

#ifdef PARANDIS_DEBUG_INTERRUPTS
# define PARANDIS_STORE_LAST_INTERRUPT_TIMESTAMP(p) \
    NdisGetCurrentSystemTime(&(p)->LastInterruptTimeStamp)
# define PARANDIS_GET_LAST_INTERRUPT_TIMESTAMP(p) \
    (p)->LastInterruptTimeStamp.QuadPart
#else
# define PARANDIS_STORE_LAST_INTERRUPT_TIMESTAMP(p)
# define PARANDIS_GET_LAST_INTERRUPT_TIMESTAMP(p) (0)
#endif
/* Per-adapter snapshot saved at bugcheck time (version 0). */
typedef struct _tagBugCheckPerNicDataContent_V0
{
    UINT64 Context;                          // PARANDIS_ADAPTER pointer, widened
    LARGE_INTEGER LastInterruptTimeStamp;
    LARGE_INTEGER LastTxCompletionTimeStamp;
    ULONG nofPacketsToComplete;
    ULONG nofReadyTxBuffers;
}tBugCheckPerNicDataContent_V0;

/* History log entry, version 0. */
typedef struct _tagBugCheckHistoryDataEntry_V0
{
    LARGE_INTEGER TimeStamp;
    UINT64 Context;   // PARANDIS_ADAPTER pointer, widened
    UINT64 pParam1;
    ULONG operation;  // eHistoryLogOperation
    ULONG lParam2;
    ULONG lParam3;
    ULONG lParam4;
}tBugCheckHistoryDataEntry_V0;

/* History log entry, version 1: adds IRQL and processor number. */
typedef struct _tagBugCheckHistoryDataEntry_V1
{
    LARGE_INTEGER TimeStamp;
    UINT64 Context;   // PARANDIS_ADAPTER pointer, widened
    ULONG uIRQL;
    ULONG uProcessor;
    UINT64 pParam1;
    ULONG operation;  // eHistoryLogOperation
    ULONG lParam2;
    ULONG lParam3;
    ULONG lParam4;
}tBugCheckHistoryDataEntry_V1;

/* Select the concrete types matching the version macros above. */
#if (PARANDIS_DEBUG_STATIC_DATA_VERSION == 0)
typedef tBugCheckStaticDataContent_V0 tBugCheckStaticDataContent;
#endif

#if (PARANDIS_DEBUG_PER_NIC_DATA_VERSION == 0)
typedef tBugCheckPerNicDataContent_V0 tBugCheckPerNicDataContent;
#endif

#if (PARANDIS_DEBUG_HISTORY_DATA_VERSION == 0)
typedef tBugCheckHistoryDataEntry_V0 tBugCheckHistoryDataEntry;
#elif (PARANDIS_DEBUG_HISTORY_DATA_VERSION == 1)
typedef tBugCheckHistoryDataEntry_V1 tBugCheckHistoryDataEntry;
#endif

/* Version-1 layouts, reserved for future use. */
typedef struct _tagBugCheckStaticDataContent_V1
{
    UINT64 res1;
    UINT64 res2;
    UINT64 History;
}tBugCheckStaticDataContent_V1;

typedef struct _tagBugCheckPerNicDataContent_V1
{
    UINT64 Context;
    LARGE_INTEGER LastInterruptTimeStamp;
    LARGE_INTEGER LastTxCompletionTimeStamp;
    ULONG nofPacketsToComplete;
    ULONG nofReadyTxBuffers;
}tBugCheckPerNicDataContent_V1;

/* NOTE(review): PARANDIS_DEBUG_HEADER_VERSION is not defined in this header
 * (only PARANDIS_DEBUG_STATIC_DATA_VERSION is) — presumably an including
 * translation unit defines it to opt into the V1 layout; confirm upstream. */
#if (PARANDIS_DEBUG_HEADER_VERSION == 1)
typedef tBugCheckStaticDataContent_V1 tBugCheckStaticDataContent;
#endif

#if (PARANDIS_DEBUG_PER_NIC_DATA_VERSION == 1)
typedef tBugCheckPerNicDataContent_V1 tBugCheckPerNicDataContent;
#endif

// etc
#endif

View file

@ -0,0 +1,133 @@
/*
* This file contains common guest/host definition, related
* to VirtIO network adapter
*
* Copyright (c) 2008-2017 Red Hat, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef IONETDESCRIPTOR_H
#define IONETDESCRIPTOR_H

/* All structures here are shared with the host; pack to byte boundaries
 * so the layout matches the virtio wire format exactly. */
#pragma pack (push)
#pragma pack (1)

/* This is the first element of the scatter-gather list. If you don't
 * specify GSO or CSUM features, you can simply ignore the header. */
typedef struct _tagvirtio_net_hdr
{
#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1       // Use csum_start, csum_offset
#define VIRTIO_NET_HDR_F_DATA_VALID 2       // Host checked checksum, no need to recheck
    u8 flags;
#define VIRTIO_NET_HDR_GSO_NONE     0       // Not a GSO frame
#define VIRTIO_NET_HDR_GSO_TCPV4    1       // GSO frame, IPv4 TCP (TSO)
#define VIRTIO_NET_HDR_GSO_UDP     3       // GSO frame, IPv4 UDP (UFO)
#define VIRTIO_NET_HDR_GSO_TCPV6    4       // GSO frame, IPv6 TCP
#define VIRTIO_NET_HDR_GSO_ECN      0x80    // TCP has ECN set
    u8 gso_type;
    u16 hdr_len;     // Ethernet + IP + tcp/udp hdrs
    u16 gso_size;    // Bytes to append to gso_hdr_len per frame
    u16 csum_start;  // Position to start checksumming from
    u16 csum_offset; // Offset after that to place checksum
}virtio_net_hdr_basic;

/* Extended header used when the host merges receive buffers:
 * nBuffers counts the buffers the packet occupies. */
typedef struct _tagvirtio_net_hdr_ext
{
    virtio_net_hdr_basic BasicHeader;
    u16 nBuffers;
}virtio_net_hdr_ext;

/*
 * Control virtqueue data structures
 *
 * The control virtqueue expects a header in the first sg entry
 * and an ack/status response in the last entry. Data for the
 * command goes in between.
 */
typedef struct tag_virtio_net_ctrl_hdr {
    u8 class_of_command;
    u8 cmd;
}virtio_net_ctrl_hdr;

typedef u8 virtio_net_ctrl_ack;

#define VIRTIO_NET_OK 0
#define VIRTIO_NET_ERR 1

/*
 * Control the RX mode, ie. promiscuous, allmulti, etc...
 * All commands require an "out" sg entry containing a 1 byte
 * state value, zero = disable, non-zero = enable. Commands
 * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature.
 * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA.
 */
#define VIRTIO_NET_CTRL_RX_MODE 0
#define VIRTIO_NET_CTRL_RX_MODE_PROMISC 0
#define VIRTIO_NET_CTRL_RX_MODE_ALLMULTI 1
#define VIRTIO_NET_CTRL_RX_MODE_ALLUNI 2
#define VIRTIO_NET_CTRL_RX_MODE_NOMULTI 3
#define VIRTIO_NET_CTRL_RX_MODE_NOUNI 4
#define VIRTIO_NET_CTRL_RX_MODE_NOBCAST 5

/*
 * Control the MAC filter table.
 *
 * The MAC filter table is managed by the hypervisor, the guest should
 * assume the size is infinite. Filtering should be considered
 * non-perfect, ie. based on hypervisor resources, the guest may
 * receive packets from sources not specified in the filter list.
 *
 * In addition to the class/cmd header, the TABLE_SET command requires
 * two out scatterlists. Each contains a 4 byte count of entries followed
 * by a concatenated byte stream of the ETH_ALEN MAC addresses. The
 * first sg list contains unicast addresses, the second is for multicast.
 * This functionality is present if the VIRTIO_NET_F_CTRL_RX feature
 * is available.
 */
#define ETH_ALEN 6
struct virtio_net_ctrl_mac {
    u32 entries;
    // follows
    //u8 macs[][ETH_ALEN];
};
#define VIRTIO_NET_CTRL_MAC 1
#define VIRTIO_NET_CTRL_MAC_TABLE_SET 0

/*
 * Control VLAN filtering
 *
 * The VLAN filter table is controlled via a simple ADD/DEL interface.
 * VLAN IDs not added may be filtered by the hypervisor. Del is the
 * opposite of add. Both commands expect an out entry containing a 2
 * byte VLAN ID. VLAN filtering is available with the
 * VIRTIO_NET_F_CTRL_VLAN feature bit.
 */
#define VIRTIO_NET_CTRL_VLAN 2
#define VIRTIO_NET_CTRL_VLAN_ADD 0
#define VIRTIO_NET_CTRL_VLAN_DEL 1

#pragma pack (pop)
#endif

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,394 @@
/*
* This file contains debug support procedures, common for NDIS5 and NDIS6
*
* Copyright (c) 2008-2017 Red Hat, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "ndis56common.h"
#include "stdarg.h"
#include "ntstrsafe.h"
//#define OVERRIDE_DEBUG_BREAK
#ifdef WPP_EVENT_TRACING
#include "ParaNdis-Debug.tmh"
#endif
/* Debug verbosity levels and master print switch; nDebugLevel/bDebugPrint
 * are updated at runtime by WppEnableCallback below. */
int virtioDebugLevel = 1;
int nDebugLevel = 1;
int bDebugPrint = 1;

/* Serializes registration of contexts in the bugcheck data and, in the
 * I/O-port print path, the byte-by-byte output of one message. */
static NDIS_SPIN_LOCK CrashLock;

static KBUGCHECK_REASON_CALLBACK_ROUTINE ParaNdis_OnBugCheck;
static VOID NTAPI ParaNdis_OnBugCheck(
    IN KBUGCHECK_CALLBACK_REASON Reason,
    IN PKBUGCHECK_REASON_CALLBACK_RECORD Record,
    IN OUT PVOID ReasonSpecificData,
    IN ULONG ReasonSpecificDataLength
    );
static VOID ParaNdis_PrepareBugCheckData();

/* Function-pointer types for kernel routines that are resolved dynamically
 * via MmGetSystemRoutineAddress (they may be absent on older kernels);
 * dummy fallbacks below keep the driver functional when they are missing. */
typedef BOOLEAN (*KeRegisterBugCheckReasonCallbackType) (
    __out PKBUGCHECK_REASON_CALLBACK_RECORD CallbackRecord,
    __in PKBUGCHECK_REASON_CALLBACK_ROUTINE CallbackRoutine,
    __in KBUGCHECK_CALLBACK_REASON Reason,
    __in PUCHAR Component
   );

typedef BOOLEAN (*KeDeregisterBugCheckReasonCallbackType) (
    __inout PKBUGCHECK_REASON_CALLBACK_RECORD CallbackRecord
    );

typedef ULONG (*vDbgPrintExType)(
    __in ULONG ComponentId,
    __in ULONG Level,
    __in PCCH Format,
    __in va_list arglist
    );

/* No-op stand-in for vDbgPrintEx when it cannot be resolved. */
static ULONG DummyPrintProcedure(
    __in ULONG ComponentId,
    __in ULONG Level,
    __in PCCH Format,
    __in va_list arglist
    )
{
    return 0;
}

/* Fallback when KeRegisterBugCheckReasonCallback is unavailable:
 * marks the record unregistered and reports failure. */
static BOOLEAN KeRegisterBugCheckReasonCallbackDummyProc(
    __out PKBUGCHECK_REASON_CALLBACK_RECORD CallbackRecord,
    __in PKBUGCHECK_REASON_CALLBACK_ROUTINE CallbackRoutine,
    __in KBUGCHECK_CALLBACK_REASON Reason,
    __in PUCHAR Component
    )
{
    CallbackRecord->State = 0;
    return FALSE;
}

/* Fallback when KeDeregisterBugCheckReasonCallback is unavailable. */
BOOLEAN KeDeregisterBugCheckReasonCallbackDummyProc(
    __inout PKBUGCHECK_REASON_CALLBACK_RECORD CallbackRecord
    )
{
    return FALSE;
}

/* Start with the dummies; replaced in ParaNdis_DebugInitialize when the
 * real kernel routines are found. */
static vDbgPrintExType PrintProcedure = DummyPrintProcedure;
static KeRegisterBugCheckReasonCallbackType BugCheckRegisterCallback = KeRegisterBugCheckReasonCallbackDummyProc;
static KeDeregisterBugCheckReasonCallbackType BugCheckDeregisterCallback = KeDeregisterBugCheckReasonCallbackDummyProc;
KBUGCHECK_REASON_CALLBACK_RECORD CallbackRecord;
#if !defined(WPP_EVENT_TRACING) || defined(WPP_USE_BYPASS)
#if defined(DPFLTR_MASK)
//common case, except Win2K
/* Formats and emits one debug message via vDbgPrintEx (or its dummy),
 * optionally mirroring it to a debug I/O port at <= DISPATCH_LEVEL. */
static void DebugPrint(const char *fmt, ...)
{
    va_list list;
    va_start(list, fmt);
    PrintProcedure(DPFLTR_DEFAULT_ID, 9 | DPFLTR_MASK, fmt, list);
    /* the traversal above consumed the list; close it — reusing a va_list
     * without va_end/va_start is undefined behavior */
    va_end(list);
#if defined(VIRTIO_DBG_USE_IOPORT)
    {
        NTSTATUS status;
        // use this way of output only for DISPATCH_LEVEL,
        // higher requires more protection
        if (KeGetCurrentIrql() <= DISPATCH_LEVEL)
        {
            char buf[256];
            size_t len, i;
            buf[0] = 0;
            /* restart the argument list for the second traversal */
            va_start(list, fmt);
            status = RtlStringCbVPrintfA(buf, sizeof(buf), fmt, list);
            va_end(list);
            if (status == STATUS_SUCCESS) len = strlen(buf);
            else if (status == STATUS_BUFFER_OVERFLOW) len = sizeof(buf);
            else { memcpy(buf, "Can't print", 11); len = 11; }
            NdisAcquireSpinLock(&CrashLock);
            for (i = 0; i < len; ++i)
            {
                NdisRawWritePortUchar(VIRTIO_DBG_USE_IOPORT, buf[i]);
            }
            NdisRawWritePortUchar(VIRTIO_DBG_USE_IOPORT, '\n');
            NdisReleaseSpinLock(&CrashLock);
        }
    }
#endif
}
DEBUGPRINTFUNC pDebugPrint = DebugPrint;
DEBUGPRINTFUNC VirtioDebugPrintProc = DebugPrint;
#else //DPFLTR_MASK
#pragma message("DebugPrint for Win2K")
DEBUGPRINTFUNC pDebugPrint = DbgPrint;
DEBUGPRINTFUNC VirtioDebugPrintProc = DbgPrint;
#endif //DPFLTR_MASK
#endif //!defined(WPP_EVENT_TRACING) || defined(WPP_USE_BYPASS)
/* Trace helpers used by the entry/exit logging macros. */

/* Logs entry into function `s` at the given verbosity level. */
void _LogOutEntry(int level, const char *s)
{
    DPrintf(level, ("[%s]=>", s));
}

/* Logs exit from function `s` together with its result value. */
void _LogOutExitValue(int level, const char *s, ULONG value)
{
    DPrintf(level, ("[%s]<=0x%X", s, value));
}

/* Logs a plain marker string `s`. */
void _LogOutString(int level, const char *s)
{
    DPrintf(level, ("[%s]", s));
}
/* WPP enablement callback: mirrors the WPP-supplied level/enable state
 * into the plain-print globals so non-WPP output honors the same settings. */
VOID WppEnableCallback(
    __in LPCGUID Guid,
    __in __int64 Logger,
    __in BOOLEAN Enable,
    __in ULONG Flags,
    __in UCHAR Level)
{
#if WPP_USE_BYPASS
    DPrintfBypass(0, ("[%s] %s, flags %X, level %d",
        __FUNCTION__, Enable ? "enabled" : "disabled",
        Flags, (ULONG)Level));
#endif
    nDebugLevel = Level;
    bDebugPrint = Enable;
}
#ifdef OVERRIDE_DEBUG_BREAK
/* Saved address of the kernel's DbgBreakPoint and the original bytes we
 * overwrite there, so ParaNdis_DebugCleanup can restore them. */
static PUCHAR pDbgBreakPoint;
static UCHAR DbgBreakPointChunk[5];

/* Replacement handler jumped to instead of DbgBreakPoint when the
 * override is active; just logs the attempt. */
static void AnotherDbgBreak()
{
    DPrintf(0, ("Somebody tried to break into the debugger!"));
}
#endif
/* One-time debug-subsystem initialization, called from DriverEntry.
 * Sets up WPP tracing, prepares the bugcheck data block, dynamically
 * resolves optional kernel routines (vDbgPrintEx and the bugcheck-reason
 * callback pair) and registers the secondary-dump-data callback.
 * With OVERRIDE_DEBUG_BREAK on 32-bit builds, also patches DbgBreakPoint
 * with a jump to AnotherDbgBreak. */
void ParaNdis_DebugInitialize(PVOID DriverObject,PVOID RegistryPath)
{
    NDIS_STRING usRegister, usDeregister, usPrint;
    PVOID pr, pd;
    BOOLEAN res;

    WPP_INIT_TRACING(DriverObject, RegistryPath);
    NdisAllocateSpinLock(&CrashLock);
    KeInitializeCallbackRecord(&CallbackRecord);
    ParaNdis_PrepareBugCheckData();
    NdisInitUnicodeString(&usPrint, L"vDbgPrintEx");
    NdisInitUnicodeString(&usRegister, L"KeRegisterBugCheckReasonCallback");
    NdisInitUnicodeString(&usDeregister, L"KeDeregisterBugCheckReasonCallback");
    pd = MmGetSystemRoutineAddress(&usPrint);
    if (pd) PrintProcedure = (vDbgPrintExType)pd;
    // only install the callback pair if BOTH register and deregister exist,
    // so cleanup can always undo what initialization did
    pr = MmGetSystemRoutineAddress(&usRegister);
    pd = MmGetSystemRoutineAddress(&usDeregister);
    if (pr && pd)
    {
        BugCheckRegisterCallback = (KeRegisterBugCheckReasonCallbackType)pr;
        BugCheckDeregisterCallback = (KeDeregisterBugCheckReasonCallbackType)pd;
    }
    res = BugCheckRegisterCallback(&CallbackRecord, ParaNdis_OnBugCheck, KbCallbackSecondaryDumpData, "NetKvm");
    DPrintf(0, ("[%s] Crash callback %sregistered", __FUNCTION__, res ? "" : "NOT "));

#ifdef OVERRIDE_DEBUG_BREAK
    // 32-bit only: overwrite the first 5 bytes of DbgBreakPoint with a
    // relative jmp (0xe9 + rel32) to AnotherDbgBreak
    if (sizeof(PVOID) == sizeof(ULONG))
    {
        UCHAR replace[5] = {0xe9,0,0,0,0};
        ULONG replacement;
        NDIS_STRING usDbgBreakPointName;
        NdisInitUnicodeString(&usDbgBreakPointName, L"DbgBreakPoint");
        pDbgBreakPoint = (PUCHAR)MmGetSystemRoutineAddress(&usDbgBreakPointName);
        if (pDbgBreakPoint)
        {
            DPrintf(0, ("Replacing original BP handler at %p", pDbgBreakPoint));
            replacement = RtlPointerToOffset(pDbgBreakPoint + 5, AnotherDbgBreak);
            RtlCopyMemory(replace + 1, &replacement, sizeof(replacement));
            RtlCopyMemory(DbgBreakPointChunk, pDbgBreakPoint, sizeof(DbgBreakPointChunk));
            RtlCopyMemory(pDbgBreakPoint, replace, sizeof(replace));
        }
    }
#endif
}
/* Debug-subsystem teardown, called on driver unload: restores the patched
 * DbgBreakPoint bytes (if any), deregisters the bugcheck callback and
 * shuts down WPP tracing. */
void ParaNdis_DebugCleanup(PDRIVER_OBJECT pDriverObject)
{
#ifdef OVERRIDE_DEBUG_BREAK
    if (sizeof(PVOID) == sizeof(ULONG) && pDbgBreakPoint)
    {
        DPrintf(0, ("Restoring original BP handler at %p", pDbgBreakPoint));
        RtlCopyMemory(pDbgBreakPoint, DbgBreakPointChunk, sizeof(DbgBreakPointChunk));
    }
#endif
    BugCheckDeregisterCallback(&CallbackRecord);
    WPP_CLEANUP(pDriverObject);
}
/* Up to this many adapter contexts are tracked in the crash data. */
#define MAX_CONTEXTS 4
#if defined(ENABLE_HISTORY_LOG)
#define MAX_HISTORY 0x40000
#else
/* keep the array minimal when history logging is compiled out */
#define MAX_HISTORY 2
#endif

/* Everything the crash-dump parser needs, in one statically allocated block. */
typedef struct _tagBugCheckStaticData
{
    tBugCheckStaticDataHeader Header;
    tBugCheckPerNicDataContent PerNicData[MAX_CONTEXTS];
    tBugCheckStaticDataContent Data;
    tBugCheckHistoryDataEntry History[MAX_HISTORY];
}tBugCheckStaticData;

typedef struct _tagBugCheckData
{
    tBugCheckStaticData StaticData;
    tBugCheckDataLocation Location; // self-describing locator handed to the dump
}tBugCheckData;

static tBugCheckData BugCheckData;
/* TRUE while the dump uses the system-provided buffer (see OnBugCheck). */
static BOOLEAN bNative = TRUE;

/* Fills in the static header/locator fields of BugCheckData: versions,
 * sizes and widened pointers to the per-NIC array, data area and history.
 * Called once from ParaNdis_DebugInitialize. */
VOID ParaNdis_PrepareBugCheckData()
{
    BugCheckData.StaticData.Header.StaticDataVersion = PARANDIS_DEBUG_STATIC_DATA_VERSION;
    BugCheckData.StaticData.Header.PerNicDataVersion = PARANDIS_DEBUG_PER_NIC_DATA_VERSION;
    BugCheckData.StaticData.Header.ulMaxContexts = MAX_CONTEXTS;
    BugCheckData.StaticData.Header.SizeOfPointer = sizeof(PVOID);
    BugCheckData.StaticData.Header.PerNicData = (UINT_PTR)(PVOID)BugCheckData.StaticData.PerNicData;
    BugCheckData.StaticData.Header.DataArea = (UINT64)&BugCheckData.StaticData.Data;
    BugCheckData.StaticData.Header.DataAreaSize = sizeof(BugCheckData.StaticData.Data);
    BugCheckData.StaticData.Data.HistoryDataVersion = PARANDIS_DEBUG_HISTORY_DATA_VERSION;
    BugCheckData.StaticData.Data.SizeOfHistory = MAX_HISTORY;
    BugCheckData.StaticData.Data.SizeOfHistoryEntry = sizeof(tBugCheckHistoryDataEntry);
    BugCheckData.StaticData.Data.HistoryData = (UINT_PTR)(PVOID)BugCheckData.StaticData.History;
    BugCheckData.Location.Address = (UINT64)&BugCheckData;
    BugCheckData.Location.Size = sizeof(BugCheckData);
}
/* Adds (bRegister) or removes (!bRegister) an adapter context in the
 * crash-data per-NIC table, under CrashLock. Registration claims the first
 * free slot (Context == 0); deregistration clears the slot holding pContext.
 * If no matching slot exists, the call is a no-op. */
void ParaNdis_DebugRegisterMiniport(PARANDIS_ADAPTER *pContext, BOOLEAN bRegister)
{
    UINT i;
    UINT64 expected    = bRegister ? 0 : (UINT_PTR)pContext;
    UINT64 replacement = bRegister ? (UINT_PTR)pContext : 0;
    NdisAcquireSpinLock(&CrashLock);
    for (i = 0; i < MAX_CONTEXTS; ++i)
    {
        if (BugCheckData.StaticData.PerNicData[i].Context == expected)
        {
            BugCheckData.StaticData.PerNicData[i].Context = replacement;
            break;
        }
    }
    NdisReleaseSpinLock(&CrashLock);
}
/* Snapshots live state of every registered adapter into the crash data:
 * stamps the crash time, copies TX/completion counters and timestamps,
 * and gives each adapter a chance to save its own data via
 * ParaNdis_CallOnBugCheck. Returns the number of adapters saved.
 * Runs in bugcheck context — no locking is possible or attempted. */
static UINT FillDataOnBugCheck()
{
    UINT i, n = 0;
    NdisGetCurrentSystemTime(&BugCheckData.StaticData.Header.qCrashTime);
    for (i = 0; i < MAX_CONTEXTS; ++i)
    {
        tBugCheckPerNicDataContent *pSave = &BugCheckData.StaticData.PerNicData[i];
        PARANDIS_ADAPTER *p = (PARANDIS_ADAPTER *)pSave->Context;
        if (!p) continue; // empty slot
        pSave->nofPacketsToComplete = p->NetTxPacketsToReturn;
        pSave->nofReadyTxBuffers = p->nofFreeHardwareBuffers;
        pSave->LastInterruptTimeStamp.QuadPart = PARANDIS_GET_LAST_INTERRUPT_TIMESTAMP(p);
        pSave->LastTxCompletionTimeStamp = p->LastTxCompletionTimeStamp;
        ParaNdis_CallOnBugCheck(p);
        ++n;
    }
    return n;
}
/* Bugcheck secondary-dump-data callback. The kernel calls it twice:
 * pass 1 (OutBuffer == NULL): snapshot adapter state and announce the size
 *   and location of our data — the system's InBuffer if it is big enough,
 *   otherwise our own tBugCheckDataLocation;
 * pass 2 (OutBuffer == InBuffer): tag the dump with ParaNdis_CrashGuid and
 *   copy the locator into the system buffer.
 * Only the small locator (address/size of BugCheckData) goes into the dump;
 * presumably the dump parser follows it to the full static data — confirm
 * against the NetKVMDumpParser side. */
VOID NTAPI ParaNdis_OnBugCheck(
    IN KBUGCHECK_CALLBACK_REASON Reason,
    IN PKBUGCHECK_REASON_CALLBACK_RECORD Record,
    IN OUT PVOID ReasonSpecificData,
    IN ULONG ReasonSpecificDataLength
    )
{
    KBUGCHECK_SECONDARY_DUMP_DATA *pDump = (KBUGCHECK_SECONDARY_DUMP_DATA *)ReasonSpecificData;
    if (KbCallbackSecondaryDumpData == Reason && ReasonSpecificDataLength >= sizeof(*pDump))
    {
        ULONG dumpSize = sizeof(BugCheckData.Location);
        if (!pDump->OutBuffer)
        {
            UINT nSaved;
            nSaved = FillDataOnBugCheck();
            if (pDump->InBufferLength >= dumpSize)
            {
                pDump->OutBuffer = pDump->InBuffer;
                pDump->OutBufferLength = dumpSize;
            }
            else
            {
                // system buffer too small: point at our own locator instead
                pDump->OutBuffer = &BugCheckData.Location;
                pDump->OutBufferLength = dumpSize;
                bNative = FALSE;
            }
            DPrintf(0, ("[%s] system buffer of %d, saving data for %d NIC", __FUNCTION__,pDump->InBufferLength, nSaved));
            DPrintf(0, ("[%s] using %s buffer", __FUNCTION__, bNative ? "native" : "own"));
        }
        else if (pDump->OutBuffer == pDump->InBuffer)
        {
            RtlCopyMemory(&pDump->Guid, &ParaNdis_CrashGuid, sizeof(pDump->Guid));
            RtlCopyMemory(pDump->InBuffer, &BugCheckData.Location, dumpSize);
            pDump->OutBufferLength = dumpSize;
            DPrintf(0, ("[%s] written %d to %p", __FUNCTION__, (ULONG)BugCheckData.Location.Size, (UINT_PTR)BugCheckData.Location.Address ));
            DPrintf(0, ("[%s] dump data (%d) at %p", __FUNCTION__, pDump->OutBufferLength, pDump->OutBuffer));
        }
    }
}
#if defined(ENABLE_HISTORY_LOG)
/* Appends one entry to the circular history log. The slot is claimed
 * lock-free via InterlockedIncrement on the shared index, then reduced
 * modulo MAX_HISTORY; with the V1 entry layout, IRQL and processor number
 * are recorded as well. Parameter meanings depend on `op`
 * (see eHistoryLogOperation). */
void ParaNdis_DebugHistory(
    PARANDIS_ADAPTER *pContext,
    eHistoryLogOperation op,
    PVOID pParam1,
    ULONG lParam2,
    ULONG lParam3,
    ULONG lParam4)
{
    tBugCheckHistoryDataEntry *phe;
    ULONG index = InterlockedIncrement(&BugCheckData.StaticData.Data.CurrentHistoryIndex);
    index = (index - 1) % MAX_HISTORY;
    phe = &BugCheckData.StaticData.History[index];
    phe->Context = (UINT_PTR)pContext;
    phe->operation = op;
    phe->pParam1 = (UINT_PTR)pParam1;
    phe->lParam2 = lParam2;
    phe->lParam3 = lParam3;
    phe->lParam4 = lParam4;
#if (PARANDIS_DEBUG_HISTORY_DATA_VERSION == 1)
    phe->uIRQL = KeGetCurrentIrql();
    phe->uProcessor = KeGetCurrentProcessorNumber();
#endif
    NdisGetCurrentSystemTime(&phe->TimeStamp);
}
#endif

View file

@ -0,0 +1,677 @@
/*
* This file contains NDIS OID support procedures, common for NDIS5 and NDIS6
*
* Copyright (c) 2008-2017 Red Hat, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "ParaNdis-Oid.h"
#ifdef WPP_EVENT_TRACING
#include "ParaNdis-Oid.tmh"
#endif
#include <sal.h>
static const char VendorName[] = "Red Hat";

/* Maps the low nibble of `nibble` to its uppercase hex character ('0'-'9', 'A'-'F'). */
static UCHAR FORCEINLINE hexdigit(UCHAR nibble)
{
    static const UCHAR digits[] = "0123456789ABCDEF";
    return digits[nibble & 0xf];
}
/**********************************************************
Common implementation of copy operation when OID is set.
pOid->ulToDoFlags controls whether the NDIS-supplied data may be
truncated (ohfSetLessOK) or longer than needed (ohfSetMoreOK).
Parameters:
    tOidDesc *pOid - descriptor of OID
    PVOID pDest    - buffer to receive data sent by NDIS
    ULONG ulSize   - size of data to copy
Return value:
    NDIS_STATUS_SUCCESS or NDIS error code if target buffer size is wrong
Rules:
    PDEST   <>OK     SIZE    PAYLOAD SZ
    NULL    any      n/a     any         fail
    BUFF    any      0       any         success, none copied
    BUFF    any      SZ      ==SZ        success, copied SZ
    BUFF    !lessok  SZ      <SZ         fail (small), none copied
    BUFF    !moreok  SZ      >SZ         fail (overflow), none copied
    BUFF    lessok   SZ      <SZ         success, SZ cleared, payload sz copied
    BUFF    moreok   SZ      >SZ         success, copied SZ
***************************************************/
NDIS_STATUS ParaNdis_OidSetCopy(
    tOidDesc *pOid,
    PVOID pDest,
    ULONG ulSize)
{
    ULONG ulAvailable = pOid->InformationBufferLength;

    // no destination: nothing read, nothing needed
    if (!pDest)
    {
        *(pOid->pBytesRead) = 0;
        *(pOid->pBytesNeeded) = 0;
        return NDIS_STATUS_INVALID_OID;
    }
    // zero-sized copy: report everything consumed, copy nothing
    if (!ulSize)
    {
        *(pOid->pBytesRead) = ulAvailable;
        return NDIS_STATUS_SUCCESS;
    }
    if (ulAvailable < ulSize)
    {
        // short payload: allowed only with ohfSetLessOK
        if (!(pOid->ulToDoFlags & ohfSetLessOK))
        {
            *(pOid->pBytesRead) = 0;
            *(pOid->pBytesNeeded) = ulSize;
            return NDIS_STATUS_BUFFER_TOO_SHORT;
        }
        // clear the full destination, then copy what NDIS provided
        *(pOid->pBytesRead) = ulAvailable;
        NdisZeroMemory(pDest, ulSize);
        NdisMoveMemory(pDest, pOid->InformationBuffer, ulAvailable);
        return NDIS_STATUS_SUCCESS;
    }
    // exact fit, or oversized payload explicitly allowed: copy ulSize bytes
    if (ulAvailable == ulSize || (pOid->ulToDoFlags & ohfSetMoreOK))
    {
        *(pOid->pBytesRead) = ulSize;
        NdisMoveMemory(pDest, pOid->InformationBuffer, ulSize);
        return NDIS_STATUS_SUCCESS;
    }
    // oversized payload, not allowed
    *(pOid->pBytesNeeded) = ulSize;
    *(pOid->pBytesRead) = 0;
    return NDIS_STATUS_BUFFER_OVERFLOW;
}
/**********************************************************
Common handler of setting packet filter.
Copies the new filter mask from the OID buffer, rejects masks containing
bits outside PARANDIS_PACKET_FILTERS, and on success stores the mask and
pushes it to the device.
***********************************************************/
NDIS_STATUS ParaNdis_OnSetPacketFilter(PARANDIS_ADAPTER *pContext, tOidDesc *pOid)
{
    ULONG newValue = 0;
    NDIS_STATUS status = ParaNdis_OidSetCopy(
        pOid,
        &newValue,
        sizeof(newValue));
    /* validate only after a successful copy: on failure newValue would be
     * uninitialized, and the copy's error code must be preserved */
    if (status == NDIS_STATUS_SUCCESS && (newValue & ~PARANDIS_PACKET_FILTERS))
        status = NDIS_STATUS_INVALID_DATA;
    if (status == NDIS_STATUS_SUCCESS)
    {
        pContext->PacketFilter = newValue;
        DPrintf(1, ("[%s] PACKET FILTER SET TO %x", __FUNCTION__, pContext->PacketFilter));
        ParaNdis_UpdateDeviceFilters(pContext);
    }
    return status;
}
/* Fills PnP power capabilities: zeroes the structure and marks all
 * wake-up methods (magic packet, pattern, link change) as unsupported. */
void ParaNdis_FillPowerCapabilities(PNDIS_PNP_CAPABILITIES pCaps)
{
    NdisZeroMemory(pCaps, sizeof(*pCaps));
    pCaps->WakeUpCapabilities.MinMagicPacketWakeUp = NdisDeviceStateUnspecified;
    pCaps->WakeUpCapabilities.MinPatternWakeUp = NdisDeviceStateUnspecified;
    pCaps->WakeUpCapabilities.MinLinkChangeWakeUp = NdisDeviceStateUnspecified;
}
/**********************************************************
Common handler of setting multicast list: delegates parsing/storing to
ParaNdis_SetMulticastList and then refreshes the device-side filters
(unconditionally, matching the original behavior even on failure).
***********************************************************/
NDIS_STATUS ParaNdis_OnOidSetMulticastList(PARANDIS_ADAPTER *pContext, tOidDesc *pOid)
{
    NDIS_STATUS status;
    status = ParaNdis_SetMulticastList(
        pContext,
        pOid->InformationBuffer,
        pOid->InformationBufferLength,
        pOid->pBytesRead,
        pOid->pBytesNeeded);
    ParaNdis_UpdateDeviceFilters(pContext);
    return status;
}
/**********************************************************
Common helper of copy operation on GET OID.
Copies data from specified location to NDIS buffer.
64-bit variable will be casted to 32-bit, if specified on pOid->Flags.
Parameters:
    tOidDesc *pOid - descriptor of OID
    PVOID pInfo    - source to copy from
    ULONG ulSize   - source info size
    BOOLEAN bFreeInfo - free pInfo with NdisFreeMemory before returning
Return value:
    SUCCESS or kind of failure when the dest buffer size is wrong
Comments:
    pInfo must be non-NULL, otherwise error returned
    ulSize may be 0, then SUCCESS returned without copy
***********************************************************/
NDIS_STATUS ParaNdis_OidQueryCopy(
    tOidDesc *pOid,
    PVOID pInfo,
    ULONG ulSize,
    BOOLEAN bFreeInfo)
{
    NDIS_STATUS status = NDIS_STATUS_SUCCESS;
    *(pOid->pBytesNeeded) = ulSize;
    if (!pInfo)
    {
        status = NDIS_STATUS_INVALID_OID;
        *(pOid->pBytesWritten) = 0;
        *(pOid->pBytesNeeded) = 0;
    }
    else if (pOid->InformationBufferLength >= ulSize)
    {
        // destination large enough: full copy
        if (ulSize) NdisMoveMemory(pOid->InformationBuffer, pInfo, ulSize);
        *(pOid->pBytesWritten) = ulSize;
        *(pOid->pBytesNeeded) = 0;
    }
    else if ((pOid->ulToDoFlags & ohfQuery3264) && pOid->InformationBufferLength == sizeof(ULONG) && ulSize == sizeof(ULONG64))
    {
        // caller asked for a 32-bit view of a 64-bit counter: truncate
        // NOTE(review): this branch leaves *pBytesNeeded at ulSize (8) while
        // returning SUCCESS — presumably intentional; confirm against callers
        ULONG64 u64 = *(ULONG64 *)pInfo;
        ULONG ul = (ULONG)u64;
        NdisMoveMemory(pOid->InformationBuffer, &ul, sizeof(ul));
        *(pOid->pBytesWritten) = sizeof(ul);
    }
    else
    {
        status = NDIS_STATUS_BUFFER_TOO_SHORT;
        *(pOid->pBytesWritten) = 0;
    }
    if (bFreeInfo && pInfo)
    {
        NdisFreeMemory(pInfo, 0, 0);
    }
    return status;
}
/**********************************************************
Common handler of Oid queries
Parameters:
context
tOidDesc *pOid - filled descriptor of OID operation
Return value:
SUCCESS or kind of failure
***********************************************************/
NDIS_STATUS ParaNdis_OidQueryCommon(PARANDIS_ADAPTER *pContext, tOidDesc *pOid)
{
    NDIS_STATUS status = NDIS_STATUS_SUCCESS;
    PVOID pInfo = NULL;       /* source pointer for the final copy */
    ULONG ulSize = 0;         /* size of the data at pInfo */
    BOOLEAN bFreeInfo = FALSE;
    /* scratch storage for the value being queried; SETINFO selects the
       union member whose type matches the OID's natural size */
    union _tagtemp
    {
        NDIS_MEDIUM Medium;
        ULONG64 ul64;
        ULONG ul;
        USHORT us;
        NDIS_PNP_CAPABILITIES PMCaps;
    } u;
#if defined(_MSC_VER)
#define CONCATFIELD(object, field) object.##field
#else
#define CONCATFIELD(object, field) object.field
#endif
/* store 'value' into union member 'field' and point pInfo/ulSize at it */
#define SETINFO(field, value) pInfo = CONCATFIELD(&u, field); ulSize = sizeof(CONCATFIELD(u, field)); CONCATFIELD(u, field) = (value)
    switch (pOid->Oid)
    {
        case OID_GEN_SUPPORTED_LIST:
            ParaNdis_GetSupportedOid(&pInfo, &ulSize);
            break;
        case OID_GEN_HARDWARE_STATUS:
            SETINFO(ul, NdisHardwareStatusReady);
            break;
        case OID_GEN_MEDIA_SUPPORTED:
            __fallthrough;
        case OID_GEN_MEDIA_IN_USE:
            SETINFO(Medium, NdisMedium802_3);
            break;
        case OID_GEN_MAXIMUM_LOOKAHEAD:
            SETINFO(ul, pContext->MaxPacketSize.nMaxFullSizeOS);
            break;
        case OID_GEN_MAXIMUM_FRAME_SIZE:
            SETINFO(ul, pContext->MaxPacketSize.nMaxDataSize);
            break;
        case OID_GEN_TRANSMIT_BUFFER_SPACE:
            SETINFO(ul, pContext->MaxPacketSize.nMaxFullSizeOS * pContext->nofFreeTxDescriptors);
            break;
        case OID_GEN_RECEIVE_BUFFER_SPACE:
            SETINFO(ul, pContext->MaxPacketSize.nMaxFullSizeOS * pContext->NetMaxReceiveBuffers);
            break;
        case OID_GEN_RECEIVE_BLOCK_SIZE:
            __fallthrough;
        case OID_GEN_TRANSMIT_BLOCK_SIZE:
            __fallthrough;
        case OID_GEN_MAXIMUM_TOTAL_SIZE:
            SETINFO(ul, pContext->MaxPacketSize.nMaxFullSizeOS);
            break;
        case OID_GEN_TRANSMIT_QUEUE_LENGTH:
            // TODO: this is not completely correct, but only if
            // the TX queue is not full
            SETINFO(ul, pContext->maxFreeTxDescriptors - pContext->nofFreeTxDescriptors);
            break;
        case OID_GEN_VENDOR_ID:
            SETINFO(ul, 0x00ffffff);
            break;
        case OID_GEN_VENDOR_DESCRIPTION:
            pInfo = (PVOID)VendorName;
            ulSize = sizeof(VendorName);
            break;
        case OID_GEN_VENDOR_DRIVER_VERSION:
            SETINFO(ul, (NDIS_MINIPORT_MAJOR_VERSION << 16) | NDIS_MINIPORT_MINOR_VERSION);
            break;
        case OID_GEN_CURRENT_PACKET_FILTER:
            pInfo = &pContext->PacketFilter;
            ulSize = sizeof(pContext->PacketFilter);
            break;
        case OID_GEN_DRIVER_VERSION:
            SETINFO(us, ((NDIS_MINIPORT_MAJOR_VERSION << 8) | NDIS_MINIPORT_MINOR_VERSION));
            break;
        case OID_GEN_MAC_OPTIONS:
            {
                ULONG options = NDIS_MAC_OPTION_COPY_LOOKAHEAD_DATA |
                    NDIS_MAC_OPTION_TRANSFERS_NOT_PEND |
                    NDIS_MAC_OPTION_NO_LOOPBACK;
                if (IsPrioritySupported(pContext))
                    options |= NDIS_MAC_OPTION_8021P_PRIORITY;
                if (IsVlanSupported(pContext))
                    options |= NDIS_MAC_OPTION_8021Q_VLAN;
                SETINFO(ul, options);
            }
            break;
        case OID_GEN_MEDIA_CONNECT_STATUS:
            SETINFO(ul, pContext->bConnected ? NdisMediaStateConnected : NdisMediaStateDisconnected);
            break;
        case OID_GEN_MAXIMUM_SEND_PACKETS:
            // NDIS ignores it for deserialized drivers
            SETINFO(ul,pContext->nofFreeTxDescriptors);
            break;
        case OID_802_3_PERMANENT_ADDRESS:
            pInfo = pContext->PermanentMacAddress;
            ulSize = sizeof(pContext->PermanentMacAddress);
            break;
        case OID_802_3_CURRENT_ADDRESS:
            pInfo = pContext->CurrentMacAddress;
            ulSize = sizeof(pContext->CurrentMacAddress);
            break;
        case OID_PNP_QUERY_POWER:
            // size if 0, just to indicate success
            pInfo = &status;
            break;
        /* 64-bit statistics counters; the ohfQuery3264 flag in the rules
           table lets ParaNdis_OidQueryCopy truncate them to 32 bits */
        case OID_GEN_DIRECTED_BYTES_XMIT:
            SETINFO(ul64, pContext->Statistics.ifHCOutUcastOctets);
            break;
        case OID_GEN_DIRECTED_FRAMES_XMIT:
            SETINFO(ul64, pContext->Statistics.ifHCOutUcastPkts);
            break;
        case OID_GEN_MULTICAST_BYTES_XMIT:
            SETINFO(ul64, pContext->Statistics.ifHCOutMulticastOctets);
            break;
        case OID_GEN_MULTICAST_FRAMES_XMIT:
            SETINFO(ul64, pContext->Statistics.ifHCOutMulticastPkts);
            break;
        case OID_GEN_BROADCAST_BYTES_XMIT:
            SETINFO(ul64, pContext->Statistics.ifHCOutBroadcastOctets);
            break;
        case OID_GEN_BROADCAST_FRAMES_XMIT:
            SETINFO(ul64, pContext->Statistics.ifHCOutBroadcastPkts);
            break;
        case OID_GEN_DIRECTED_BYTES_RCV:
            SETINFO(ul64, pContext->Statistics.ifHCInUcastOctets);
            break;
        case OID_GEN_DIRECTED_FRAMES_RCV:
            SETINFO(ul64, pContext->Statistics.ifHCInUcastPkts);
            break;
        case OID_GEN_MULTICAST_BYTES_RCV:
            SETINFO(ul64, pContext->Statistics.ifHCInMulticastOctets);
            break;
        case OID_GEN_MULTICAST_FRAMES_RCV:
            SETINFO(ul64, pContext->Statistics.ifHCInMulticastPkts);
            break;
        case OID_GEN_BROADCAST_BYTES_RCV:
            SETINFO(ul64, pContext->Statistics.ifHCInBroadcastOctets);
            break;
        case OID_GEN_BROADCAST_FRAMES_RCV:
            SETINFO(ul64, pContext->Statistics.ifHCInBroadcastPkts);
            break;
        case OID_GEN_XMIT_OK:
            SETINFO(ul64,
                pContext->Statistics.ifHCOutUcastPkts +
                pContext->Statistics.ifHCOutMulticastPkts +
                pContext->Statistics.ifHCOutBroadcastPkts);
            break;
        case OID_GEN_RCV_OK:
            SETINFO(ul64,
                pContext->Statistics.ifHCInUcastPkts +
                pContext->Statistics.ifHCInMulticastPkts +
                pContext->Statistics.ifHCInBroadcastPkts);
            DPrintf(4, ("[%s] Total frames %I64u", __FUNCTION__, u.ul64));
            break;
        case OID_GEN_XMIT_ERROR:
            SETINFO(ul64, pContext->Statistics.ifOutErrors );
            break;
        /* error/collision counters the virtual device does not track:
           always reported as zero */
        case OID_GEN_RCV_ERROR:
            __fallthrough;
        case OID_GEN_RCV_NO_BUFFER:
            __fallthrough;
        case OID_802_3_RCV_OVERRUN:
            __fallthrough;
        case OID_GEN_RCV_CRC_ERROR:
            __fallthrough;
        case OID_802_3_RCV_ERROR_ALIGNMENT:
            __fallthrough;
        case OID_802_3_XMIT_UNDERRUN:
            __fallthrough;
        case OID_802_3_XMIT_ONE_COLLISION:
            __fallthrough;
        case OID_802_3_XMIT_DEFERRED:
            __fallthrough;
        case OID_802_3_XMIT_MAX_COLLISIONS:
            __fallthrough;
        case OID_802_3_XMIT_MORE_COLLISIONS:
            __fallthrough;
        case OID_802_3_XMIT_HEARTBEAT_FAILURE:
            __fallthrough;
        case OID_802_3_XMIT_TIMES_CRS_LOST:
            __fallthrough;
        case OID_802_3_XMIT_LATE_COLLISIONS:
            SETINFO(ul64, 0);
            break;
        case OID_802_3_MULTICAST_LIST:
            pInfo = pContext->MulticastData.MulticastList;
            ulSize = pContext->MulticastData.nofMulticastEntries * ETH_LENGTH_OF_ADDRESS;
            break;
        case OID_802_3_MAXIMUM_LIST_SIZE:
            SETINFO(ul, PARANDIS_MULTICAST_LIST_SIZE);
            break;
        case OID_PNP_CAPABILITIES:
            pInfo = &u.PMCaps;
            ulSize = sizeof(u.PMCaps);
            ParaNdis_FillPowerCapabilities(&u.PMCaps);
            break;
        case OID_802_3_MAC_OPTIONS:
            SETINFO(ul, 0);
            break;
        case OID_GEN_VLAN_ID:
            SETINFO(ul, pContext->VlanId);
            if (!IsVlanSupported(pContext))
                status = NDIS_STATUS_NOT_SUPPORTED;
            break;
        case OID_GEN_CURRENT_LOOKAHEAD:
            /* lazily initialize the reported lookahead to the maximum */
            if (!pContext->DummyLookAhead) pContext->DummyLookAhead = pContext->MaxPacketSize.nMaxFullSizeOS;
            pInfo = &pContext->DummyLookAhead;
            ulSize = sizeof(pContext->DummyLookAhead);
            break;
        case OID_PNP_ENABLE_WAKE_UP:
            SETINFO(ul, pContext->ulEnableWakeup);
            break;
        default:
            status = NDIS_STATUS_NOT_SUPPORTED;
            break;
    }
    /* all the cases above only prepare pInfo/ulSize; the actual copy into
       the NDIS buffer (with truncation/size checks) happens here */
    if (status == NDIS_STATUS_SUCCESS)
    {
        status = ParaNdis_OidQueryCopy(pOid, pInfo, ulSize, bFreeInfo);
    }
    return status;
}
/**********************************************************
Just gets OID name
***********************************************************/
/* Returns a printable name for a known OID; for unknown OIDs returns the
   OID value rendered as 8 hex digits from a static scratch buffer. */
const char *ParaNdis_OidName(NDIS_OID oid)
{
#undef MAKECASE
#define MAKECASE(id) case id: return #id;
    switch (oid)
    {
    MAKECASE(OID_GEN_SUPPORTED_LIST)
    MAKECASE(OID_GEN_HARDWARE_STATUS)
    MAKECASE(OID_GEN_MEDIA_SUPPORTED)
    MAKECASE(OID_GEN_MEDIA_IN_USE)
    MAKECASE(OID_GEN_MAXIMUM_LOOKAHEAD)
    MAKECASE(OID_GEN_MAXIMUM_FRAME_SIZE)
    MAKECASE(OID_GEN_LINK_SPEED)
    MAKECASE(OID_GEN_TRANSMIT_BUFFER_SPACE)
    MAKECASE(OID_GEN_RECEIVE_BUFFER_SPACE)
    MAKECASE(OID_GEN_TRANSMIT_BLOCK_SIZE)
    MAKECASE(OID_GEN_RECEIVE_BLOCK_SIZE)
    MAKECASE(OID_GEN_VENDOR_ID)
    MAKECASE(OID_GEN_VENDOR_DESCRIPTION)
    MAKECASE(OID_GEN_CURRENT_PACKET_FILTER)
    MAKECASE(OID_GEN_CURRENT_LOOKAHEAD)
    MAKECASE(OID_GEN_DRIVER_VERSION)
    MAKECASE(OID_GEN_MAXIMUM_TOTAL_SIZE)
    MAKECASE(OID_GEN_PROTOCOL_OPTIONS)
    MAKECASE(OID_GEN_MAC_OPTIONS)
    MAKECASE(OID_GEN_MEDIA_CONNECT_STATUS)
    MAKECASE(OID_GEN_MAXIMUM_SEND_PACKETS)
    MAKECASE(OID_GEN_VENDOR_DRIVER_VERSION)
    MAKECASE(OID_GEN_SUPPORTED_GUIDS)
    MAKECASE(OID_GEN_TRANSPORT_HEADER_OFFSET)
    MAKECASE(OID_GEN_MEDIA_CAPABILITIES)
    MAKECASE(OID_GEN_PHYSICAL_MEDIUM)
    MAKECASE(OID_GEN_XMIT_OK)
    MAKECASE(OID_GEN_RCV_OK)
    MAKECASE(OID_GEN_XMIT_ERROR)
    MAKECASE(OID_GEN_RCV_ERROR)
    MAKECASE(OID_GEN_RCV_NO_BUFFER)
    MAKECASE(OID_GEN_DIRECTED_BYTES_XMIT)
    MAKECASE(OID_GEN_DIRECTED_FRAMES_XMIT)
    MAKECASE(OID_GEN_MULTICAST_BYTES_XMIT)
    MAKECASE(OID_GEN_MULTICAST_FRAMES_XMIT)
    MAKECASE(OID_GEN_BROADCAST_BYTES_XMIT)
    MAKECASE(OID_GEN_BROADCAST_FRAMES_XMIT)
    MAKECASE(OID_GEN_DIRECTED_BYTES_RCV)
    MAKECASE(OID_GEN_DIRECTED_FRAMES_RCV)
    MAKECASE(OID_GEN_MULTICAST_BYTES_RCV)
    MAKECASE(OID_GEN_MULTICAST_FRAMES_RCV)
    MAKECASE(OID_GEN_BROADCAST_BYTES_RCV)
    MAKECASE(OID_GEN_BROADCAST_FRAMES_RCV)
    MAKECASE(OID_GEN_RCV_CRC_ERROR)
    MAKECASE(OID_GEN_TRANSMIT_QUEUE_LENGTH)
    MAKECASE(OID_GEN_GET_TIME_CAPS)
    MAKECASE(OID_GEN_GET_NETCARD_TIME)
    MAKECASE(OID_GEN_NETCARD_LOAD)
    MAKECASE(OID_GEN_DEVICE_PROFILE)
    MAKECASE(OID_GEN_INIT_TIME_MS)
    MAKECASE(OID_GEN_RESET_COUNTS)
    MAKECASE(OID_GEN_MEDIA_SENSE_COUNTS)
    MAKECASE(OID_GEN_VLAN_ID)
    MAKECASE(OID_PNP_CAPABILITIES)
    MAKECASE(OID_PNP_SET_POWER)
    MAKECASE(OID_PNP_QUERY_POWER)
    MAKECASE(OID_PNP_ADD_WAKE_UP_PATTERN)
    MAKECASE(OID_PNP_REMOVE_WAKE_UP_PATTERN)
    MAKECASE(OID_PNP_ENABLE_WAKE_UP)
    MAKECASE(OID_802_3_PERMANENT_ADDRESS)
    MAKECASE(OID_802_3_CURRENT_ADDRESS)
    MAKECASE(OID_802_3_MULTICAST_LIST)
    MAKECASE(OID_802_3_MAXIMUM_LIST_SIZE)
    MAKECASE(OID_802_3_MAC_OPTIONS)
    MAKECASE(OID_802_3_RCV_ERROR_ALIGNMENT)
    MAKECASE(OID_802_3_XMIT_ONE_COLLISION)
    MAKECASE(OID_802_3_XMIT_MORE_COLLISIONS)
    MAKECASE(OID_802_3_XMIT_DEFERRED)
    MAKECASE(OID_802_3_XMIT_MAX_COLLISIONS)
    MAKECASE(OID_802_3_RCV_OVERRUN)
    MAKECASE(OID_802_3_XMIT_UNDERRUN)
    MAKECASE(OID_802_3_XMIT_HEARTBEAT_FAILURE)
    MAKECASE(OID_802_3_XMIT_TIMES_CRS_LOST)
    MAKECASE(OID_802_3_XMIT_LATE_COLLISIONS)
    MAKECASE(OID_GEN_MACHINE_NAME)
    MAKECASE(OID_TCP_TASK_OFFLOAD)
    MAKECASE(OID_TCP_OFFLOAD_PARAMETERS)
    MAKECASE(OID_OFFLOAD_ENCAPSULATION)
    MAKECASE(OID_IP4_OFFLOAD_STATS)
    MAKECASE(OID_IP6_OFFLOAD_STATS)
    default:
        {
            /* unknown OID: format its 32-bit value as 8 hex characters.
               NOTE: the buffer is static, so this path is not reentrant;
               buffer[8] stays NUL because static storage is zero-initialized
               and only the first 8 bytes are ever written */
            static UCHAR buffer[9];
            UINT i;
            for (i = 0; i < 8; ++i)
            {
                UCHAR nibble = (UCHAR)((oid >> (28 - i * 4)) & 0xf);
                buffer[i] = hexdigit(nibble);
            }
            return (char *)buffer;
        }
    }
}
/**********************************************************
Checker of valid size of provided wake-up patter
Return value: SUCCESS or kind of failure where the buffer is wrong
***********************************************************/
static NDIS_STATUS ValidateWakeupPattern(PNDIS_PM_PACKET_PATTERN p, PULONG pValidSize)
{
    NDIS_STATUS status = NDIS_STATUS_BUFFER_TOO_SHORT;
    if (*pValidSize < sizeof(*p))
    {
        /* not even the fixed header fits; report the minimal size */
        *pValidSize = sizeof(*p);
    }
    else
    {
        ULONG ul = p->PatternOffset + p->PatternSize;
        /* fix: ul < PatternOffset means the ULONG sum wrapped around;
           such a malformed pattern must not validate successfully */
        if (ul >= p->PatternOffset && *pValidSize >= ul) status = NDIS_STATUS_SUCCESS;
        *pValidSize = ul;
        DPrintf(2, ("[%s] pattern of %d at %d, mask %d (%s)",
            __FUNCTION__, p->PatternSize, p->PatternOffset, p->MaskSize,
            status == NDIS_STATUS_SUCCESS ? "OK" : "Fail"));
    }
    return status;
}
/**********************************************************
Common handler of wake-up pattern addition
***********************************************************/
NDIS_STATUS ParaNdis_OnAddWakeupPattern(PARANDIS_ADAPTER *pContext, tOidDesc *pOid)
{
    PNDIS_PM_PACKET_PATTERN pPattern = (PNDIS_PM_PACKET_PATTERN)pOid->InformationBuffer;
    ULONG ulValidSize = pOid->InformationBufferLength;
    NDIS_STATUS status = ValidateWakeupPattern(pPattern, &ulValidSize);

    /* report consumed/required sizes depending on validation result */
    if (status != NDIS_STATUS_SUCCESS)
    {
        *pOid->pBytesRead = 0;
        *pOid->pBytesNeeded = ulValidSize;
    }
    else
    {
        *pOid->pBytesRead = ulValidSize;
    }
    // TODO: Apply
    return status;
}
/**********************************************************
Common handler of wake-up pattern removal
***********************************************************/
NDIS_STATUS ParaNdis_OnRemoveWakeupPattern(PARANDIS_ADAPTER *pContext, tOidDesc *pOid)
{
    PNDIS_PM_PACKET_PATTERN pPattern = (PNDIS_PM_PACKET_PATTERN)pOid->InformationBuffer;
    ULONG ulValidSize = pOid->InformationBufferLength;
    NDIS_STATUS status = ValidateWakeupPattern(pPattern, &ulValidSize);

    /* same size reporting as on pattern addition */
    if (status != NDIS_STATUS_SUCCESS)
    {
        *pOid->pBytesRead = 0;
        *pOid->pBytesNeeded = ulValidSize;
    }
    else
    {
        *pOid->pBytesRead = ulValidSize;
    }
    return status;
}
/**********************************************************
Common handler of wake-up enabling upon standby
***********************************************************/
NDIS_STATUS ParaNdis_OnEnableWakeup(PARANDIS_ADAPTER *pContext, tOidDesc *pOid)
{
    NDIS_STATUS status;

    /* copy the wake-up enable mask from the OID buffer into the context */
    status = ParaNdis_OidSetCopy(pOid, &pContext->ulEnableWakeup, sizeof(pContext->ulEnableWakeup));
    if (status != NDIS_STATUS_SUCCESS)
    {
        return status;
    }
    DPrintf(0, ("[%s] new value %lX", __FUNCTION__, pContext->ulEnableWakeup));
    return status;
}
/**********************************************************
Dummy implementation
***********************************************************/
NDIS_STATUS ParaNdis_OnSetLookahead(PARANDIS_ADAPTER *pContext, tOidDesc *pOid)
{
    /* only stores the requested lookahead size in the context
       ("DummyLookAhead"); nothing is programmed to the device here */
    return ParaNdis_OidSetCopy(pOid, &pContext->DummyLookAhead, sizeof(pContext->DummyLookAhead));
}
NDIS_STATUS ParaNdis_OnSetVlanId(PARANDIS_ADAPTER *pContext, tOidDesc *pOid)
{
    NDIS_STATUS status = NDIS_STATUS_NOT_SUPPORTED;
    if (IsVlanSupported(pContext))
    {
        status = ParaNdis_OidSetCopy(pOid, &pContext->VlanId, sizeof(pContext->VlanId));
        /* fix: only mask the id and push it to the device filters when the
           copy succeeded; previously this ran even on a failed SET */
        if (status == NDIS_STATUS_SUCCESS)
        {
            /* valid 802.1Q VLAN ids occupy 12 bits */
            pContext->VlanId &= 0xfff;
            DPrintf(0, ("[%s] new value %d on MAC %X", __FUNCTION__, pContext->VlanId, pContext->CurrentMacAddress[5]));
            ParaNdis_DeviceFiltersUpdateVlanId(pContext);
        }
    }
    return status;
}
/**********************************************************
Retrieves support rules for specific OID
***********************************************************/
void ParaNdis_GetOidSupportRules(NDIS_OID oid, tOidWhatToDo *pRule, const tOidWhatToDo *Table)
{
    static const tOidWhatToDo defaultRule = { 0, 0, 0, 0, 0, NULL, "Unknown OID" };
    const tOidWhatToDo *pEntry;

    /* start from the "unknown" rule, then look for a table match;
       the table is terminated by an entry with oid == 0 */
    *pRule = defaultRule;
    pRule->oid = oid;
    for (pEntry = Table; pEntry->oid != 0; ++pEntry)
    {
        if (pEntry->oid == oid)
        {
            *pRule = *pEntry;
            break;
        }
    }
    pRule->name = ParaNdis_OidName(oid);
}

View file

@ -0,0 +1,104 @@
/*
* This file contains common for NDIS5/NDIS6 definition,
* related to OID support
*
* Copyright (c) 2008-2017 Red Hat, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef PARANDIS_COMMON_OID_H
#define PARANDIS_COMMON_OID_H
#include "ndis56common.h"
/**********************************************************
Wrapper for all the data, related to any OID request
***********************************************************/
typedef struct _tagOidDesc
{
    NDIS_OID Oid;                   // oid code
    ULONG ulToDoFlags;              // combination of eOidHelperFlags
    PVOID InformationBuffer;        // buffer received from NDIS
    UINT InformationBufferLength;   // its length
    PUINT pBytesWritten;            // OUT for query/method
    PUINT pBytesNeeded;             // OUT for query/set/method when length of buffer is wrong
    PUINT pBytesRead;               // OUT for set/method
    PVOID Reserved;                 // Reserved for pending requests
} tOidDesc;

/* signature common to all OID SET handlers */
typedef NDIS_STATUS (*OIDHANDLERPROC)(PARANDIS_ADAPTER *pContext, tOidDesc *pOid);

/* per-OID dispatch and tracing rule; tables of these entries are
   terminated by an entry with oid == 0 */
typedef struct _tagOidWhatToDo
{
    NDIS_OID oid;              // oid number
    int nEntryLevel;           // do print on entry level
    int nExitFailLevel;        // do print on exit if failed
    int nExitOKLevel;          // do print on exit if OK
    UINT Flags;                // combination of eOidHelperFlags
    OIDHANDLERPROC OidSetProc; // procedure to call on SET
    const char *name;          // printable name
} tOidWhatToDo;

typedef enum _tageOidHelperFlags {
    ohfQuery = 1,                        // can be queried
    ohfSet = 2,                          // can be set
    ohfQuerySet = ohfQuery | ohfSet,
    ohfQueryStatOnly = 4,                // redirect query stat to query
    ohfQueryStat = ohfQueryStatOnly | ohfQuery,
    ohfQuery3264 = 8 | ohfQuery,         // convert 64 to 32 on query
    ohfQueryStat3264 = 8 | ohfQueryStat, // convert 64 to 32 on query stat
    ohfSetLessOK = 16,                   // on set: if buffer is smaller, cleanup and copy
    ohfSetMoreOK = 32                    // on set: if buffer is larger, copy anyway
} eOidHelperFlags;
/**********************************************************
Common procedures related to OID support
***********************************************************/
NDIS_STATUS ParaNdis_OidQueryCommon(PARANDIS_ADAPTER *pContext, tOidDesc *pOid);
NDIS_STATUS ParaNdis_OidQueryCopy(tOidDesc *pOid, PVOID pInfo, ULONG ulSize, BOOLEAN bFreeInfo);
/* NOTE(review): a 'static' function declaration in a shared header is
   unusual - each including translation unit is then expected to provide
   its own definition; confirm this is intentional */
static NDIS_STATUS ParaNdis_OidQuery(PARANDIS_ADAPTER *pContext, tOidDesc *pOid);
NDIS_STATUS ParaNdis_OnOidSetMulticastList(PARANDIS_ADAPTER *pContext, tOidDesc *pOid);
NDIS_STATUS ParaNdis_OnSetPacketFilter(PARANDIS_ADAPTER *pContext, tOidDesc *pOid);
NDIS_STATUS ParaNdis_OnAddWakeupPattern(PARANDIS_ADAPTER *pContext, tOidDesc *pOid);
NDIS_STATUS ParaNdis_OnRemoveWakeupPattern(PARANDIS_ADAPTER *pContext, tOidDesc *pOid);
NDIS_STATUS ParaNdis_OnEnableWakeup(PARANDIS_ADAPTER *pContext, tOidDesc *pOid);
NDIS_STATUS ParaNdis_OnSetLookahead(PARANDIS_ADAPTER *pContext, tOidDesc *pOid);
NDIS_STATUS ParaNdis_OnSetVlanId(PARANDIS_ADAPTER *pContext, tOidDesc *pOid);
NDIS_STATUS ParaNdis_OidSetCopy(tOidDesc *pOid, PVOID pDest, ULONG ulSize);
void ParaNdis_FillPowerCapabilities(PNDIS_PNP_CAPABILITIES pCaps);
void ParaNdis_GetOidSupportRules(NDIS_OID oid, tOidWhatToDo *pRule, const tOidWhatToDo *Table);
const char *ParaNdis_OidName(NDIS_OID oid);

/**********************************************************
Procedures to be implemented in NDIS5/NDIS6 specific modules
***********************************************************/
void ParaNdis_GetSupportedOid(PVOID *pOidsArray, PULONG pLength);
NDIS_STATUS ParaNdis_OnSetPower(PARANDIS_ADAPTER *pContext, tOidDesc *pOid);

#endif

View file

@ -0,0 +1,389 @@
/*
* This file contains NDIS driver VirtIO callbacks
*
* Copyright (c) 2008-2017 Red Hat, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "ndis56common.h"
/////////////////////////////////////////////////////////////////////////////////////
//
// ReadVirtIODeviceRegister\WriteVirtIODeviceRegister
// NDIS specific implementation of the IO and memory space read\write
//
// The lower 64k of memory is never mapped so we can use the same routines
// for both port I/O and memory access and use the address alone to decide
// which space to use.
/////////////////////////////////////////////////////////////////////////////////////
#define PORT_MASK 0xFFFF
static u32 ReadVirtIODeviceRegister(ULONG_PTR ulRegister)
{
    ULONG ulValue;

    /* addresses within the low 64k are port I/O, above it - memory-mapped */
    if ((ulRegister & ~PORT_MASK) == 0) {
        NdisRawReadPortUlong(ulRegister, &ulValue);
    } else {
        NdisReadRegisterUlong(ulRegister, &ulValue);
    }
    DPrintf(6, ("[%s]R[%x]=%x", __FUNCTION__, (ULONG)ulRegister, ulValue));
    return ulValue;
}
static void WriteVirtIODeviceRegister(ULONG_PTR ulRegister, u32 ulValue)
{
    DPrintf(6, ("[%s]R[%x]=%x", __FUNCTION__, (ULONG)ulRegister, ulValue));
    /* addresses within the low 64k are port I/O, above it - memory-mapped */
    if ((ulRegister & ~PORT_MASK) == 0) {
        NdisRawWritePortUlong(ulRegister, ulValue);
    } else {
        NdisWriteRegisterUlong((PULONG)ulRegister, ulValue);
    }
}
static u8 ReadVirtIODeviceByte(ULONG_PTR ulRegister)
{
    u8 bValue;

    /* addresses within the low 64k are port I/O, above it - memory-mapped */
    if ((ulRegister & ~PORT_MASK) == 0) {
        NdisRawReadPortUchar(ulRegister, &bValue);
    } else {
        NdisReadRegisterUchar(ulRegister, &bValue);
    }
    DPrintf(6, ("[%s]R[%x]=%x", __FUNCTION__, (ULONG)ulRegister, bValue));
    return bValue;
}
static void WriteVirtIODeviceByte(ULONG_PTR ulRegister, u8 bValue)
{
    DPrintf(6, ("[%s]R[%x]=%x", __FUNCTION__, (ULONG)ulRegister, bValue));
    /* addresses within the low 64k are port I/O, above it - memory-mapped */
    if ((ulRegister & ~PORT_MASK) == 0) {
        NdisRawWritePortUchar(ulRegister, bValue);
    } else {
        NdisWriteRegisterUchar((PUCHAR)ulRegister, bValue);
    }
}
static u16 ReadVirtIODeviceWord(ULONG_PTR ulRegister)
{
    u16 wValue;

    /* addresses within the low 64k are port I/O, above it - memory-mapped */
    if ((ulRegister & ~PORT_MASK) == 0) {
        NdisRawReadPortUshort(ulRegister, &wValue);
    } else {
        NdisReadRegisterUshort(ulRegister, &wValue);
    }
    DPrintf(6, ("[%s]R[%x]=%x\n", __FUNCTION__, (ULONG)ulRegister, wValue));
    return wValue;
}
static void WriteVirtIODeviceWord(ULONG_PTR ulRegister, u16 wValue)
{
    /* Cleanup: removed the preprocessor-dead '#if 1/#else' fault-injection
       hook that dropped selected port writes to provoke the Hang handler;
       only the live path is kept. */
    if (ulRegister & ~PORT_MASK) {
        NdisWriteRegisterUshort((PUSHORT)ulRegister, wValue);
    } else {
        NdisRawWritePortUshort(ulRegister, wValue);
    }
}
/* Allocates (or recycles) a block of shared, physically contiguous memory.
   Ranges are tracked in pContext->SharedMemoryRanges; an existing unused
   range of the exact size is reused before a new NDIS shared allocation
   is made. Returns NULL when no range slot is free or allocation fails. */
static void *mem_alloc_contiguous_pages(void *context, size_t size)
{
    PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)context;
    PVOID retVal = NULL;
    ULONG i;

    /* find the first unused memory range of the requested size */
    for (i = 0; i < MAX_NUM_OF_QUEUES; i++) {
        if (pContext->SharedMemoryRanges[i].pBase != NULL &&
            pContext->SharedMemoryRanges[i].bUsed == FALSE &&
            pContext->SharedMemoryRanges[i].uLength == (ULONG)size) {
            retVal = pContext->SharedMemoryRanges[i].pBase;
            pContext->SharedMemoryRanges[i].bUsed = TRUE;
            break;
        }
    }

    if (!retVal) {
        /* find the first null memory range descriptor and allocate */
        for (i = 0; i < MAX_NUM_OF_QUEUES; i++) {
            if (pContext->SharedMemoryRanges[i].pBase == NULL) {
                break;
            }
        }
        if (i < MAX_NUM_OF_QUEUES) {
            NdisMAllocateSharedMemory(
                pContext->MiniportHandle,
                (ULONG)size,
                TRUE /* Cached */,
                &pContext->SharedMemoryRanges[i].pBase,
                &pContext->SharedMemoryRanges[i].BasePA);
            retVal = pContext->SharedMemoryRanges[i].pBase;
            if (retVal) {
                /* record the range only on success; a failed slot keeps
                   pBase == NULL and stays available */
                NdisZeroMemory(retVal, size);
                pContext->SharedMemoryRanges[i].uLength = (ULONG)size;
                pContext->SharedMemoryRanges[i].bUsed = TRUE;
            }
        }
    }

    if (retVal) {
        DPrintf(6, ("[%s] returning %p, size %x\n", __FUNCTION__, retVal, (ULONG)size));
    } else {
        DPrintf(0, ("[%s] failed to allocate size %x\n", __FUNCTION__, (ULONG)size));
    }
    return retVal;
}
static void mem_free_contiguous_pages(void *context, void *virt)
{
    PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)context;
    ULONG index;

    /* the shared memory is not returned to NDIS here; the matching range
       is only marked unused so mem_alloc_contiguous_pages can recycle it */
    for (index = 0; index < MAX_NUM_OF_QUEUES; index++) {
        if (pContext->SharedMemoryRanges[index].pBase == virt) {
            pContext->SharedMemoryRanges[index].bUsed = FALSE;
            break;
        }
    }

    if (index == MAX_NUM_OF_QUEUES) {
        DPrintf(0, ("[%s] failed to free %p\n", __FUNCTION__, virt));
    } else {
        DPrintf(6, ("[%s] freed %p at index %d\n", __FUNCTION__, virt, index));
    }
}
static ULONGLONG mem_get_physical_address(void *context, void *virt)
{
PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)context;
ULONG_PTR uAddr = (ULONG_PTR)virt;
ULONG i;
for (i = 0; i < MAX_NUM_OF_QUEUES; i++) {
ULONG_PTR uBase = (ULONG_PTR)pContext->SharedMemoryRanges[i].pBase;
if (uAddr >= uBase && uAddr < (uBase + pContext->SharedMemoryRanges[i].uLength)) {
ULONGLONG retVal = pContext->SharedMemoryRanges[i].BasePA.QuadPart + (uAddr - uBase);
DPrintf(6, ("[%s] translated %p to %I64X\n", __FUNCTION__, virt, retVal));
return retVal;
}
}
DPrintf(0, ("[%s] failed to translate %p\n", __FUNCTION__, virt));
return 0;
}
static void *mem_alloc_nonpaged_block(void *context, size_t size)
{
    PVOID pBlock;
    NDIS_STATUS allocStatus;

    /* plain non-paged pool allocation, zeroed on success */
    allocStatus = NdisAllocateMemoryWithTag(
        &pBlock,
        (UINT)size,
        PARANDIS_MEMORY_TAG);
    if (allocStatus != NDIS_STATUS_SUCCESS) {
        pBlock = NULL;
    }

    if (pBlock) {
        NdisZeroMemory(pBlock, size);
        DPrintf(6, ("[%s] returning %p, len %x\n", __FUNCTION__, pBlock, (ULONG)size));
    } else {
        DPrintf(0, ("[%s] failed to allocate size %x\n", __FUNCTION__, (ULONG)size));
    }
    return pBlock;
}
static void mem_free_nonpaged_block(void *context, void *addr)
{
    UNREFERENCED_PARAMETER(context);

    /* counterpart of mem_alloc_nonpaged_block */
    NdisFreeMemory(addr, 0, 0);
    DPrintf(6, ("[%s] freed %p\n", __FUNCTION__, addr));
}
static int PCIReadConfig(PPARANDIS_ADAPTER pContext,
                         int where,
                         void *buffer,
                         size_t length)
{
    ULONG bytesRead;

    /* read 'length' bytes of PCI config space starting at offset 'where';
       returns 0 on success, -1 on a short read */
    bytesRead = NdisReadPciSlotInformation(
        pContext->MiniportHandle,
        0 /* SlotNumber */,
        where,
        buffer,
        (ULONG)length);

    if (bytesRead != length) {
        DPrintf(0, ("[%s] failed to read %d bytes at %d\n", __FUNCTION__, bytesRead, where));
        return -1;
    }
    DPrintf(6, ("[%s] read %d bytes at %d\n", __FUNCTION__, bytesRead, where));
    return 0;
}
/* Thin 8/16/32-bit wrappers exposing PCIReadConfig through the
   callback signatures expected by the VirtIO library. */
static int pci_read_config_byte(void *context, int where, u8 *bVal)
{
    return PCIReadConfig((PPARANDIS_ADAPTER)context, where, bVal, sizeof(*bVal));
}

static int pci_read_config_word(void *context, int where, u16 *wVal)
{
    return PCIReadConfig((PPARANDIS_ADAPTER)context, where, wVal, sizeof(*wVal));
}

static int pci_read_config_dword(void *context, int where, u32 *dwVal)
{
    return PCIReadConfig((PPARANDIS_ADAPTER)context, where, dwVal, sizeof(*dwVal));
}
static size_t pci_get_resource_len(void *context, int bar)
{
    PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)context;

    /* reject BAR indices beyond what a type-0 PCI header provides */
    if (bar >= PCI_TYPE0_ADDRESSES) {
        DPrintf(0, ("[%s] queried invalid BAR %d\n", __FUNCTION__, bar));
        return 0;
    }
    return pContext->AdapterResources.PciBars[bar].uLength;
}
/* Returns an I/O address for 'offset' within the given PCI BAR, mapping the
   BAR lazily on first use. For port-space BARs the returned value is based
   on the physical port address; for memory BARs it is the mapped virtual
   address. Returns NULL on invalid BAR, failed mapping or out-of-range
   offset. NOTE(review): 'maxlen' is not validated against the BAR length -
   only 'offset' is checked; confirm callers respect the remaining length. */
static void *pci_map_address_range(void *context, int bar, size_t offset, size_t maxlen)
{
    PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)context;

    if (bar < PCI_TYPE0_ADDRESSES) {
        tBusResource *pRes = &pContext->AdapterResources.PciBars[bar];
        if (pRes->pBase == NULL) {
            /* BAR not mapped yet */
            if (pRes->bPortSpace) {
                if (NDIS_STATUS_SUCCESS == NdisMRegisterIoPortRange(
                        &pRes->pBase,
                        pContext->MiniportHandle,
                        pRes->BasePA.LowPart,
                        pRes->uLength)) {
                    DPrintf(6, ("[%s] mapped port BAR at %x\n", __FUNCTION__, pRes->BasePA.LowPart));
                } else {
                    pRes->pBase = NULL;
                    DPrintf(0, ("[%s] failed to map port BAR at %x\n", __FUNCTION__, pRes->BasePA.LowPart));
                }
            } else {
                if (NDIS_STATUS_SUCCESS == NdisMMapIoSpace(
                        &pRes->pBase,
                        pContext->MiniportHandle,
                        pRes->BasePA,
                        pRes->uLength)) {
                    DPrintf(6, ("[%s] mapped memory BAR at %I64x\n", __FUNCTION__, pRes->BasePA.QuadPart));
                } else {
                    pRes->pBase = NULL;
                    DPrintf(0, ("[%s] failed to map memory BAR at %I64x\n", __FUNCTION__, pRes->BasePA.QuadPart));
                }
            }
        }
        if (pRes->pBase != NULL && offset < pRes->uLength) {
            if (pRes->bPortSpace) {
                /* use physical address for port I/O */
                return (PUCHAR)(ULONG_PTR)pRes->BasePA.LowPart + offset;
            } else {
                /* use virtual address for memory I/O */
                return (PUCHAR)pRes->pBase + offset;
            }
        } else {
            DPrintf(0, ("[%s] failed to get map BAR %d, offset %x\n", __FUNCTION__, bar, offset));
        }
    } else {
        DPrintf(0, ("[%s] queried invalid BAR %d\n", __FUNCTION__, bar));
    }
    return NULL;
}
static u16 vdev_get_msix_vector(void *context, int queue)
{
    PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)context;

    /* we don't run on MSI support so this will never be true */
    if (!pContext->bUsingMSIX || queue < 0) {
        return VIRTIO_MSI_NO_VECTOR;
    }
    return (u16)pContext->AdapterResources.Vector;
}
static void vdev_sleep(void *context, unsigned int msecs)
{
    UNREFERENCED_PARAMETER(context);

    /* NdisMSleep takes microseconds */
    NdisMSleep(msecs * 1000);
}
/* Callback table handed to the VirtIO library; the initializer is
   positional, so entry order must match the VirtIOSystemOps declaration */
VirtIOSystemOps ParaNdisSystemOps = {
    /* .vdev_read_byte = */ ReadVirtIODeviceByte,
    /* .vdev_read_word = */ ReadVirtIODeviceWord,
    /* .vdev_read_dword = */ ReadVirtIODeviceRegister,
    /* .vdev_write_byte = */ WriteVirtIODeviceByte,
    /* .vdev_write_word = */ WriteVirtIODeviceWord,
    /* .vdev_write_dword = */ WriteVirtIODeviceRegister,
    /* .mem_alloc_contiguous_pages = */ mem_alloc_contiguous_pages,
    /* .mem_free_contiguous_pages = */ mem_free_contiguous_pages,
    /* .mem_get_physical_address = */ mem_get_physical_address,
    /* .mem_alloc_nonpaged_block = */ mem_alloc_nonpaged_block,
    /* .mem_free_nonpaged_block = */ mem_free_nonpaged_block,
    /* .pci_read_config_byte = */ pci_read_config_byte,
    /* .pci_read_config_word = */ pci_read_config_word,
    /* .pci_read_config_dword = */ pci_read_config_dword,
    /* .pci_get_resource_len = */ pci_get_resource_len,
    /* .pci_map_address_range = */ pci_map_address_range,
    /* .vdev_get_msix_vector = */ vdev_get_msix_vector,
    /* .vdev_sleep = */ vdev_sleep,
};

View file

@ -0,0 +1,123 @@
/*
* Contains common Ethernet-related definition, not defined in NDIS
*
* Copyright (c) 2008-2017 Red Hat, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _ETHERNET_UTILS_H
#define _ETHERNET_UTILS_H
// assuming <ndis.h> included
/* TRUE when the locally-administered bit of the MAC address is set */
#define ETH_IS_LOCALLY_ADMINISTERED(Address) \
    (BOOLEAN)(((PUCHAR)(Address))[0] & ((UCHAR)0x02))

/* TRUE when all six bytes of the MAC address are zero */
#define ETH_IS_EMPTY(Address) \
    ((((PUCHAR)(Address))[0] == ((UCHAR)0x00)) && (((PUCHAR)(Address))[1] == ((UCHAR)0x00)) && (((PUCHAR)(Address))[2] == ((UCHAR)0x00)) && (((PUCHAR)(Address))[3] == ((UCHAR)0x00)) && (((PUCHAR)(Address))[4] == ((UCHAR)0x00)) && (((PUCHAR)(Address))[5] == ((UCHAR)0x00)))

/* TRUE when the EtherType field (bytes 12-13 of the frame) is 0x8100,
   i.e. the frame carries an 802.1Q priority/VLAN tag */
#define ETH_HAS_PRIO_HEADER(Address) \
    (((PUCHAR)(Address))[12] == ((UCHAR)0x81) && ((PUCHAR)(Address))[13] == ((UCHAR)0x00))

#include <pshpack1.h>
/* Ethernet frame header, packed to its 14-byte wire layout */
typedef struct _ETH_HEADER
{
    UCHAR DstAddr[ETH_LENGTH_OF_ADDRESS];
    UCHAR SrcAddr[ETH_LENGTH_OF_ADDRESS];
    USHORT EthType;
} ETH_HEADER, *PETH_HEADER;
#include <poppack.h>

#define ETH_HEADER_SIZE (sizeof(ETH_HEADER))
#define ETH_MIN_PACKET_SIZE 60
#define ETH_PRIORITY_HEADER_OFFSET 12
#define ETH_PRIORITY_HEADER_SIZE 4
/*
 * Writes an 802.1Q priority/VLAN tag into the 4-byte area at pDest:
 * byte 0 gets the TPID high byte (0x81), byte 2 gets PCP (priority, top
 * 3 bits) merged with the high 4 bits of the VLAN id, and byte 3 is OR-ed
 * with the low byte of the VLAN id. Bytes 1 and 3 are never assigned
 * outright, so the caller is expected to supply a zeroed tag area
 * (NOTE(review): confirm the buffer is pre-zeroed at all call sites).
 */
static void FORCEINLINE SetPriorityData(UCHAR *pDest, ULONG priority, ULONG VlanID)
{
    pDest[0] = 0x81;
    /* one store instead of assign-then-OR; truncation after OR is equivalent */
    pDest[2] = (UCHAR)((priority << 5) | (VlanID >> 8));
    pDest[3] |= (UCHAR)VlanID;
}
/* Classification of a received frame by its destination MAC address. */
typedef enum _tag_eInspectedPacketType
{
    iptUnicast,     // directed to a single station
    iptBroadcast,   // FF:FF:FF:FF:FF:FF
    iptMulticast,   // group bit set, not broadcast
    iptInvalid      // could not be classified (e.g. frame too short)
}eInspectedPacketType;
// IP Header RFC 791
// Field order matches the wire format; multi-byte fields are in network byte order.
typedef struct _tagIPv4Header {
    UCHAR ip_verlen; // length in 32-bit units(low nibble), version (high nibble)
    UCHAR ip_tos; // Type of service
    USHORT ip_length; // Total length
    USHORT ip_id; // Identification
    USHORT ip_offset; // fragment offset and flags
    UCHAR ip_ttl; // Time to live
    UCHAR ip_protocol; // Protocol
    USHORT ip_xsum; // Header checksum
    ULONG ip_src; // Source IP address
    ULONG ip_dest; // Destination IP address
} IPv4Header;
// TCP header RFC 793
typedef struct _tagTCPHeader {
    USHORT tcp_src; // Source port
    USHORT tcp_dest; // Destination port
    ULONG tcp_seq; // Sequence number
    ULONG tcp_ack; // Ack number
    USHORT tcp_flags; // header length and flags
    USHORT tcp_window; // Window size
    USHORT tcp_xsum; // Checksum
    USHORT tcp_urgent; // Urgent
}TCPHeader;
// UDP Header RFC 768
typedef struct _tagUDPHeader {
    USHORT udp_src; // Source port
    USHORT udp_dest; // Destination port
    USHORT udp_length; // length of datagram
    USHORT udp_xsum; // checksum
}UDPHeader;
// Byte offsets of the checksum fields inside their respective transport headers
#define TCP_CHECKSUM_OFFSET 16
#define UDP_CHECKSUM_OFFSET 6
// Maximum header sizes permitted by the protocols (with options)
#define MAX_IPV4_HEADER_SIZE 60
#define MAX_TCP_HEADER_SIZE 60
/* Exchanges the two bytes of a 16-bit value (host/network byte-order swap). */
static __inline USHORT swap_short(USHORT us)
{
    USHORT high = (USHORT)(us << 8);
    USHORT low = (USHORT)(us >> 8);
    return (USHORT)(high | low);
}
#endif

View file

@ -0,0 +1,108 @@
/*
* This file contains debug-related definitions for kernel driver
*
* Copyright (c) 2008-2017 Red Hat, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/**********************************************************************
WARNING: this file is incompatible with Logo requirements
TODO: Optional WPP technique
**********************************************************************/
#ifndef _K_DEBUG_PRINT_H
#define _K_DEBUG_PRINT_H
// Current verbosity threshold and on/off switch, set from registry at driver load
extern int nDebugLevel;
extern int bDebugPrint;
// printf-style sink used by all DPrintf-family macros
typedef void (*DEBUGPRINTFUNC)(const char *fmt, ...);
extern DEBUGPRINTFUNC pDebugPrint;
// Helpers implemented in the debug module; log function entry/exit/plain string
void _LogOutEntry(int level, const char *s);
void _LogOutExitValue(int level, const char *s, ULONG value);
void _LogOutString(int level, const char *s);
#define DEBUG_ENTRY(level) _LogOutEntry(level, __FUNCTION__)
#define DEBUG_EXIT_STATUS(level, status) _LogOutExitValue(level, __FUNCTION__, status)
#define DPrintFunctionName(Level) _LogOutString(Level, __FUNCTION__)
#ifndef WPP_EVENT_TRACING
// Non-WPP build: tracing hooks become no-ops, DPrintf filters by level at run time.
#define WPP_INIT_TRACING(a,b)
#define WPP_CLEANUP(a)
#define MAX_DEBUG_LEVEL 1
// Fmt must be a parenthesized printf argument list, e.g. DPrintf(0, ("x=%d\n", x))
#define DPrintf(Level, Fmt) { if ( (Level) > MAX_DEBUG_LEVEL || (Level) > nDebugLevel || !bDebugPrint ) {} else { pDebugPrint Fmt; } }
#define DPrintfBypass(Level, Fmt) DPrintf(Level, Fmt)
#else
//#define WPP_USE_BYPASS
// WPP build: print regardless of WPP session, still honoring level/switch
#define DPrintfAnyway(Level, Fmt) \
{ \
    if (bDebugPrint && (Level) <= nDebugLevel) \
    { \
        pDebugPrint Fmt; \
    } \
}
//{05F77115-E57E-49bf-90DF-C0E6B6478E5F}
#define WPP_CONTROL_GUIDS \
    WPP_DEFINE_CONTROL_GUID(NetKVM, (05F77115,E57E,49bf,90DF,C0E6B6478E5F), \
        WPP_DEFINE_BIT(TRACE_DEBUG)\
)
#define WPP_LEVEL_ENABLED(LEVEL) \
    (nDebugLevel >= (LEVEL))
#define WPP_LEVEL_LOGGER(LEVEL) (WPP_CONTROL(WPP_BIT_ ## TRACE_DEBUG).Logger),
#if WPP_USE_BYPASS
#define DPrintfBypass(Level, Fmt) DPrintfAnyway(Level, Fmt)
#else
#define DPrintfBypass(Level, Fmt)
#endif
// Callback invoked when a WPP session enables/disables this provider
#define WPP_PRIVATE_ENABLE_CALLBACK WppEnableCallback
extern VOID WppEnableCallback(
    __in LPCGUID Guid,
    __in __int64 Logger,
    __in BOOLEAN Enable,
    __in ULONG Flags,
    __in UCHAR Level);
#endif
#endif

View file

@ -0,0 +1,892 @@
/*
* This file contains general definitions for VirtIO network adapter driver,
* common for both NDIS5 and NDIS6
*
* Copyright (c) 2008-2017 Red Hat, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef PARANDIS_56_COMMON_H
#define PARANDIS_56_COMMON_H
//#define PARANDIS_TEST_TX_KICK_ALWAYS
#if defined(OFFLOAD_UNIT_TEST)
#include <windows.h>
#include <stdio.h>
#define ETH_LENGTH_OF_ADDRESS 6
#define DoPrint(fmt, ...) printf(fmt##"\n", __VA_ARGS__)
#define DPrintf(a,b) DoPrint b
#define RtlOffsetToPointer(B,O) ((PCHAR)( ((PCHAR)(B)) + ((ULONG_PTR)(O)) ))
#include "ethernetutils.h"
#endif //+OFFLOAD_UNIT_TEST
#if !defined(OFFLOAD_UNIT_TEST)
#if !defined(RtlOffsetToPointer)
#define RtlOffsetToPointer(Base,Offset) ((PCHAR)(((PCHAR)(Base))+((ULONG_PTR)(Offset))))
#endif
#if !defined(RtlPointerToOffset)
#define RtlPointerToOffset(Base,Pointer) ((ULONG)(((PCHAR)(Pointer))-((PCHAR)(Base))))
#endif
#include <ndis.h>
#include "osdep.h"
#include "kdebugprint.h"
#include "ethernetutils.h"
#include "virtio_pci.h"
#include "VirtIO.h"
#include "virtio_ring.h"
#include "IONetDescriptor.h"
#include "DebugData.h"
// those stuff defined in NDIS
//NDIS_MINIPORT_MAJOR_VERSION
//NDIS_MINIPORT_MINOR_VERSION
// those stuff defined in build environment
// PARANDIS_MAJOR_DRIVER_VERSION
// PARANDIS_MINOR_DRIVER_VERSION
// The build environment must define the NDIS version; fail loudly otherwise.
#if !defined(NDIS_MINIPORT_MAJOR_VERSION) || !defined(NDIS_MINIPORT_MINOR_VERSION)
#error "Something is wrong with NDIS environment"
#endif
//define to see when the status register is unreadable(see ParaNdis_ResetVirtIONetDevice)
//#define VIRTIO_RESET_VERIFY
//define to if hardware raise interrupt on error (see ParaNdis_DPCWorkBody)
//#define VIRTIO_SIGNAL_ERROR
// define if qemu supports logging to static IO port for synchronization
// of driver output with qemu printouts; in this case define the port number
// #define VIRTIO_DBG_USE_IOPORT 0x99
// to be set to real limit later
#define MAX_RX_LOOPS 1000
// maximum number of virtio queues used by the driver
#define MAX_NUM_OF_QUEUES 3
/* The feature bitmap for virtio net (bit numbers per the virtio specification) */
#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
#define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */
#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
#define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */
#define VIRTIO_NET_F_GUEST_UFO 10 /* Guest can handle UFO in. */
#define VIRTIO_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */
#define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */
#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */
#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */
#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can handle merged Rx buffers and requires bigger header for that. */
#define VIRTIO_NET_F_STATUS 16
#define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */
#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */
#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */
#define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */
#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
#define VIRTIO_NET_INVALID_INTERRUPT_STATUS 0xFF
// Driver-local constants
#define PARANDIS_MULTICAST_LIST_SIZE 32
#define PARANDIS_MEMORY_TAG '5muQ'
// Reported link speed comes from the adapter context
#define PARANDIS_FORMAL_LINK_SPEED (pContext->ulFormalLinkSpeed)
#define PARANDIS_MAXIMUM_TRANSMIT_SPEED PARANDIS_FORMAL_LINK_SPEED
#define PARANDIS_MAXIMUM_RECEIVE_SPEED PARANDIS_FORMAL_LINK_SPEED
#define PARANDIS_MIN_LSO_SEGMENTS 2
// reported
#define PARANDIS_MAX_LSO_SIZE 0xF800
#define PARANDIS_UNLIMITED_PACKETS_TO_INDICATE (~0ul)
extern VirtIOSystemOps ParaNdisSystemOps;
/* Bitmask describing which source(s) an interrupt/DPC should service. */
typedef enum _tagInterruptSource
{
    isControl = VIRTIO_PCI_ISR_CONFIG,  // configuration-change interrupt
    isReceive = 0x10,
    isTransmit = 0x20,
    isUnknown = 0x40,
    isBothTransmitReceive = isReceive | isTransmit,
    isAny = isReceive | isTransmit | isControl | isUnknown,
    isDisable = 0x80                    // request to mask interrupts
}tInterruptSource;
// Packet filter types the miniport reports as supported
static const ULONG PARANDIS_PACKET_FILTERS =
    NDIS_PACKET_TYPE_DIRECTED |
    NDIS_PACKET_TYPE_MULTICAST |
    NDIS_PACKET_TYPE_BROADCAST |
    NDIS_PACKET_TYPE_PROMISCUOUS |
    NDIS_PACKET_TYPE_ALL_MULTICAST;
// Completion callback invoked when a send/receive pause operation finishes
typedef VOID (*ONPAUSECOMPLETEPROC)(VOID *);
/* State machine for the send and receive paths (pause/resume support). */
typedef enum _tagSendReceiveState
{
    srsDisabled = 0, // initial state
    srsPausing,
    srsEnabled
} tSendReceiveState;
/* One mapped PCI BAR or shared-memory range. */
typedef struct _tagBusResource {
    NDIS_PHYSICAL_ADDRESS BasePA;   // bus physical base address
    ULONG uLength;
    PVOID pBase;                    // mapped virtual address (NULL if not mapped)
    BOOLEAN bPortSpace;             // TRUE for I/O port space, FALSE for memory space
    BOOLEAN bUsed;
} tBusResource;
/* Hardware resources assigned to the adapter by PnP. */
typedef struct _tagAdapterResources
{
    tBusResource PciBars[PCI_TYPE0_ADDRESSES];
    ULONG Vector;
    ULONG Level;
    KAFFINITY Affinity;
    ULONG InterruptFlags;
} tAdapterResources;
/* Bit positions of individual offload capabilities as stored in configuration
 * (T4 = IPv4, T6 = IPv6; Tx unless marked Rx). */
typedef enum _tagOffloadSettingsBit
{
    osbT4IpChecksum = (1 << 0),
    osbT4TcpChecksum = (1 << 1),
    osbT4UdpChecksum = (1 << 2),
    osbT4TcpOptionsChecksum = (1 << 3),
    osbT4IpOptionsChecksum = (1 << 4),
    osbT4Lso = (1 << 5),
    osbT4LsoIp = (1 << 6),
    osbT4LsoTcp = (1 << 7),
    osbT4RxTCPChecksum = (1 << 8),
    osbT4RxTCPOptionsChecksum = (1 << 9),
    osbT4RxIPChecksum = (1 << 10),
    osbT4RxIPOptionsChecksum = (1 << 11),
    osbT4RxUDPChecksum = (1 << 12),
    osbT6TcpChecksum = (1 << 13),
    osbT6UdpChecksum = (1 << 14),
    osbT6TcpOptionsChecksum = (1 << 15),
    osbT6IpExtChecksum = (1 << 16),
    osbT6Lso = (1 << 17),
    osbT6LsoIpExt = (1 << 18),
    osbT6LsoTcpOptions = (1 << 19),
    osbT6RxTCPChecksum = (1 << 20),
    osbT6RxTCPOptionsChecksum = (1 << 21),
    osbT6RxUDPChecksum = (1 << 22),
    osbT6RxIpExtChecksum = (1 << 23),
}tOffloadSettingsBit;
/* Same capabilities unpacked into one-bit flags for fast run-time checks. */
typedef struct _tagOffloadSettingsFlags
{
    ULONG fTxIPChecksum : 1;
    ULONG fTxTCPChecksum : 1;
    ULONG fTxUDPChecksum : 1;
    ULONG fTxTCPOptions : 1;
    ULONG fTxIPOptions : 1;
    ULONG fTxLso : 1;
    ULONG fTxLsoIP : 1;
    ULONG fTxLsoTCP : 1;
    ULONG fRxIPChecksum : 1;
    ULONG fRxTCPChecksum : 1;
    ULONG fRxUDPChecksum : 1;
    ULONG fRxTCPOptions : 1;
    ULONG fRxIPOptions : 1;
    ULONG fTxTCPv6Checksum : 1;
    ULONG fTxUDPv6Checksum : 1;
    ULONG fTxTCPv6Options : 1;
    ULONG fTxIPv6Ext : 1;
    ULONG fTxLsov6 : 1;
    ULONG fTxLsov6IP : 1;
    ULONG fTxLsov6TCP : 1;
    ULONG fRxTCPv6Checksum : 1;
    ULONG fRxUDPv6Checksum : 1;
    ULONG fRxTCPv6Options : 1;
    ULONG fRxIPv6Ext : 1;
}tOffloadSettingsFlags;
typedef struct _tagOffloadSettings
{
    /* current value of enabled offload features */
    tOffloadSettingsFlags flags;
    /* load once, do not modify - bitmask of offload features, enabled in configuration */
    ULONG flagsValue;
    ULONG ipHeaderOffset;
    ULONG maxPacketSize;
}tOffloadSettings;
/* Outcome of a software checksum verification on a received packet. */
typedef struct _tagChecksumCheckResult
{
    union
    {
        struct
        {
            ULONG TcpFailed :1;
            ULONG UdpFailed :1;
            ULONG IpFailed :1;
            ULONG TcpOK :1;
            ULONG UdpOK :1;
            ULONG IpOK :1;
        } flags;
        ULONG value;    // all flags at once, for quick zero-test
    };
}tChecksumCheckResult;
/*
for simplicity, we use for NDIS5 the same statistics as native NDIS6 uses
*/
// NOTE: deliberately reuses the NDIS6 type name so common code compiles for NDIS5
typedef struct _tagNdisStatistics
{
    ULONG64 ifHCInOctets;
    ULONG64 ifHCInUcastPkts;
    ULONG64 ifHCInUcastOctets;
    ULONG64 ifHCInMulticastPkts;
    ULONG64 ifHCInMulticastOctets;
    ULONG64 ifHCInBroadcastPkts;
    ULONG64 ifHCInBroadcastOctets;
    ULONG64 ifInDiscards;
    ULONG64 ifInErrors;
    ULONG64 ifHCOutOctets;
    ULONG64 ifHCOutUcastPkts;
    ULONG64 ifHCOutUcastOctets;
    ULONG64 ifHCOutMulticastPkts;
    ULONG64 ifHCOutMulticastOctets;
    ULONG64 ifHCOutBroadcastPkts;
    ULONG64 ifHCOutBroadcastOctets;
    ULONG64 ifOutDiscards;
    ULONG64 ifOutErrors;
}NDIS_STATISTICS_INFO;
// NDIS5 abstraction: all packet representations are NDIS_PACKET pointers here
typedef PNDIS_PACKET tPacketType;
typedef PNDIS_PACKET tPacketHolderType;
typedef PNDIS_PACKET tPacketIndicationType;
// NDIS6-style offload parameter block, re-declared for the NDIS5 build
typedef struct _tagNdisOffloadParams
{
    UCHAR IPv4Checksum;
    UCHAR TCPIPv4Checksum;
    UCHAR UDPIPv4Checksum;
    UCHAR LsoV1;
    UCHAR LsoV2IPv4;
    UCHAR TCPIPv6Checksum;
    UCHAR UDPIPv6Checksum;
    UCHAR LsoV2IPv6;
}NDIS_OFFLOAD_PARAMETERS;
//#define UNIFY_LOCKS
/* Diagnostic counters sampled periodically to detect Rx/Tx inactivity. */
typedef struct _tagOurCounters
{
    UINT nReusedRxBuffers;
    UINT nPrintDiagnostic;
    ULONG64 prevIn;
    UINT nRxInactivity;
}tOurCounters;
/* Packet size limits: data payload vs. full frame, as seen by OS and by HW. */
typedef struct _tagMaxPacketSize
{
    UINT nMaxDataSize;
    UINT nMaxFullSizeOS;
    UINT nMaxFullSizeHwTx;
    UINT nMaxFullSizeHwRx;
}tMaxPacketSize;
/* A shared-memory allocation known by both its physical and virtual address. */
typedef struct _tagCompletePhysicalAddress
{
    PHYSICAL_ADDRESS Physical;
    PVOID Virtual;
    ULONG size;
    ULONG IsCached : 1;
    ULONG IsTX : 1;     // allocated for the transmit path
} tCompletePhysicalAddress;
/* Current multicast filter list (flat array of MAC addresses). */
typedef struct _tagMulticastData
{
    ULONG nofMulticastEntries;
    UCHAR MulticastList[ETH_LENGTH_OF_ADDRESS * PARANDIS_MULTICAST_LIST_SIZE];
}tMulticastData;
/* One Rx/Tx buffer descriptor: virtio header area + data area + NDIS packet. */
typedef struct _tagIONetDescriptor {
    LIST_ENTRY listEntry;                   // linkage in one of the adapter's buffer lists
    tCompletePhysicalAddress HeaderInfo;    // virtio net header
    tCompletePhysicalAddress DataInfo;      // payload area
    tPacketHolderType pHolder;              // associated NDIS packet
    PVOID ReferenceValue;
    UINT nofUsedBuffers;
} IONetDescriptor, * pIONetDescriptor;
// Routine that returns a receive buffer descriptor to the virtio queue
typedef void (*tReuseReceiveBufferProc)(void *pContext, pIONetDescriptor pDescriptor);
/*
 * Per-adapter context: the central state block shared by the common code and
 * the NDIS5/NDIS6 specific layers. Allocated at miniport initialization.
 */
typedef struct _tagPARANDIS_ADAPTER
{
    NDIS_HANDLE DriverHandle;
    NDIS_HANDLE MiniportHandle;
    NDIS_EVENT ResetEvent;
    tAdapterResources AdapterResources;
    tBusResource SharedMemoryRanges[MAX_NUM_OF_QUEUES];
    VirtIODevice IODevice;
    BOOLEAN bIODeviceInitialized;
    // feature bits: offered by the host / acknowledged by this driver
    ULONGLONG ullHostFeatures;
    ULONGLONG ullGuestFeatures;
    LARGE_INTEGER LastTxCompletionTimeStamp;
#ifdef PARANDIS_DEBUG_INTERRUPTS
    LARGE_INTEGER LastInterruptTimeStamp;
#endif
    // configuration and capability switches (mostly registry/feature driven)
    BOOLEAN bConnected;
    BOOLEAN bEnableInterruptHandlingDPC;
    BOOLEAN bEnableInterruptChecking;
    BOOLEAN bDoInterruptRecovery;
    BOOLEAN bDoSupportPriority;
    BOOLEAN bDoHwPacketFiltering;
    BOOLEAN bUseScatterGather;
    BOOLEAN bBatchReceive;
    BOOLEAN bLinkDetectSupported;
    BOOLEAN bDoHardwareChecksum;
    BOOLEAN bDoGuestChecksumOnReceive;
    BOOLEAN bDoIPCheckTx;
    BOOLEAN bDoIPCheckRx;
    BOOLEAN bUseMergedBuffers;
    BOOLEAN bDoKickOnNoBuffer;
    BOOLEAN bSurprizeRemoved;
    BOOLEAN bUsingMSIX;
    BOOLEAN bUseIndirect;
    BOOLEAN bHasHardwareFilters;
    BOOLEAN bHasControlQueue;
    BOOLEAN bNoPauseOnSuspend;
    BOOLEAN bFastSuspendInProcess;
    BOOLEAN bResetInProgress;
    ULONG ulCurrentVlansFilterSet;
    tMulticastData MulticastData;
    UINT uNumberOfHandledRXPacketsInDPC;
    NDIS_DEVICE_POWER_STATE powerState;
    // DPC bookkeeping
    LONG dpcReceiveActive;
    LONG counterDPCInside;
    LONG bDPCInactive;
    LONG InterruptStatus;
    ULONG ulPriorityVlanSetting;
    ULONG VlanId;
    ULONGLONG ulFormalLinkSpeed;
    ULONG ulEnableWakeup;
    tMaxPacketSize MaxPacketSize;
    ULONG nEnableDPCChecker;
    ULONG ulUniqueID;
    UCHAR PermanentMacAddress[ETH_LENGTH_OF_ADDRESS];
    UCHAR CurrentMacAddress[ETH_LENGTH_OF_ADDRESS];
    ULONG PacketFilter;
    ULONG DummyLookAhead;
    ULONG ulMilliesToConnect;
    ULONG nDetectedStoppedTx;
    ULONG nDetectedInactivity;
    ULONG nVirtioHeaderSize;
    /* send part */
#if !defined(UNIFY_LOCKS)
    NDIS_SPIN_LOCK SendLock;
    NDIS_SPIN_LOCK ReceiveLock;
#else
    // UNIFY_LOCKS builds share one spin lock for both paths
    union
    {
        NDIS_SPIN_LOCK SendLock;
        NDIS_SPIN_LOCK ReceiveLock;
    };
#endif
    NDIS_STATISTICS_INFO Statistics;
    struct
    {
        ULONG framesCSOffload;
        ULONG framesLSO;
        ULONG framesIndirect;
        ULONG framesRxPriority;
        ULONG framesRxCSHwOK;
        ULONG framesRxCSHwMissedBad;
        ULONG framesRxCSHwMissedGood;
        ULONG framesFilteredOut;
    } extraStatistics;
    tOurCounters Counters;
    tOurCounters Limits;
    tSendReceiveState SendState;
    tSendReceiveState ReceiveState;
    ONPAUSECOMPLETEPROC SendPauseCompletionProc;
    ONPAUSECOMPLETEPROC ReceivePauseCompletionProc;
    tReuseReceiveBufferProc ReuseBufferProc;
    /* Net part - management of buffers and queues of QEMU */
    struct virtqueue * NetControlQueue;
    tCompletePhysicalAddress ControlData;
    struct virtqueue * NetReceiveQueue;
    struct virtqueue * NetSendQueue;
    /* list of Rx buffers available for data (under VIRTIO management) */
    LIST_ENTRY NetReceiveBuffers;
    UINT NetNofReceiveBuffers;
    /* list of Rx buffers waiting for return (under NDIS management) */
    LIST_ENTRY NetReceiveBuffersWaiting;
    /* list of Tx buffers in process (under VIRTIO management) */
    LIST_ENTRY NetSendBuffersInUse;
    /* list of Tx buffers ready for data (under MINIPORT management) */
    LIST_ENTRY NetFreeSendBuffers;
    /* current number of free Tx descriptors */
    UINT nofFreeTxDescriptors;
    /* initial number of free Tx descriptor(from cfg) - max number of available Tx descriptors */
    UINT maxFreeTxDescriptors;
    /* current number of free Tx buffers, which can be submitted */
    UINT nofFreeHardwareBuffers;
    /* maximal number of free Tx buffers, which can be used by SG */
    UINT maxFreeHardwareBuffers;
    /* minimal number of free Tx buffers */
    UINT minFreeHardwareBuffers;
    /* current number of Tx packets (or lists) to return */
    LONG NetTxPacketsToReturn;
    /* total of Rx buffer in turnaround */
    UINT NetMaxReceiveBuffers;
    struct VirtIOBufferDescriptor *sgTxGatherTable;
    UINT nPnpEventIndex;
    NDIS_DEVICE_PNP_EVENT PnpEvents[16];
    tOffloadSettings Offload;
    NDIS_OFFLOAD_PARAMETERS InitialOffloadParameters;
    // we keep these members common for XP and Vista
    // for XP and non-MSI case of Vista they are set to zero
    ULONG ulRxMessage;
    ULONG ulTxMessage;
    ULONG ulControlMessage;
    NDIS_MINIPORT_INTERRUPT Interrupt;
    NDIS_HANDLE PacketPool;
    NDIS_HANDLE BuffersPool;
    NDIS_HANDLE WrapperConfigurationHandle;
    LIST_ENTRY SendQueue;
    LIST_ENTRY TxWaitingList;
    NDIS_EVENT HaltEvent;
    NDIS_TIMER ConnectTimer;
    NDIS_TIMER DPCPostProcessTimer;
    BOOLEAN bDmaInitialized;
}PARANDIS_ADAPTER, *PPARANDIS_ADAPTER;
// Possible outcomes when copying/submitting a packet to the transmit queue
typedef enum { cpeOK, cpeNoBuffer, cpeInternalError, cpeTooLarge, cpeNoIndirect } tCopyPacketError;
typedef struct _tagCopyPacketResult
{
    ULONG size;             // number of bytes actually copied/submitted
    tCopyPacketError error;
}tCopyPacketResult;
/* Context passed through NdisMSynchronizeWithInterrupt-style calls. */
typedef struct _tagSynchronizedContext
{
    PARANDIS_ADAPTER *pContext;
    PVOID Parameter;
}tSynchronizedContext;
typedef BOOLEAN (*tSynchronizedProcedure)(tSynchronizedContext *context);
/**********************************************************
LAZY release procedure returns buffers to VirtIO
only where there are no free buffers available
NON-LAZY release releases transmit buffers from VirtIO
library every time there is something to release
***********************************************************/
//#define LAZY_TX_RELEASE
/* Returns whether the host offered virtio feature bit uFeature. */
static inline bool VirtIODeviceGetHostFeature(PARANDIS_ADAPTER *pContext, unsigned uFeature)
{
    DPrintf(4, ("%s\n", __FUNCTION__));
    return virtio_is_feature_enabled(pContext->ullHostFeatures, uFeature);
}
/* Marks virtio feature bit uFeature as accepted in the guest feature set. */
static inline void VirtIODeviceEnableGuestFeature(PARANDIS_ADAPTER *pContext, unsigned uFeature)
{
    DPrintf(4, ("%s\n", __FUNCTION__));
    virtio_feature_enable(pContext->ullGuestFeatures, uFeature);
}
/* Decides when to reclaim completed Tx buffers from virtio:
 * default (non-LAZY) - whenever any descriptor is outstanding;
 * LAZY_TX_RELEASE    - only when the free-descriptor pool is exhausted. */
static BOOLEAN FORCEINLINE IsTimeToReleaseTx(PARANDIS_ADAPTER *pContext)
{
#ifndef LAZY_TX_RELEASE
    return pContext->nofFreeTxDescriptors < pContext->maxFreeTxDescriptors;
#else
    return pContext->nofFreeTxDescriptors == 0;
#endif
}
/* A VLAN id matches when the adapter has no id configured (0 = accept any)
 * or the packet's id equals the configured one. */
static BOOLEAN FORCEINLINE IsValidVlanId(PARANDIS_ADAPTER *pContext, ULONG VlanID)
{
    if (pContext->VlanId == 0)
    {
        return TRUE;
    }
    return pContext->VlanId == VlanID;
}
/* Bit 1 of ulPriorityVlanSetting enables VLAN processing.
 * Note: returns the masked value (2), i.e. non-zero, not a normalized 1. */
static BOOLEAN FORCEINLINE IsVlanSupported(PARANDIS_ADAPTER *pContext)
{
    return pContext->ulPriorityVlanSetting & 2;
}
/* Bit 0 of ulPriorityVlanSetting enables 802.1p priority processing. */
static BOOLEAN FORCEINLINE IsPrioritySupported(PARANDIS_ADAPTER *pContext)
{
    return pContext->ulPriorityVlanSetting & 1;
}
/**********************************************************
Common (NDIS-version-independent) entry points, implemented
in common/ParaNdis-Common.c and related files
***********************************************************/
// Checks a MAC address for validity (bLocal: address set by administrator)
BOOLEAN ParaNdis_ValidateMacAddress(
    PUCHAR pcMacAddress,
    BOOLEAN bLocal);
// Initializes the adapter context from the PnP resource list
NDIS_STATUS ParaNdis_InitializeContext(
    PARANDIS_ADAPTER *pContext,
    PNDIS_RESOURCE_LIST ResourceList);
NDIS_STATUS ParaNdis_FinishInitialization(
    PARANDIS_ADAPTER *pContext);
VOID ParaNdis_CleanupContext(
    PARANDIS_ADAPTER *pContext);
// Reclaims completed Tx buffers; returns the number released
UINT ParaNdis_VirtIONetReleaseTransmitBuffers(
    PARANDIS_ADAPTER *pContext);
// Main DPC worker; returns number of Rx packets indicated
ULONG ParaNdis_DPCWorkBody(
    PARANDIS_ADAPTER *pContext,
    ULONG ulMaxPacketsToIndicate);
NDIS_STATUS ParaNdis_SetMulticastList(
    PARANDIS_ADAPTER *pContext,
    PVOID Buffer,
    ULONG BufferSize,
    PUINT pBytesRead,
    PUINT pBytesNeeded);
// Enable/disable virtio interrupts for the given source(s); caller synchronizes
VOID ParaNdis_VirtIOEnableIrqSynchronized(
    PARANDIS_ADAPTER *pContext,
    ULONG interruptSource);
VOID ParaNdis_VirtIODisableIrqSynchronized(
    PARANDIS_ADAPTER *pContext,
    ULONG interruptSource);
/* Maps an interrupt source bit to its virtqueue (NULL for control/unknown). */
static __inline struct virtqueue *
ParaNdis_GetQueueForInterrupt(PARANDIS_ADAPTER *pContext, ULONG interruptSource)
{
    if (interruptSource & isTransmit)
        return pContext->NetSendQueue;
    if (interruptSource & isReceive)
        return pContext->NetReceiveQueue;
    return NULL;
}
static __inline BOOLEAN
ParaNDIS_IsQueueInterruptEnabled(struct virtqueue * _vq)
{
    return virtqueue_is_interrupt_enabled(_vq);
}
VOID ParaNdis_OnPnPEvent(
    PARANDIS_ADAPTER *pContext,
    NDIS_DEVICE_PNP_EVENT pEvent,
    PVOID pInfo,
    ULONG ulSize);
// ISR handlers; *pRunDpc receives whether a DPC must be queued
BOOLEAN ParaNdis_OnLegacyInterrupt(
    PARANDIS_ADAPTER *pContext,
    BOOLEAN *pRunDpc);
BOOLEAN ParaNdis_OnQueuedInterrupt(
    PARANDIS_ADAPTER *pContext,
    BOOLEAN *pRunDpc,
    ULONG knownInterruptSources);
VOID ParaNdis_OnShutdown(
    PARANDIS_ADAPTER *pContext);
BOOLEAN ParaNdis_CheckForHang(
    PARANDIS_ADAPTER *pContext);
VOID ParaNdis_ReportLinkStatus(
    PARANDIS_ADAPTER *pContext,
    BOOLEAN bForce);
NDIS_STATUS ParaNdis_PowerOn(
    PARANDIS_ADAPTER *pContext
);
VOID ParaNdis_PowerOff(
    PARANDIS_ADAPTER *pContext
);
// Debug infrastructure lifecycle (see common/ParaNdis-Debug.c)
void ParaNdis_DebugInitialize(PVOID DriverObject,PVOID RegistryPath);
void ParaNdis_DebugCleanup(PDRIVER_OBJECT pDriverObject);
void ParaNdis_DebugRegisterMiniport(PARANDIS_ADAPTER *pContext, BOOLEAN bRegister);
//#define ENABLE_HISTORY_LOG
#if !defined(ENABLE_HISTORY_LOG)
/* History logging disabled: compile to an empty inline so call sites vanish. */
static void FORCEINLINE ParaNdis_DebugHistory(
    PARANDIS_ADAPTER *pContext,
    eHistoryLogOperation op,
    PVOID pParam1,
    ULONG lParam2,
    ULONG lParam3,
    ULONG lParam4)
{
}
#else
void ParaNdis_DebugHistory(
    PARANDIS_ADAPTER *pContext,
    eHistoryLogOperation op,
    PVOID pParam1,
    ULONG lParam2,
    ULONG lParam3,
    ULONG lParam4);
#endif
/* Parameters describing one transmit operation (copy or scatter-gather). */
typedef struct _tagTxOperationParameters
{
    tPacketType packet;
    PVOID ReferenceValue;
    UINT nofSGFragments;
    ULONG ulDataSize;
    ULONG offloadMss;       // MSS for LSO, 0 if not LSO
    ULONG tcpHeaderOffset;
    ULONG flags; //see tPacketOffloadRequest
}tTxOperationParameters;
tCopyPacketResult ParaNdis_DoCopyPacketData(
    PARANDIS_ADAPTER *pContext,
    tTxOperationParameters *pParams);
/* Result of mapping a packet's buffers for scatter-gather transmission. */
typedef struct _tagMapperResult
{
    USHORT usBuffersMapped;
    USHORT usBufferSpaceUsed;
    ULONG ulDataSize;
}tMapperResult;
tCopyPacketResult ParaNdis_DoSubmitPacket(PARANDIS_ADAPTER *pContext, tTxOperationParameters *Params);
// Rebuilds the offload flags structure from a configuration bitmask
void ParaNdis_ResetOffloadSettings(PARANDIS_ADAPTER *pContext, tOffloadSettingsFlags *pDest, PULONG from);
tChecksumCheckResult ParaNdis_CheckRxChecksum(PARANDIS_ADAPTER *pContext, ULONG virtioFlags, PVOID pRxPacket, ULONG len);
void ParaNdis_CallOnBugCheck(PARANDIS_ADAPTER *pContext);
/*****************************************************
Procedures to implement for NDIS specific implementation
(supplied by the wxp/ NDIS5 or vista/ NDIS6 layer)
******************************************************/
PVOID ParaNdis_AllocateMemory(
    PARANDIS_ADAPTER *pContext,
    ULONG ulRequiredSize);
NDIS_STATUS NTAPI ParaNdis_FinishSpecificInitialization(
    PARANDIS_ADAPTER *pContext);
VOID ParaNdis_FinalizeCleanup(
    PARANDIS_ADAPTER *pContext);
NDIS_HANDLE ParaNdis_OpenNICConfiguration(
    PARANDIS_ADAPTER *pContext);
// Prepares (bPrepareOnly) or indicates a received packet up to NDIS
tPacketIndicationType ParaNdis_IndicateReceivedPacket(
    PARANDIS_ADAPTER *pContext,
    PVOID dataBuffer,
    PULONG pLength,
    BOOLEAN bPrepareOnly,
    pIONetDescriptor pBufferDesc);
VOID ParaNdis_IndicateReceivedBatch(
    PARANDIS_ADAPTER *pContext,
    tPacketIndicationType *pBatch,
    ULONG nofPackets);
// Fills the virtio buffer descriptor array from an NDIS packet's fragments
VOID ParaNdis_PacketMapper(
    PARANDIS_ADAPTER *pContext,
    tPacketType packet,
    PVOID Reference,
    struct VirtIOBufferDescriptor *buffers,
    pIONetDescriptor pDesc,
    tMapperResult *pMapperResult
);
tCopyPacketResult ParaNdis_PacketCopier(
    tPacketType packet,
    PVOID dest,
    ULONG maxSize,
    PVOID refValue,
    BOOLEAN bPreview);
BOOLEAN ParaNdis_ProcessTx(
    PARANDIS_ADAPTER *pContext,
    BOOLEAN IsDpc,
    BOOLEAN IsInterrupt);
BOOLEAN ParaNdis_SetTimer(
    NDIS_HANDLE timer,
    LONG millies);
BOOLEAN ParaNdis_SynchronizeWithInterrupt(
    PARANDIS_ADAPTER *pContext,
    ULONG messageId,
    tSynchronizedProcedure procedure,
    PVOID parameter);
VOID ParaNdis_Suspend(
    PARANDIS_ADAPTER *pContext);
VOID ParaNdis_Resume(
    PARANDIS_ADAPTER *pContext);
VOID ParaNdis_OnTransmitBufferReleased(
    PARANDIS_ADAPTER *pContext,
    IONetDescriptor *pDesc);
// Callback invoked for each additionally allocated shared-memory block
typedef VOID (*tOnAdditionalPhysicalMemoryAllocated)(
    PARANDIS_ADAPTER *pContext,
    tCompletePhysicalAddress *pAddresses);
typedef struct _tagPhysicalAddressAllocationContext
{
    tCompletePhysicalAddress address;
    PARANDIS_ADAPTER *pContext;
    tOnAdditionalPhysicalMemoryAllocated Callback;
} tPhysicalAddressAllocationContext;
BOOLEAN ParaNdis_InitialAllocatePhysicalMemory(
    PARANDIS_ADAPTER *pContext,
    tCompletePhysicalAddress *pAddresses);
VOID ParaNdis_FreePhysicalMemory(
    PARANDIS_ADAPTER *pContext,
    tCompletePhysicalAddress *pAddresses);
BOOLEAN ParaNdis_BindBufferToPacket(
    PARANDIS_ADAPTER *pContext,
    pIONetDescriptor pBufferDesc);
void ParaNdis_UnbindBufferFromPacket(
    PARANDIS_ADAPTER *pContext,
    pIONetDescriptor pBufferDesc);
void ParaNdis_IndicateConnect(
    PARANDIS_ADAPTER *pContext,
    BOOLEAN bConnected,
    BOOLEAN bForce);
void ParaNdis_RestoreDeviceConfigurationAfterReset(
    PARANDIS_ADAPTER *pContext);
VOID ParaNdis_UpdateDeviceFilters(
    PARANDIS_ADAPTER *pContext);
VOID ParaNdis_DeviceFiltersUpdateVlanId(
    PARANDIS_ADAPTER *pContext);
VOID ParaNdis_SetPowerState(
    PARANDIS_ADAPTER *pContext,
    NDIS_DEVICE_POWER_STATE newState);
#endif //-OFFLOAD_UNIT_TEST
/* Per-field values for tTcpIpPacketParsingResult below; the same small enum
 * is reused for several 2-bit fields, so values intentionally overlap. */
typedef enum _tagppResult
{
    ppresNotTested = 0,
    ppresNotIP = 1,
    ppresIPV4 = 2,
    ppresIPV6 = 3,
    ppresIPTooShort = 1,
    ppresPCSOK = 1,
    ppresCSOK = 2,
    ppresCSBad = 3,
    ppresXxpOther = 1,
    ppresXxpKnown = 2,
    ppresXxpIncomplete = 3,
    ppresIsTCP = 0,
    ppresIsUDP = 1,
}ppResult;
/* Compact result of software parsing/checksum verification of a frame
 * ("xxp" = the transport protocol, TCP or UDP). */
typedef union _tagTcpIpPacketParsingResult
{
    struct {
        /* 0 - not tested, 1 - not IP, 2 - IPV4, 3 - IPV6 */
        ULONG ipStatus : 2;
        /* 0 - not tested, 1 - n/a, 2 - CS, 3 - bad */
        ULONG ipCheckSum : 2;
        /* 0 - not tested, 1 - PCS, 2 - CS, 3 - bad */
        ULONG xxpCheckSum : 2;
        /* 0 - not tested, 1 - other, 2 - known(contains basic TCP or UDP header), 3 - known incomplete */
        ULONG xxpStatus : 2;
        /* 1 - contains complete payload */
        ULONG xxpFull : 1;
        ULONG TcpUdp : 1;       // 0 - TCP, 1 - UDP (see ppresIsTCP/ppresIsUDP)
        ULONG fixedIpCS : 1;    // IP checksum was recalculated in software
        ULONG fixedXxpCS : 1;   // transport checksum was recalculated in software
        ULONG IsFragment : 1;
        ULONG reserved : 3;
        ULONG ipHeaderSize : 8;
        ULONG XxpIpHeaderSize : 8;
    };
    ULONG value;
}tTcpIpPacketParsingResult;
/* Flags describing which offload actions are requested/required for a packet
 * ("pcr" = packet checksum request; "Fix" = compute in software). */
typedef enum _tagPacketOffloadRequest
{
    pcrIpChecksum = (1 << 0),
    pcrTcpV4Checksum = (1 << 1),
    pcrUdpV4Checksum = (1 << 2),
    pcrTcpV6Checksum = (1 << 3),
    pcrUdpV6Checksum = (1 << 4),
    pcrTcpChecksum = (pcrTcpV4Checksum | pcrTcpV6Checksum),
    pcrUdpChecksum = (pcrUdpV4Checksum | pcrUdpV6Checksum),
    pcrAnyChecksum = (pcrIpChecksum | pcrTcpV4Checksum | pcrUdpV4Checksum | pcrTcpV6Checksum | pcrUdpV6Checksum),
    pcrLSO = (1 << 5),
    pcrIsIP = (1 << 6),
    pcrFixIPChecksum = (1 << 7),
    pcrFixPHChecksum = (1 << 8),    // fix pseudo-header checksum
    pcrFixTcpV4Checksum = (1 << 9),
    pcrFixUdpV4Checksum = (1 << 10),
    pcrFixTcpV6Checksum = (1 << 11),
    pcrFixUdpV6Checksum = (1 << 12),
    pcrFixXxpChecksum = (pcrFixTcpV4Checksum | pcrFixUdpV4Checksum | pcrFixTcpV6Checksum | pcrFixUdpV6Checksum),
    pcrPriorityTag = (1 << 13),
    pcrNoIndirect = (1 << 14)
}tPacketOffloadRequest;
// sw offload
// Software checksum/parsing helpers implemented in common/sw-offload.c
tTcpIpPacketParsingResult ParaNdis_CheckSumVerify(PVOID buffer, ULONG size, ULONG flags, LPCSTR caller);
tTcpIpPacketParsingResult ParaNdis_ReviewIPPacket(PVOID buffer, ULONG size, LPCSTR caller);
// Pads a short received packet up to the Ethernet minimum, updating *pLength
void ParaNdis_PadPacketReceived(PVOID pDataBuffer, PULONG pLength);
#endif

View file

@ -0,0 +1,45 @@
/*
* This file contains version resource related definitions
*
* Copyright (c) 2008-2017 Red Hat, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <windows.h>
#include "ntverp.h"
#include "vendor.ver"
// Override the DDK-provided values with driver-specific version strings
#undef __BUILDMACHINE__
#undef VER_FILEDESCRIPTION_STR
#undef VER_INTERNALNAME_STR
#define VER_LANGNEUTRAL
#define VENDOR_VIRTIO_PRODUCT VENDOR_PRODUCT_PREFIX "VirtIO Ethernet Adapter"
#define VER_FILEDESCRIPTION_STR VENDOR_DESC_PREFIX "NDIS Miniport Driver" VENDOR_DESC_POSTFIX
#define VER_INTERNALNAME_STR "netkvm.sys"
#include "common.ver"

View file

@ -0,0 +1,64 @@
/*
* This file contains rhel vendor specific
* resource (version) definitions for all drivers
*
* Copyright (c) 2017 Parallels IP Holdings GmbH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
 * Fallback values so the Visual Studio built-in rc editor can render the
 * resource; real builds are expected to define these externally.
 */
#ifndef _NT_TARGET_MAJ
#define _NT_TARGET_MAJ 1
#define _RHEL_RELEASE_VERSION_ 20
#define _BUILD_MAJOR_VERSION_ 300
#define _BUILD_MINOR_VERSION_ 5800
#endif
/*
 * rhel versioning: the four components of the file/product version number
 * (NT target major . RHEL release . build major . build minor)
 */
#define VENDOR_VIRTIO_1 _NT_TARGET_MAJ
#define VENDOR_VIRTIO_2 _RHEL_RELEASE_VERSION_
#define VENDOR_VIRTIO_3 _BUILD_MAJOR_VERSION_
#define VENDOR_VIRTIO_4 _BUILD_MINOR_VERSION_
/*
 * rhel strings ("\040" is an octal-escaped space character)
 */
#define VENDOR_VIRTIO_COPYRIGHT "Copyright (C) " STRINGIFY(RHEL_COPYRIGHT_YEARS) " Red Hat, Inc."
#define VENDOR_VIRTIO_COMPANY "Red Hat,\040Inc."
#define VENDOR_PREFIX "Red Hat\040"
#define VENDOR_PRODUCT_PREFIX VENDOR_PREFIX
#define QEMU_PRODUCT_PREFIX "QEMU\040"
#define VENDOR_DESC_PREFIX VENDOR_PREFIX
#define VENDOR_DESC_POSTFIX ""
/*
 * The remaining macros should be defined in the project .rc file:
 * VENDOR_VIRTIO_PRODUCT, VER_FILEDESCRIPTION_STR, VER_INTERNALNAME_STR
 */

View file

@ -0,0 +1,619 @@
/*
* This file contains SW Implementation of checksum computation for IP,TCP,UDP
*
* Copyright (c) 2008-2017 Red Hat, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "ndis56common.h"
#ifdef WPP_EVENT_TRACING
#include "sw-offload.tmh"
#endif
#include <sal.h>
/* Limit on the accumulated IPv6 header chain size; the parser stores header
 * sizes in small fields (presumably 8-bit) -- TODO confirm field widths. */
#define MAX_SUPPORTED_IPV6_HEADERS (256 - 4)
typedef ULONG IPV6_ADDRESS[4];
// IPv6 Header RFC 2460 (40 bytes)
typedef struct _tagIPv6Header {
UCHAR ip6_ver_tc; // traffic class(low nibble), version (high nibble)
UCHAR ip6_tc_fl; // traffic class(high nibble), flow label
USHORT ip6_fl; // flow label, the rest
USHORT ip6_payload_len; // length of following headers and payload
UCHAR ip6_next_header; // next header type
UCHAR ip6_hoplimit; // hop limit
IPV6_ADDRESS ip6_src_address; // source address
IPV6_ADDRESS ip6_dst_address; // destination address
} IPv6Header;
/* Overlay of the two IP versions; the version nibble is at the same offset
 * in both, so v4 fields can be probed before the version is known. */
typedef union
{
IPv6Header v6;
IPv4Header v4;
} IPHeader;
// IPv6 extension header, RFC 2460 (n*8 bytes)
typedef struct _tagIPv6ExtHeader {
UCHAR ip6ext_next_header; // next header type
UCHAR ip6ext_hdr_len; // length of this header in 8 bytes unit, not including first 8 bytes
USHORT options; // header-specific data (first 16 bits)
} IPv6ExtHeader;
// IP Pseudo Header RFC 768
typedef struct _tagIPv4PseudoHeader {
ULONG ipph_src; // Source address
ULONG ipph_dest; // Destination address
UCHAR ipph_zero; // 0
UCHAR ipph_protocol; // TCP/UDP
USHORT ipph_length; // TCP/UDP length
}tIPv4PseudoHeader;
// IPv6 Pseudo Header RFC 2460
typedef struct _tagIPv6PseudoHeader {
IPV6_ADDRESS ipph_src; // Source address
IPV6_ADDRESS ipph_dest; // Destination address
ULONG ipph_length; // TCP/UDP length
UCHAR z1; // 0
UCHAR z2; // 0
UCHAR z3; // 0
UCHAR ipph_protocol; // TCP/UDP
}tIPv6PseudoHeader;
#define PROTOCOL_TCP 6
#define PROTOCOL_UDP 17
/* Header length macros: IHL / data-offset are in 32-bit words, hence "<< 2".
 * FIX: pHeader is now parenthesized in TCP_HEADER_LENGTH as well, so the
 * macro stays correct when passed a non-trivial expression. */
#define IP_HEADER_LENGTH(pHeader) (((pHeader)->ip_verlen & 0x0F) << 2)
#define TCP_HEADER_LENGTH(pHeader) (((pHeader)->tcp_flags & 0xF0) >> 2)
/* Folds "buffer" into a ones'-complement Internet checksum (RFC 1071).
 * val    - initial accumulator (e.g. a pseudo-header partial sum)
 * buffer - data to sum
 * len    - byte count; an odd trailing byte contributes its raw value
 * Returns the 16-bit ones'-complement of the folded sum. */
static __inline USHORT CheckSumCalculator(ULONG val, PVOID buffer, ULONG len)
{
    PUSHORT words = (PUSHORT)buffer;
    ULONG remaining;
    for (remaining = len; remaining > 1; remaining -= 2)
    {
        val += *words++;
    }
    if (remaining)
    {
        val += (USHORT)*(PUCHAR)words;
    }
    /* fold any carries from the upper half-word back into the lower one */
    val = (((val >> 16) | (val << 16)) + val) >> 16;
    return (USHORT)~val;
}
/* Recomputes the IPv4 header checksum in place.
 * The checksum field is zeroed first, as RFC 791 requires before summing. */
static __inline VOID CalculateIpChecksum(IPv4Header *pIpHeader)
{
    ULONG headerLength = IP_HEADER_LENGTH(pIpHeader);
    pIpHeader->ip_xsum = 0;
    pIpHeader->ip_xsum = CheckSumCalculator(0, pIpHeader, headerLength);
}
/* Marks the parsing state as TCP and, when the TCP header is fully present,
 * records the combined IP + TCP header size in XxpIpHeaderSize.
 * _res         - parsing state accumulated so far (by value)
 * pIpHeader    - start of the IP header
 * len          - bytes available from pIpHeader
 * ipHeaderSize - length of the IP header chain preceding TCP
 * Returns the updated parsing state. */
static __inline tTcpIpPacketParsingResult
ProcessTCPHeader(tTcpIpPacketParsingResult _res, PVOID pIpHeader, ULONG len, USHORT ipHeaderSize)
{
    tTcpIpPacketParsingResult res = _res;
    ULONG minimalSize = ipHeaderSize + sizeof(TCPHeader);

    res.TcpUdp = ppresIsTCP;
    if (len < minimalSize)
    {
        /* not even a minimal TCP header is visible */
        res.xxpStatus = ppresXxpIncomplete;
        DPrintf(2, ("tcp: %d < min headers %d", len, minimalSize));
        return res;
    }

    {
        TCPHeader *pTcpHeader = (TCPHeader *)RtlOffsetToPointer(pIpHeader, ipHeaderSize);
        res.xxpStatus = ppresXxpKnown;
        /* actual TCP header length comes from the data-offset field */
        res.XxpIpHeaderSize = ipHeaderSize + TCP_HEADER_LENGTH(pTcpHeader);
    }
    return res;
}
/* Marks the parsing state as UDP; XxpIpHeaderSize is always the fixed
 * IP-chain + 8-byte UDP header size. xxpStatus becomes Known once the whole
 * UDP header is visible. */
static __inline tTcpIpPacketParsingResult
ProcessUDPHeader(tTcpIpPacketParsingResult _res, PVOID pIpHeader, ULONG len, USHORT ipHeaderSize)
{
    tTcpIpPacketParsingResult res = _res;
    ULONG udpDataStart = ipHeaderSize + sizeof(UDPHeader);

    res.TcpUdp = ppresIsUDP;
    res.XxpIpHeaderSize = udpDataStart;
    if (len < udpDataStart)
    {
        res.xxpStatus = ppresXxpIncomplete;
        return res;
    }

    {
        UDPHeader *pUdpHeader = (UDPHeader *)RtlOffsetToPointer(pIpHeader, ipHeaderSize);
        USHORT datagramLength = swap_short(pUdpHeader->udp_length);
        res.xxpStatus = ppresXxpKnown;
        /* the payload may be truncated, but the declared length is known */
        DPrintf(2, ("udp: len %d, datagramLength %d", len, datagramLength));
    }
    return res;
}
/* Parses and classifies a buffer assumed to start with an IP header:
 * determines IP version, total IP header size (including IPv6 extension
 * headers), transport protocol (TCP/UDP/other) and whether the transport
 * header and full packet are present within "len" bytes.
 * Returns the populated tTcpIpPacketParsingResult. */
static __inline tTcpIpPacketParsingResult
QualifyIpPacket(IPHeader *pIpHeader, ULONG len)
{
    tTcpIpPacketParsingResult res;
    /* the version nibble sits at the same offset for IPv4 and IPv6 */
    UCHAR ver_len = pIpHeader->v4.ip_verlen;
    UCHAR ip_version = (ver_len & 0xF0) >> 4;
    USHORT ipHeaderSize = 0;
    USHORT fullLength = 0;
    res.value = 0;
    if (ip_version == 4)
    {
        /* IHL is expressed in 32-bit words */
        ipHeaderSize = (ver_len & 0xF) << 2;
        fullLength = swap_short(pIpHeader->v4.ip_length);
        DPrintf(3, ("ip_version %d, ipHeaderSize %d, protocol %d, iplen %d",
            ip_version, ipHeaderSize, pIpHeader->v4.ip_protocol, fullLength));
        /* a header below the 20-byte minimum cannot be valid IPv4 */
        res.ipStatus = (ipHeaderSize >= sizeof(IPv4Header)) ? ppresIPV4 : ppresNotIP;
        if (len < ipHeaderSize) res.ipCheckSum = ppresIPTooShort;
        if (fullLength) {}
        else
        {
            /* zero total length is only reported in the debug log */
            DPrintf(2, ("ip v.%d, iplen %d", ip_version, fullLength));
        }
    }
    else if (ip_version == 6)
    {
        UCHAR nextHeader = pIpHeader->v6.ip6_next_header;
        BOOLEAN bParsingDone = FALSE;
        ipHeaderSize = sizeof(pIpHeader->v6);
        res.ipStatus = ppresIPV6;
        /* IPv6 has no IP-level checksum, so it is trivially "OK" */
        res.ipCheckSum = ppresCSOK;
        fullLength = swap_short(pIpHeader->v6.ip6_payload_len);
        fullLength += ipHeaderSize;
        /* walk the extension header chain; 59 = "no next header" (RFC 2460) */
        while (nextHeader != 59)
        {
            IPv6ExtHeader *pExt;
            switch (nextHeader)
            {
                case PROTOCOL_TCP:
                    bParsingDone = TRUE;
                    res.xxpStatus = ppresXxpKnown;
                    res.TcpUdp = ppresIsTCP;
                    res.xxpFull = len >= fullLength ? 1 : 0;
                    res = ProcessTCPHeader(res, pIpHeader, len, ipHeaderSize);
                    break;
                case PROTOCOL_UDP:
                    bParsingDone = TRUE;
                    res.xxpStatus = ppresXxpKnown;
                    res.TcpUdp = ppresIsUDP;
                    res.xxpFull = len >= fullLength ? 1 : 0;
                    res = ProcessUDPHeader(res, pIpHeader, len, ipHeaderSize);
                    break;
                /* recognized extension headers: hop-by-hop(0), dest-opts(60),
                   routing(43), fragment(44), AH(51), ESP(50), mobility(135) */
                case 0:
                    __fallthrough;
                case 60:
                    __fallthrough;
                case 43:
                    __fallthrough;
                case 44:
                    __fallthrough;
                case 51:
                    __fallthrough;
                case 50:
                    __fallthrough;
                case 135:
                    if (len >= ((ULONG)ipHeaderSize + 8))
                    {
                        /* ext. header length is in 8-byte units, excluding
                           the first 8 bytes of the header itself */
                        pExt = (IPv6ExtHeader *)((PUCHAR)pIpHeader + ipHeaderSize);
                        nextHeader = pExt->ip6ext_next_header;
                        ipHeaderSize += 8;
                        ipHeaderSize += pExt->ip6ext_hdr_len * 8;
                    }
                    else
                    {
                        DPrintf(0, ("[%s] ERROR: Break in the middle of ext. headers(len %d, hdr > %d)", __FUNCTION__, len, ipHeaderSize));
                        res.ipStatus = ppresNotIP;
                        bParsingDone = TRUE;
                    }
                    break;
                /* any other protocol: not TCP/UDP, stop here */
                default:
                    res.xxpStatus = ppresXxpOther;
                    bParsingDone = TRUE;
                    break;
            }
            if (bParsingDone)
                break;
        }
        if (ipHeaderSize <= MAX_SUPPORTED_IPV6_HEADERS)
        {
            DPrintf(3, ("ip_version %d, ipHeaderSize %d, protocol %d, iplen %d",
                ip_version, ipHeaderSize, nextHeader, fullLength));
            res.ipHeaderSize = ipHeaderSize;
        }
        else
        {
            DPrintf(0, ("[%s] ERROR: IP chain is too large (%d)", __FUNCTION__, ipHeaderSize));
            res.ipStatus = ppresNotIP;
        }
    }
    if (res.ipStatus == ppresIPV4)
    {
        res.ipHeaderSize = ipHeaderSize;
        res.xxpFull = len >= fullLength ? 1 : 0;
        /* "more fragments" bit or a nonzero fragment offset => fragmented
           (note: ip_offset is examined in network byte order here) */
        res.IsFragment = (pIpHeader->v4.ip_offset & ~0xC0) != 0;
        switch (pIpHeader->v4.ip_protocol)
        {
            case PROTOCOL_TCP:
            {
                res = ProcessTCPHeader(res, pIpHeader, len, ipHeaderSize);
            }
            break;
            case PROTOCOL_UDP:
            {
                res = ProcessUDPHeader(res, pIpHeader, len, ipHeaderSize);
            }
            break;
            default:
                res.xxpStatus = ppresXxpOther;
                break;
        }
    }
    return res;
}
/* Returns the byte count covered by the transport checksum: transport
 * header plus payload, excluding the IP header chain. Returns 0 when the
 * packet was not classified as IPv4 or IPv6. */
static __inline USHORT GetXxpHeaderAndPayloadLen(IPHeader *pIpHeader, tTcpIpPacketParsingResult res)
{
    USHORT result = 0;
    if (res.ipStatus == ppresIPV4)
    {
        USHORT totalLength = swap_short(pIpHeader->v4.ip_length);
        result = totalLength - IP_HEADER_LENGTH(&pIpHeader->v4);
    }
    else if (res.ipStatus == ppresIPV6)
    {
        USHORT payloadLength = swap_short(pIpHeader->v6.ip6_payload_len);
        /* payload length already includes extension headers; subtract the
           part of the chain counted in ipHeaderSize beyond the fixed 40 */
        result = payloadLength + sizeof(pIpHeader->v6) - (USHORT)res.ipHeaderSize;
    }
    return result;
}
/* Builds the RFC 768/793 pseudo-header for an IPv4 packet and returns its
 * raw (non-complemented) ones'-complement sum, suitable for seeding the
 * transport checksum field. */
static __inline USHORT CalculateIpv4PseudoHeaderChecksum(IPv4Header *pIpHeader, USHORT headerAndPayloadLen)
{
    tIPv4PseudoHeader ipph;

    ipph.ipph_src = pIpHeader->ip_src;
    ipph.ipph_dest = pIpHeader->ip_dest;
    ipph.ipph_zero = 0;
    ipph.ipph_protocol = pIpHeader->ip_protocol;
    ipph.ipph_length = swap_short(headerAndPayloadLen);
    /* CheckSumCalculator returns the complement; undo it to get the sum */
    return ~CheckSumCalculator(0, &ipph, sizeof(ipph));
}
/* Builds the RFC 2460 pseudo-header for an IPv6 packet and returns its raw
 * (non-complemented) ones'-complement sum. */
static __inline USHORT CalculateIpv6PseudoHeaderChecksum(IPv6Header *pIpHeader, USHORT headerAndPayloadLen)
{
    tIPv6PseudoHeader ipph;
    int i;

    /* copy the 128-bit source and destination addresses word by word */
    for (i = 0; i < 4; ++i)
    {
        ipph.ipph_src[i] = pIpHeader->ip6_src_address[i];
        ipph.ipph_dest[i] = pIpHeader->ip6_dst_address[i];
    }
    ipph.ipph_length = swap_short(headerAndPayloadLen);
    ipph.z1 = ipph.z2 = ipph.z3 = 0;
    ipph.ipph_protocol = pIpHeader->ip6_next_header;
    return ~CheckSumCalculator(0, &ipph, sizeof(ipph));
}
/* Version-dispatching wrapper: computes the transport pseudo-header
 * checksum for whichever IP version the packet was classified as.
 * Returns 0 for unclassified packets. */
static __inline USHORT CalculateIpPseudoHeaderChecksum(IPHeader *pIpHeader,
                                                       tTcpIpPacketParsingResult res,
                                                       USHORT headerAndPayloadLen)
{
    switch (res.ipStatus)
    {
        case ppresIPV4:
            return CalculateIpv4PseudoHeaderChecksum(&pIpHeader->v4, headerAndPayloadLen);
        case ppresIPV6:
            return CalculateIpv6PseudoHeaderChecksum(&pIpHeader->v6, headerAndPayloadLen);
        default:
            return 0;
    }
}
/* Compares a computed checksum with the one that arrived in the packet.
 * Per RFC 1624 sec. 3, a received value of 0xFFFF must be treated as
 * equivalent to 0x0000 by the verifying end system. */
static __inline BOOLEAN
CompareNetCheckSumOnEndSystem(USHORT computedChecksum, USHORT arrivedChecksum)
{
    USHORT effectiveArrived = (arrivedChecksum == 0xFFFF) ? 0 : arrivedChecksum;
    return computedChecksum == effectiveArrived;
}
/* Verifies (and optionally repairs) the IPv4 header checksum.
 * Skipped entirely when the header was flagged as too short.
 * pIpHeader - complete IPv4 header
 * known     - parsing state accumulated so far
 * bFix      - TRUE: leave the recomputed checksum in the header;
 *             FALSE: restore the original field after checking
 * Returns the state with ipCheckSum and fixedIpCS updated. */
static __inline tTcpIpPacketParsingResult
VerifyIpChecksum(
    IPv4Header *pIpHeader,
    tTcpIpPacketParsingResult known,
    BOOLEAN bFix)
{
    tTcpIpPacketParsingResult res = known;
    if (res.ipCheckSum != ppresIPTooShort)
    {
        USHORT originalChecksum = pIpHeader->ip_xsum;
        BOOLEAN bMatches;
        /* recompute in place, then compare against what arrived */
        CalculateIpChecksum(pIpHeader);
        bMatches = CompareNetCheckSumOnEndSystem(pIpHeader->ip_xsum, originalChecksum);
        res.ipCheckSum = bMatches ? ppresCSOK : ppresCSBad;
        if (bFix)
        {
            /* recomputed value stays in the header; note whether it changed */
            res.fixedIpCS = res.ipCheckSum == ppresCSBad;
        }
        else
        {
            /* verification only: put the original value back */
            pIpHeader->ip_xsum = originalChecksum;
        }
    }
    return res;
}
/* Finalizes the UDP checksum over header + payload. The udp_xsum field must
 * already contain the pseudo-header partial checksum when called. */
static VOID CalculateUdpChecksumGivenPseudoCS(UDPHeader *pUdpHeader, ULONG udpLength)
{
    USHORT finalChecksum = CheckSumCalculator(0, pUdpHeader, udpLength);
    pUdpHeader->udp_xsum = finalChecksum;
}
/* Finalizes the TCP checksum over header + payload. The tcp_xsum field must
 * already contain the pseudo-header partial checksum when called. */
static __inline VOID CalculateTcpChecksumGivenPseudoCS(TCPHeader *pTcpHeader, ULONG tcpLength)
{
    USHORT finalChecksum = CheckSumCalculator(0, pTcpHeader, tcpLength);
    pTcpHeader->tcp_xsum = finalChecksum;
}
/************************************************
Checks (and fixes, if requested) the TCP checksum.
Result flags:
  xxpCheckSum = ppresPCSOK - the field held a valid pseudo-header checksum
  xxpCheckSum = ppresCSOK  - the field held a valid full TCP checksum
whatToFix bits: pcrFixPHChecksum writes the pseudo-header checksum into the
packet (for HW offload); pcrFixXxpChecksum writes a fully recalculated TCP
checksum (requires the whole packet, res.xxpFull).
************************************************/
static __inline tTcpIpPacketParsingResult
VerifyTcpChecksum( IPHeader *pIpHeader, ULONG len, tTcpIpPacketParsingResult known, ULONG whatToFix)
{
    USHORT phcs;
    tTcpIpPacketParsingResult res = known;
    TCPHeader *pTcpHeader = (TCPHeader *)RtlOffsetToPointer(pIpHeader, res.ipHeaderSize);
    USHORT saved = pTcpHeader->tcp_xsum;   /* checksum as it arrived */
    USHORT xxpHeaderAndPayloadLen = GetXxpHeaderAndPayloadLen(pIpHeader, res);
    if (len >= res.ipHeaderSize)
    {
        phcs = CalculateIpPseudoHeaderChecksum(pIpHeader, res, xxpHeaderAndPayloadLen);
        res.xxpCheckSum = CompareNetCheckSumOnEndSystem(phcs, saved) ? ppresPCSOK : ppresCSBad;
        if (res.xxpCheckSum != ppresPCSOK || whatToFix)
        {
            if (whatToFix & pcrFixPHChecksum)
            {
                /* patching requires at least the complete TCP header */
                if (len >= (ULONG)(res.ipHeaderSize + sizeof(*pTcpHeader)))
                {
                    pTcpHeader->tcp_xsum = phcs;
                    res.fixedXxpCS = res.xxpCheckSum != ppresPCSOK;
                }
                else
                    res.xxpStatus = ppresXxpIncomplete;
            }
            else if (res.xxpFull)
            {
                /* seed the field with the pseudo-header checksum, then
                   compute the full TCP checksum over header + payload */
                pTcpHeader->tcp_xsum = phcs;
                CalculateTcpChecksumGivenPseudoCS(pTcpHeader, xxpHeaderAndPayloadLen);
                if (CompareNetCheckSumOnEndSystem(pTcpHeader->tcp_xsum, saved))
                    res.xxpCheckSum = ppresCSOK;
                if (!(whatToFix & pcrFixXxpChecksum))
                    pTcpHeader->tcp_xsum = saved;   /* verify-only: restore */
                else
                    res.fixedXxpCS =
                        res.xxpCheckSum == ppresCSBad || res.xxpCheckSum == ppresPCSOK;
            }
            else if (whatToFix)
            {
                /* asked to fix a full checksum but the packet is truncated */
                res.xxpStatus = ppresXxpIncomplete;
            }
        }
        else if (res.xxpFull)
        {
            // we have correct PHCS and we do not need to fix anything
            // there is a very small chance that it is also good TCP CS
            // in such rare case we give a priority to TCP CS
            CalculateTcpChecksumGivenPseudoCS(pTcpHeader, xxpHeaderAndPayloadLen);
            if (CompareNetCheckSumOnEndSystem(pTcpHeader->tcp_xsum, saved))
                res.xxpCheckSum = ppresCSOK;
            pTcpHeader->tcp_xsum = saved;
        }
    }
    else
        res.ipCheckSum = ppresIPTooShort;
    return res;
}
/************************************************
Checks (and fixes, if requested) the UDP checksum.
Result flags:
  xxpCheckSum = ppresPCSOK - the field held a valid pseudo-header checksum
  xxpCheckSum = ppresCSOK  - the field held a valid full UDP checksum
whatToFix bits are as in VerifyTcpChecksum.
************************************************/
static __inline tTcpIpPacketParsingResult
VerifyUdpChecksum( IPHeader *pIpHeader, ULONG len, tTcpIpPacketParsingResult known, ULONG whatToFix)
{
    USHORT phcs;
    tTcpIpPacketParsingResult res = known;
    UDPHeader *pUdpHeader = (UDPHeader *)RtlOffsetToPointer(pIpHeader, res.ipHeaderSize);
    USHORT saved = pUdpHeader->udp_xsum;   /* checksum as it arrived */
    USHORT xxpHeaderAndPayloadLen = GetXxpHeaderAndPayloadLen(pIpHeader, res);
    if (len >= res.ipHeaderSize)
    {
        phcs = CalculateIpPseudoHeaderChecksum(pIpHeader, res, xxpHeaderAndPayloadLen);
        res.xxpCheckSum = CompareNetCheckSumOnEndSystem(phcs, saved) ? ppresPCSOK : ppresCSBad;
        if (whatToFix & pcrFixPHChecksum)
        {
            /* patching requires the complete 8-byte UDP header */
            if (len >= (ULONG)(res.ipHeaderSize + sizeof(UDPHeader)))
            {
                pUdpHeader->udp_xsum = phcs;
                res.fixedXxpCS = res.xxpCheckSum != ppresPCSOK;
            }
            else
                res.xxpStatus = ppresXxpIncomplete;
        }
        else if (res.xxpCheckSum != ppresPCSOK || (whatToFix & pcrFixXxpChecksum))
        {
            if (res.xxpFull)
            {
                /* seed with the pseudo-header checksum, then compute the
                   full UDP checksum over header + payload */
                pUdpHeader->udp_xsum = phcs;
                CalculateUdpChecksumGivenPseudoCS(pUdpHeader, xxpHeaderAndPayloadLen);
                if (CompareNetCheckSumOnEndSystem(pUdpHeader->udp_xsum, saved))
                    res.xxpCheckSum = ppresCSOK;
                if (!(whatToFix & pcrFixXxpChecksum))
                    pUdpHeader->udp_xsum = saved;   /* verify-only: restore */
                else
                    res.fixedXxpCS =
                        res.xxpCheckSum == ppresCSBad || res.xxpCheckSum == ppresPCSOK;
            }
            else
                /* NOTE(review): stores an xxpStatus value (ppresXxpIncomplete)
                   into the xxpCheckSum field; mirrors the upstream driver --
                   verify this is intentional */
                res.xxpCheckSum = ppresXxpIncomplete;
        }
        else if (res.xxpFull)
        {
            // we have correct PHCS and we do not need to fix anything
            // there is a very small chance that it is also good UDP CS
            // in such rare case we give a priority to UDP CS
            CalculateUdpChecksumGivenPseudoCS(pUdpHeader, xxpHeaderAndPayloadLen);
            if (CompareNetCheckSumOnEndSystem(pUdpHeader->udp_xsum, saved))
                res.xxpCheckSum = ppresCSOK;
            pUdpHeader->udp_xsum = saved;
        }
    }
    else
        res.ipCheckSum = ppresIPTooShort;
    return res;
}
/* Returns a short human-readable label for the classification result;
 * used only for debug output. */
static LPCSTR __inline GetPacketCase(tTcpIpPacketParsingResult res)
{
    static const char *const IPCaseName[4] = { "not tested", "Non-IP", "IPv4", "IPv6" };
    BOOLEAN bTcp = res.TcpUdp == ppresIsTCP;

    if (res.xxpStatus == ppresXxpKnown)
    {
        if (res.ipStatus == ppresIPV4)
        {
            return bTcp ? "TCPv4" : "UDPv4";
        }
        return bTcp ? "TCPv6" : "UDPv6";
    }
    if (res.xxpStatus == ppresXxpIncomplete)
    {
        return bTcp ? "Incomplete TCP" : "Incomplete UDP";
    }
    if (res.xxpStatus == ppresXxpOther)
    {
        return "IP";
    }
    return IPCaseName[res.ipStatus];
}
/* Debug label for the IP checksum verification state. */
static LPCSTR __inline GetIPCSCase(tTcpIpPacketParsingResult res)
{
    /* indexed by the ipCheckSum ppres* value */
    static const char *const names[4] = { "not tested", "(too short)", "OK", "Bad" };
    return names[res.ipCheckSum];
}
/* Debug label for the transport (TCP/UDP) checksum verification state. */
static LPCSTR __inline GetXxpCSCase(tTcpIpPacketParsingResult res)
{
    /* indexed by the xxpCheckSum ppres* value */
    static const char *const names[4] = { "-", "PCS", "CS", "Bad" };
    return names[res.xxpCheckSum];
}
/* Emits a one-line summary of a parsing/verification result to the debug
 * log at the given verbosity level; procname tags the calling context. */
static __inline VOID PrintOutParsingResult(
    tTcpIpPacketParsingResult res,
    int level,
    LPCSTR procname)
{
    DPrintf(level, ("[%s] %s packet IPCS %s%s, checksum %s%s", procname,
        GetPacketCase(res),
        GetIPCSCase(res),
        res.fixedIpCS ? "(fixed)" : "",
        GetXxpCSCase(res),
        res.fixedXxpCS ? "(fixed)" : ""));
}
/* Entry point: parses the packet at buffer/size and performs the checksum
 * work selected by "flags" (pcr* bits) -- verification and/or fixing of the
 * IPv4 header checksum and the TCP/UDP checksum for IPv4 and IPv6.
 * "caller" only tags the debug output. Returns the full parsing result. */
tTcpIpPacketParsingResult ParaNdis_CheckSumVerify(PVOID buffer, ULONG size, ULONG flags, LPCSTR caller)
{
    tTcpIpPacketParsingResult res = QualifyIpPacket(buffer, size);
    if (res.ipStatus == ppresIPV4)
    {
        if (flags & pcrIpChecksum)
            res = VerifyIpChecksum(buffer, res, (flags & pcrFixIPChecksum) != 0);
        if(res.xxpStatus == ppresXxpKnown)
        {
            if (res.TcpUdp == ppresIsTCP) /* TCP */
            {
                if(flags & pcrTcpV4Checksum)
                {
                    res = VerifyTcpChecksum(buffer, size, res, flags & (pcrFixPHChecksum | pcrFixTcpV4Checksum));
                }
            }
            else /* UDP */
            {
                if (flags & pcrUdpV4Checksum)
                {
                    res = VerifyUdpChecksum(buffer, size, res, flags & (pcrFixPHChecksum | pcrFixUdpV4Checksum));
                }
            }
        }
    }
    else if (res.ipStatus == ppresIPV6)
    {
        /* IPv6 has no IP-level checksum; only transport checksums apply */
        if(res.xxpStatus == ppresXxpKnown)
        {
            if (res.TcpUdp == ppresIsTCP) /* TCP */
            {
                if(flags & pcrTcpV6Checksum)
                {
                    res = VerifyTcpChecksum(buffer, size, res, flags & (pcrFixPHChecksum | pcrFixTcpV6Checksum));
                }
            }
            else /* UDP */
            {
                if (flags & pcrUdpV6Checksum)
                {
                    res = VerifyUdpChecksum(buffer, size, res, flags & (pcrFixPHChecksum | pcrFixUdpV6Checksum));
                }
            }
        }
    }
    PrintOutParsingResult(res, 1, caller);
    return res;
}
/* Classifies an IP packet without any checksum verification or fixing;
 * logs the outcome at debug level 1 on behalf of "caller". */
tTcpIpPacketParsingResult ParaNdis_ReviewIPPacket(PVOID buffer, ULONG size, LPCSTR caller)
{
    tTcpIpPacketParsingResult result = QualifyIpPacket(buffer, size);
    PrintOutParsingResult(result, 1, caller);
    return result;
}

View file

@ -0,0 +1,98 @@
/*
* This file contains resource (version) definitions for all drivers
* that are independent from vendor.
*
* Copyright (c) 2017 Parallels IP Holdings GmbH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
 * These defines are only for the Visual Studio built-in rc editor;
 * real builds pass them in:
 *
 * VER_OS <= $(TargetOS) for description postfix
 * VER_ARCH <= $(VerArch) for description postfix
 * VENDOR_VER <= $(_VENDOR_).ver (rhel by default)
 */
#ifndef VER_OS
#define VER_OS Win??
#endif
#ifndef VER_ARCH
#define VER_ARCH x??
#endif
#ifndef VENDOR_VER
#define VENDOR_VER rhel.ver
//#define VENDOR_VER vz.ver
#endif
/*
 * AUTO: these defines are derived in common.ver
 * VER_PRODUCTVERSION <= VER_PRODUCTMAJORVERSION,VER_PRODUCTMINORVERSION,VER_PRODUCTBUILD,VER_PRODUCTBUILD_QFE
 * VER_FILEVERSION <= VER_PRODUCTVERSION
 * VER_FILEVERSION_STR <= VER_PRODUCTVERSION_STR
 * VER_ORIGINALFILENAME_STR <= VER_INTERNALNAME_STR
 */
/*
 * COMMON: these defines are strictly required (placeholders here;
 * each driver's .rc overrides the description and file name)
 */
#define VER_LANGNEUTRAL
#define VER_FILETYPE VFT_DRV
#define VER_FILESUBTYPE VFT2_DRV_SYSTEM
#define VER_FILEDESCRIPTION_STR "File Description required"
#define VER_INTERNALNAME_STR "File Name required"
/*
 * STRINGIFY: two-level expansion so macro arguments are expanded first
 */
#define STRINGIFY_AUX(X) #X
#define STRINGIFY(X) STRINGIFY_AUX(X)
/*
 * VENDOR SPECIFIC: pulls in the VENDOR_VIRTIO_* values and strings
 */
#include STRINGIFY(VENDOR_VER)
/*
 * Applying vendor specific values over the common.ver defaults
 */
#undef VER_PRODUCTBUILD
#undef VER_PRODUCTBUILD_QFE
#undef VER_PRODUCTMAJORVERSION
#undef VER_PRODUCTMINORVERSION
#define VER_PRODUCTMAJORVERSION VENDOR_VIRTIO_1
#define VER_PRODUCTMINORVERSION VENDOR_VIRTIO_2
#define VER_PRODUCTBUILD VENDOR_VIRTIO_3
#define VER_PRODUCTBUILD_QFE VENDOR_VIRTIO_4
#undef VER_LEGALTRADEMARKS_STR
#undef VER_LEGALCOPYRIGHT_STR
#undef VER_COMPANYNAME_STR
#undef VER_PRODUCTNAME_STR
#define VER_LEGALTRADEMARKS_STR ""
#define VER_LEGALCOPYRIGHT_STR VENDOR_VIRTIO_COPYRIGHT
#define VER_COMPANYNAME_STR VENDOR_VIRTIO_COMPANY
#define VER_PRODUCTNAME_STR VENDOR_VIRTIO_PRODUCT

View file

@ -0,0 +1,30 @@
Copyright 2009-2017 Red Hat, Inc. and/or its affiliates.
Copyright 2016 Google, Inc.
Copyright 2007 IBM Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -0,0 +1,128 @@
#ifndef _LINUX_VIRTIO_H
#define _LINUX_VIRTIO_H
#include "virtio_ring.h"
/* Linux-compatibility alias: a "scatterlist" entry here is simply a
 * physical address / length pair. */
#define scatterlist VirtIOBufferDescriptor
struct VirtIOBufferDescriptor {
    PHYSICAL_ADDRESS physAddr;
    ULONG length;
};
/* Per-queue operation types; concrete implementations are installed in the
 * virtqueue dispatch table when the queue is created. */
typedef int (*proc_virtqueue_add_buf)(
    struct virtqueue *vq,
    struct scatterlist sg[],
    unsigned int out_num,
    unsigned int in_num,
    void *opaque,
    void *va_indirect,
    ULONGLONG phys_indirect);
typedef bool(*proc_virtqueue_kick_prepare)(struct virtqueue *vq);
typedef void(*proc_virtqueue_kick_always)(struct virtqueue *vq);
typedef void * (*proc_virtqueue_get_buf)(struct virtqueue *vq, unsigned int *len);
typedef void(*proc_virtqueue_disable_cb)(struct virtqueue *vq);
typedef bool(*proc_virtqueue_enable_cb)(struct virtqueue *vq);
typedef bool(*proc_virtqueue_enable_cb_delayed)(struct virtqueue *vq);
typedef void * (*proc_virtqueue_detach_unused_buf)(struct virtqueue *vq);
typedef BOOLEAN(*proc_virtqueue_is_interrupt_enabled)(struct virtqueue *vq);
typedef BOOLEAN(*proc_virtqueue_has_buf)(struct virtqueue *vq);
typedef void(*proc_virtqueue_shutdown)(struct virtqueue *vq);
/* Represents one virtqueue; only data pointed to by the vring structure is exposed to the host */
struct virtqueue {
    VirtIODevice *vdev;          /* owning device */
    unsigned int index;          /* queue index within the device */
    void (*notification_cb)(struct virtqueue *vq);
    void *notification_addr;
    void *avail_va;
    void *used_va;
    /* dispatch table: filled in per ring layout at queue setup */
    proc_virtqueue_add_buf add_buf;
    proc_virtqueue_kick_prepare kick_prepare;
    proc_virtqueue_kick_always kick_always;
    proc_virtqueue_get_buf get_buf;
    proc_virtqueue_disable_cb disable_cb;
    proc_virtqueue_enable_cb enable_cb;
    proc_virtqueue_enable_cb_delayed enable_cb_delayed;
    proc_virtqueue_detach_unused_buf detach_unused_buf;
    proc_virtqueue_is_interrupt_enabled is_interrupt_enabled;
    proc_virtqueue_has_buf has_buf;
    proc_virtqueue_shutdown shutdown;
};
/* The inline wrappers below simply forward to the dispatch table above. */
static inline int virtqueue_add_buf(
    struct virtqueue *vq,
    struct scatterlist sg[],
    unsigned int out_num,
    unsigned int in_num,
    void *opaque,
    void *va_indirect,
    ULONGLONG phys_indirect)
{
    return vq->add_buf(vq, sg, out_num, in_num, opaque, va_indirect, phys_indirect);
}
static inline bool virtqueue_kick_prepare(struct virtqueue *vq)
{
    return vq->kick_prepare(vq);
}
static inline void virtqueue_kick_always(struct virtqueue *vq)
{
    vq->kick_always(vq);
}
static inline void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len)
{
    return vq->get_buf(vq, len);
}
static inline void virtqueue_disable_cb(struct virtqueue *vq)
{
    vq->disable_cb(vq);
}
static inline bool virtqueue_enable_cb(struct virtqueue *vq)
{
    return vq->enable_cb(vq);
}
static inline bool virtqueue_enable_cb_delayed(struct virtqueue *vq)
{
    return vq->enable_cb_delayed(vq);
}
static inline void *virtqueue_detach_unused_buf(struct virtqueue *vq)
{
    return vq->detach_unused_buf(vq);
}
static inline BOOLEAN virtqueue_is_interrupt_enabled(struct virtqueue *vq)
{
    return vq->is_interrupt_enabled(vq);
}
static inline BOOLEAN virtqueue_has_buf(struct virtqueue *vq)
{
    return vq->has_buf(vq);
}
static inline void virtqueue_shutdown(struct virtqueue *vq)
{
    vq->shutdown(vq);
}
/* Implemented in the transport layer (not inline) */
void virtqueue_notify(struct virtqueue *vq);
void virtqueue_kick(struct virtqueue *vq);
#endif /* _LINUX_VIRTIO_H */

View file

@ -0,0 +1,411 @@
/*
* Virtio PCI driver - common functionality for all device versions
*
* Copyright IBM Corp. 2007
* Copyright Red Hat, Inc. 2014
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
* Rusty Russell <rusty@rustcorp.com.au>
* Michael S. Tsirkin <mst@redhat.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "osdep.h"
#include "virtio_pci.h"
#include "VirtIO.h"
#include "kdebugprint.h"
#include <stddef.h>
#include "virtio_pci_common.h"
/* First-stage initialization of a virtio device: zeroes the caller-provided
 * structure, probes the modern (virtio 1.0) transport first and falls back
 * to the legacy transport, then resets the device and advances its status
 * to ACKNOWLEDGE | DRIVER.
 * vdev          - caller-allocated device structure (zeroed here)
 * pSystemOps    - OS callback table stored in vdev->system
 * DeviceContext - opaque pointer kept for the callbacks
 * msix_used     - whether MSI-X interrupts are in use
 * Returns the status of whichever transport initialization ran last. */
NTSTATUS virtio_device_initialize(VirtIODevice *vdev,
    const VirtIOSystemOps *pSystemOps,
    PVOID DeviceContext,
    bool msix_used)
{
    NTSTATUS status;
    RtlZeroMemory(vdev, sizeof(VirtIODevice));
    vdev->DeviceContext = DeviceContext;
    vdev->system = pSystemOps;
    vdev->msix_used = msix_used;
    /* queue bookkeeping starts in the inline array; grown on demand later */
    vdev->info = vdev->inline_info;
    vdev->maxQueues = ARRAYSIZE(vdev->inline_info);
    status = vio_modern_initialize(vdev);
    if (status == STATUS_DEVICE_NOT_CONNECTED) {
        /* fall back to legacy virtio device */
        status = vio_legacy_initialize(vdev);
    }
    if (NT_SUCCESS(status)) {
        /* Always start by resetting the device */
        virtio_device_reset(vdev);
        /* Acknowledge that we've seen the device. */
        virtio_add_status(vdev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
        /* If we are here, we must have found a driver for the device */
        virtio_add_status(vdev, VIRTIO_CONFIG_S_DRIVER);
    }
    return status;
}
/* Releases the queue bookkeeping array if it was grown beyond the inline
 * storage; the inline array itself needs no freeing. */
void virtio_device_shutdown(VirtIODevice *vdev)
{
    void *info = vdev->info;
    if (info != NULL && info != vdev->inline_info) {
        mem_free_nonpaged_block(vdev, info);
        vdev->info = NULL;
    }
}
/* Reads the device status byte via the transport-specific callback. */
u8 virtio_get_status(VirtIODevice *vdev)
{
    return vdev->device->get_status(vdev);
}
/* Writes the device status byte via the transport-specific callback. */
void virtio_set_status(VirtIODevice *vdev, u8 status)
{
    vdev->device->set_status(vdev, status);
}
/* ORs additional bits into the device status byte (read-modify-write). */
void virtio_add_status(VirtIODevice *vdev, u8 status)
{
    u8 current = vdev->device->get_status(vdev);
    vdev->device->set_status(vdev, (u8)(current | status));
}
/* Resets the device via the transport-specific callback. */
void virtio_device_reset(VirtIODevice *vdev)
{
    vdev->device->reset(vdev);
}
/* Final initialization step: sets DRIVER_OK, telling the device the driver
 * is fully set up. Asserts the bit was not already set. */
void virtio_device_ready(VirtIODevice *vdev)
{
    unsigned status = vdev->device->get_status(vdev);
    ASSERT(!(status & VIRTIO_CONFIG_S_DRIVER_OK));
    vdev->device->set_status(vdev, (u8)(status | VIRTIO_CONFIG_S_DRIVER_OK));
}
/* Reads the device-offered feature bits via the transport callback. */
u64 virtio_get_features(VirtIODevice *vdev)
{
    return vdev->device->get_features(vdev);
}
/* Writes the driver-accepted feature bits to the device and, for virtio 1.0
 * devices (VIRTIO_F_VERSION_1), completes feature negotiation by setting
 * FEATURES_OK and checking the device did not refuse the feature set.
 * Also caches event-index and packed-ring selections on vdev.
 * Returns STATUS_INVALID_PARAMETER if the device rejects the features. */
NTSTATUS virtio_set_features(VirtIODevice *vdev, u64 features)
{
    unsigned char dev_status;
    NTSTATUS status;
    /* remember ring-related feature choices for queue setup */
    vdev->event_suppression_enabled = virtio_is_feature_enabled(features, VIRTIO_RING_F_EVENT_IDX);
    vdev->packed_ring = virtio_is_feature_enabled(features, VIRTIO_F_RING_PACKED);
    status = vdev->device->set_features(vdev, features);
    if (!NT_SUCCESS(status)) {
        return status;
    }
    /* legacy devices have no FEATURES_OK handshake */
    if (!virtio_is_feature_enabled(features, VIRTIO_F_VERSION_1)) {
        return status;
    }
    virtio_add_status(vdev, VIRTIO_CONFIG_S_FEATURES_OK);
    dev_status = vdev->device->get_status(vdev);
    if (!(dev_status & VIRTIO_CONFIG_S_FEATURES_OK)) {
        /* device cleared FEATURES_OK: it refuses this feature subset */
        DPrintf(0, "virtio: device refuses features: %x\n", dev_status);
        status = STATUS_INVALID_PARAMETER;
    }
    return status;
}
/* Read @count config fields of @bytes each, retrying until the device's
 * config generation is stable so the multi-field read is not torn by a
 * concurrent config change. Transports without get_config_generation
 * report 0 both times, so the loop runs exactly once. */
static void virtio_cread_many(VirtIODevice *vdev,
    unsigned int offset,
    void *buf, size_t count, size_t bytes)
{
    u32 old, gen = vdev->device->get_config_generation ?
        vdev->device->get_config_generation(vdev) : 0;
    size_t i;
    do {
        old = gen;
        for (i = 0; i < count; i++) {
            vdev->device->get_config(vdev, (unsigned)(offset + bytes * i),
                (char *)buf + i * bytes, (unsigned)bytes);
        }
        /* re-read the generation; retry if it changed mid-read */
        gen = vdev->device->get_config_generation ?
            vdev->device->get_config_generation(vdev) : 0;
    } while (gen != old);
}
/* Read @len bytes of device configuration space starting at @offset.
 * Naturally sized fields (1/2/4 bytes) go through a single transport
 * read; an 8-byte field is read as two 32-bit halves and any other size
 * byte by byte, both under config-generation protection. */
void virtio_get_config(VirtIODevice *vdev, unsigned offset,
                       void *buf, unsigned len)
{
    if (len == 1 || len == 2 || len == 4) {
        vdev->device->get_config(vdev, offset, buf, len);
    } else if (len == 8) {
        virtio_cread_many(vdev, offset, buf, 2, sizeof(u32));
    } else {
        virtio_cread_many(vdev, offset, buf, len, 1);
    }
}
/* Write @count fields, @bytes each. */
static void virtio_cwrite_many(VirtIODevice *vdev,
                               unsigned int offset,
                               void *buf, size_t count, size_t bytes)
{
    char *src = (char *)buf;
    size_t field;

    /* Each set_config call writes one @bytes-sized field at its own
     * offset in config space. */
    for (field = 0; field < count; field++) {
        vdev->device->set_config(vdev, (unsigned)(offset + field * bytes),
                                 src + field * bytes, (unsigned)bytes);
    }
}
/* Write @len bytes of device configuration space starting at @offset.
 * Naturally sized fields (1/2/4 bytes) are written in one transport
 * call; an 8-byte field as two 32-bit halves, any other size byte by
 * byte. */
void virtio_set_config(VirtIODevice *vdev, unsigned offset,
                       void *buf, unsigned len)
{
    switch (len) {
    case 1:
    case 2:
    case 4:
        vdev->device->set_config(vdev, offset, buf, len);
        break;
    case 8:
        virtio_cwrite_many(vdev, offset, buf, 2, sizeof(u32));
        break;
    default:
        virtio_cwrite_many(vdev, offset, buf, len, 1);
        break;
    }
}
/* Query the allocation requirements of virtqueue @index: number of ring
 * entries, bytes of (contiguous) ring memory, and bytes of heap for the
 * queue control block. Delegates to the transport-specific callback. */
NTSTATUS virtio_query_queue_allocation(VirtIODevice *vdev,
                                       unsigned index,
                                       unsigned short *pNumEntries,
                                       unsigned long *pRingSize,
                                       unsigned long *pHeapSize)
{
    return vdev->device->query_queue_alloc(vdev, index, pNumEntries, pRingSize, pHeapSize);
}
/* Ensure the device's queue-info array can hold @nvqs entries, growing it
 * (without preserving old contents) when needed.
 *
 * The replacement array is zero-initialized: virtio_delete_queues() tests
 * info[i].vq != NULL to decide whether a slot was set up, so leaving the
 * fresh array uninitialized would make the partial-setup error path in
 * virtio_find_queues() dereference garbage vq pointers.
 *
 * Returns STATUS_SUCCESS or STATUS_INSUFFICIENT_RESOURCES.
 */
NTSTATUS virtio_reserve_queue_memory(VirtIODevice *vdev, unsigned nvqs)
{
    if (nvqs > vdev->maxQueues) {
        /* allocate new space for queue infos */
        void *new_info = mem_alloc_nonpaged_block(vdev, nvqs * virtio_get_queue_descriptor_size());
        if (!new_info) {
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        /* BUGFIX: zero the array so unused slots have vq == NULL */
        RtlZeroMemory(new_info, nvqs * virtio_get_queue_descriptor_size());
        if (vdev->info && vdev->info != vdev->inline_info) {
            mem_free_nonpaged_block(vdev, vdev->info);
        }
        vdev->info = new_info;
        vdev->maxQueues = nvqs;
    }
    return STATUS_SUCCESS;
}
/* Create virtqueue @index through the transport callback and record the
 * resulting queue pointer in the device's queue-info slot on success. */
static NTSTATUS vp_setup_vq(struct virtqueue **queue,
                            VirtIODevice *vdev, unsigned index,
                            u16 msix_vec)
{
    VirtIOQueueInfo *info = &vdev->info[index];
    NTSTATUS status = vdev->device->setup_queue(queue, vdev, info, index, msix_vec);

    if (!NT_SUCCESS(status)) {
        return status;
    }
    info->vq = *queue;
    return status;
}
/* Set up a single virtqueue: resolve the MSI-X vector assigned to queue
 * @index (or VIRTIO_MSI_NO_VECTOR) and delegate the creation. */
NTSTATUS virtio_find_queue(VirtIODevice *vdev, unsigned index,
                           struct virtqueue **vq)
{
    return vp_setup_vq(vq, vdev, index, vdev_get_msix_vector(vdev, index));
}
/* Create all @nvqs virtqueues of the device.
 *
 * Reserves the queue-info array, routes the configuration-change
 * interrupt to its MSI-X vector (when MSI-X is in use), then creates
 * each queue with its own vector. On any failure every queue created so
 * far is torn down again.
 *
 * Returns STATUS_SUCCESS, STATUS_DEVICE_BUSY when the device rejects the
 * config vector, or the failing queue's setup status.
 */
NTSTATUS virtio_find_queues(VirtIODevice *vdev,
                            unsigned nvqs,
                            struct virtqueue *vqs[])
{
    unsigned i;
    NTSTATUS status;
    u16 msix_vec;

    status = virtio_reserve_queue_memory(vdev, nvqs);
    if (!NT_SUCCESS(status)) {
        return status;
    }

    /* set up the device config interrupt */
    msix_vec = vdev_get_msix_vector(vdev, -1);
    if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
        msix_vec = vdev->device->set_config_vector(vdev, msix_vec);
        /* Verify we had enough resources to assign the vector */
        if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
            status = STATUS_DEVICE_BUSY;
            goto error_find;
        }
    }

    /* set up queue interrupts */
    for (i = 0; i < nvqs; i++) {
        msix_vec = vdev_get_msix_vector(vdev, i);
        status = vp_setup_vq(
            &vqs[i],
            vdev,
            i,
            msix_vec);
        if (!NT_SUCCESS(status)) {
            goto error_find;
        }
    }
    return STATUS_SUCCESS;

error_find:
    /* roll back: delete every queue that was successfully created */
    virtio_delete_queues(vdev);
    return status;
}
/* Delete a single virtqueue.
 *
 * The device pointer and queue index are cached up front on purpose: the
 * transport delete_queue callback may free the memory that backs @vq (the
 * modern transport frees the queue control block), so @vq must not be
 * dereferenced after that call.
 */
void virtio_delete_queue(struct virtqueue *vq)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned i = vq->index;

    vdev->device->delete_queue(&vdev->info[i]);
    vdev->info[i].vq = NULL;
}
/* Delete every virtqueue that was successfully created; slots whose vq
 * pointer is NULL were never set up (or are already deleted) and are
 * skipped. Safe to call when no queue-info array exists. */
void virtio_delete_queues(VirtIODevice *vdev)
{
    unsigned idx;

    if (vdev->info == NULL) {
        return;
    }
    for (idx = 0; idx < vdev->maxQueues; idx++) {
        if (vdev->info[idx].vq == NULL) {
            continue;
        }
        vdev->device->delete_queue(&vdev->info[idx]);
        vdev->info[idx].vq = NULL;
    }
}
/* Return the number of ring entries recorded for this queue at setup. */
u32 virtio_get_queue_size(struct virtqueue *vq)
{
    return vq->vdev->info[vq->index].num;
}
/* Route configuration-change interrupts to MSI-X @vector; returns the
 * vector the device actually accepted (may be VIRTIO_MSI_NO_VECTOR). */
u16 virtio_set_config_vector(VirtIODevice *vdev, u16 vector)
{
    return vdev->device->set_config_vector(vdev, vector);
}
/* Route this queue's interrupts to MSI-X @vector; returns the vector the
 * device actually accepted (may be VIRTIO_MSI_NO_VECTOR). */
u16 virtio_set_queue_vector(struct virtqueue *vq, u16 vector)
{
    return vq->vdev->device->set_queue_vector(vq, vector);
}
/* Read the ISR status byte mapped during transport initialization. */
u8 virtio_read_isr_status(VirtIODevice *vdev)
{
    return ioread8(vdev, vdev->isr);
}
/* Find the index of the type-0 BAR whose decoded base address equals
 * @BasePA, or -1 when no BAR matches.
 *
 * Handles I/O-space, 32-bit memory and 64-bit memory BARs; a 64-bit BAR
 * occupies two consecutive slots, so the returned index is the slot of
 * its low half.
 */
int virtio_get_bar_index(PPCI_COMMON_HEADER pPCIHeader, PHYSICAL_ADDRESS BasePA)
{
    int iBar, i;

    /* no point in supporting PCI and CardBus bridges */
    ASSERT((pPCIHeader->HeaderType & ~PCI_MULTIFUNCTION) == PCI_DEVICE_TYPE);

    for (i = 0; i < PCI_TYPE0_ADDRESSES; i++) {
        PHYSICAL_ADDRESS BAR;
        BAR.LowPart = pPCIHeader->u.type0.BaseAddresses[i];

        iBar = i;
        if (BAR.LowPart & PCI_ADDRESS_IO_SPACE) {
            /* I/O space */
            BAR.LowPart &= PCI_ADDRESS_IO_ADDRESS_MASK;
            BAR.HighPart = 0;
        } else if ((BAR.LowPart & PCI_ADDRESS_MEMORY_TYPE_MASK) == PCI_TYPE_64BIT) {
            /* memory space 64-bit */
            BAR.LowPart &= PCI_ADDRESS_MEMORY_ADDRESS_MASK;
            /* BUGFIX: a malformed device reporting a 64-bit BAR in the
             * last slot would make us read one entry past the
             * BaseAddresses array; bail out instead. */
            if (++i >= PCI_TYPE0_ADDRESSES) {
                break;
            }
            BAR.HighPart = pPCIHeader->u.type0.BaseAddresses[i];
        } else {
            /* memory space 32-bit */
            BAR.LowPart &= PCI_ADDRESS_MEMORY_ADDRESS_MASK;
            BAR.HighPart = 0;
        }

        if (BAR.QuadPart == BasePA.QuadPart) {
            return iBar;
        }
    }
    return -1;
}
/* The notify function used when creating a virt queue, common to both modern
 * and legacy (the difference is in how vq->notification_addr is set up).
 */
void vp_notify(struct virtqueue *vq)
{
    /* we write the queue's selector into the notification register to
     * signal the other end */
    iowrite16(vq->vdev, (unsigned short)vq->index, vq->notification_addr);
    DPrintf(6, "virtio: vp_notify vq->index = %x\n", vq->index);
}
/* Unconditionally notify the device using the queue's notification
 * callback (normally vp_notify). */
void virtqueue_notify(struct virtqueue *vq)
{
    vq->notification_cb(vq);
}
/* Notify the device only if virtqueue_kick_prepare says a notification
 * is actually needed (respects device-side event suppression). */
void virtqueue_kick(struct virtqueue *vq)
{
    if (virtqueue_kick_prepare(vq)) {
        virtqueue_notify(vq);
    }
}

View file

@ -0,0 +1,283 @@
/*
* Virtio PCI driver - legacy (virtio 0.9) device support
*
* Copyright IBM Corp. 2007
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
* Windows porting - Yan Vugenfirer <yvugenfi@redhat.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "osdep.h"
#include "virtio_pci.h"
#include "VirtIO.h"
#include "kdebugprint.h"
#include "virtio_ring.h"
#include "virtio_pci_common.h"
#include "windows/virtio_ring_allocation.h"
#ifdef WPP_EVENT_TRACING
#include "VirtIOPCILegacy.tmh"
#endif
/////////////////////////////////////////////////////////////////////////////////////
//
// vio_legacy_dump_registers - Dump HW registers of the device
//
/////////////////////////////////////////////////////////////////////////////////////
/* Log all legacy-transport registers at debug level 0 (always visible). */
void vio_legacy_dump_registers(VirtIODevice *vdev)
{
    DPrintf(5, "%s\n", __FUNCTION__);

    DPrintf(0, "[VIRTIO_PCI_HOST_FEATURES] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_HOST_FEATURES));
    DPrintf(0, "[VIRTIO_PCI_GUEST_FEATURES] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_GUEST_FEATURES));
    DPrintf(0, "[VIRTIO_PCI_QUEUE_PFN] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_QUEUE_PFN));
    DPrintf(0, "[VIRTIO_PCI_QUEUE_NUM] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_QUEUE_NUM));
    DPrintf(0, "[VIRTIO_PCI_QUEUE_SEL] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_QUEUE_SEL));
    DPrintf(0, "[VIRTIO_PCI_QUEUE_NOTIFY] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_QUEUE_NOTIFY));
    DPrintf(0, "[VIRTIO_PCI_STATUS] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_STATUS));
    DPrintf(0, "[VIRTIO_PCI_ISR] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_ISR));
}
/* Read @len bytes of device-specific config space at @offset. The config
 * area starts after the MSI-X registers when MSI-X is in use, hence the
 * VIRTIO_PCI_CONFIG(vdev->msix_used) base. Legacy config space is only
 * byte-addressable, so the copy proceeds one byte at a time. */
static void vio_legacy_get_config(VirtIODevice * vdev,
                                  unsigned offset,
                                  void *buf,
                                  unsigned len)
{
    ULONG_PTR ioaddr = vdev->addr + VIRTIO_PCI_CONFIG(vdev->msix_used) + offset;
    u8 *dst = buf;
    u8 *end = dst + len;

    DPrintf(5, "%s\n", __FUNCTION__);

    while (dst < end) {
        *dst++ = ioread8(vdev, ioaddr++);
    }
}
/* Write @len bytes of device-specific config space at @offset, one byte
 * at a time (legacy config space is only byte-addressable). */
static void vio_legacy_set_config(VirtIODevice *vdev,
                                  unsigned offset,
                                  const void *buf,
                                  unsigned len)
{
    ULONG_PTR ioaddr = vdev->addr + VIRTIO_PCI_CONFIG(vdev->msix_used) + offset;
    const u8 *src = buf;
    const u8 *end = src + len;

    DPrintf(5, "%s\n", __FUNCTION__);

    while (src < end) {
        iowrite8(vdev, *src++, ioaddr++);
    }
}
/* Read the legacy status register. */
static u8 vio_legacy_get_status(VirtIODevice *vdev)
{
    DPrintf(6, "%s\n", __FUNCTION__);
    return ioread8(vdev, vdev->addr + VIRTIO_PCI_STATUS);
}
/* Write the legacy status register. */
static void vio_legacy_set_status(VirtIODevice *vdev, u8 status)
{
    DPrintf(6, "%s>>> %x\n", __FUNCTION__, status);
    iowrite8(vdev, status, vdev->addr + VIRTIO_PCI_STATUS);
}
/* Reset the device by writing 0 to the status register. */
static void vio_legacy_reset(VirtIODevice *vdev)
{
    /* 0 status means a reset. */
    iowrite8(vdev, 0, vdev->addr + VIRTIO_PCI_STATUS);
}
/* Read the host feature bits; the legacy register is only 32 bits wide. */
static u64 vio_legacy_get_features(VirtIODevice *vdev)
{
    return ioread32(vdev, vdev->addr + VIRTIO_PCI_HOST_FEATURES);
}
/* Write the guest feature bits. The legacy interface only carries 32
 * feature bits; the ASSERT enforces that no upper bits survived
 * vring_transport_features filtering. */
static NTSTATUS vio_legacy_set_features(VirtIODevice *vdev, u64 features)
{
    /* Give virtio_ring a chance to accept features. */
    vring_transport_features(vdev, &features);

    /* Make sure we don't have any features > 32 bits! */
    ASSERT((u32)features == features);
    iowrite32(vdev, (u32)features, vdev->addr + VIRTIO_PCI_GUEST_FEATURES);
    return STATUS_SUCCESS;
}
/* Route configuration-change interrupts to MSI-X @vector and return the
 * vector the device accepted (VIRTIO_MSI_NO_VECTOR on failure). */
static u16 vio_legacy_set_config_vector(VirtIODevice *vdev, u16 vector)
{
    /* Setup the vector used for configuration events */
    iowrite16(vdev, vector, vdev->addr + VIRTIO_MSI_CONFIG_VECTOR);
    /* Verify we had enough resources to assign the vector */
    /* Will also flush the write out to device */
    return ioread16(vdev, vdev->addr + VIRTIO_MSI_CONFIG_VECTOR);
}
/* Route this queue's interrupts to MSI-X @vector. The queue must be
 * selected via QUEUE_SEL first; the read-back both verifies acceptance
 * and flushes the write. */
static u16 vio_legacy_set_queue_vector(struct virtqueue *vq, u16 vector)
{
    VirtIODevice *vdev = vq->vdev;

    iowrite16(vdev, (u16)vq->index, vdev->addr + VIRTIO_PCI_QUEUE_SEL);
    iowrite16(vdev, vector, vdev->addr + VIRTIO_MSI_QUEUE_VECTOR);
    return ioread16(vdev, vdev->addr + VIRTIO_MSI_QUEUE_VECTOR);
}
/* Report allocation requirements of legacy queue @index.
 *
 * Selects the queue (QUEUE_SEL must precede QUEUE_NUM/QUEUE_PFN reads)
 * and returns its entry count plus the contiguous memory needed for both
 * the ring and its control data; no separate heap block is required
 * (*pHeapSize = 0).
 *
 * Returns STATUS_NOT_FOUND when the queue does not exist or is already
 * activated (non-zero PFN), STATUS_SUCCESS otherwise.
 */
static NTSTATUS vio_legacy_query_vq_alloc(VirtIODevice *vdev,
                                          unsigned index,
                                          unsigned short *pNumEntries,
                                          unsigned long *pRingSize,
                                          unsigned long *pHeapSize)
{
    unsigned long ring_size, data_size;
    u16 num;

    /* Select the queue we're interested in */
    iowrite16(vdev, (u16)index, vdev->addr + VIRTIO_PCI_QUEUE_SEL);

    /* Check if queue is either not available or already active. */
    num = ioread16(vdev, vdev->addr + VIRTIO_PCI_QUEUE_NUM);
    if (!num || ioread32(vdev, vdev->addr + VIRTIO_PCI_QUEUE_PFN)) {
        return STATUS_NOT_FOUND;
    }

    ring_size = ROUND_TO_PAGES(vring_size(num, VIRTIO_PCI_VRING_ALIGN, false));
    data_size = ROUND_TO_PAGES(vring_control_block_size(num, false));

    *pNumEntries = num;
    *pRingSize = ring_size + data_size;
    *pHeapSize = 0;

    return STATUS_SUCCESS;
}
/* Allocate and activate legacy queue @index.
 *
 * Allocates one contiguous block holding both the ring and the queue
 * control data (legacy has no separate heap allocation), programs the
 * ring's page frame number into QUEUE_PFN to activate the queue, builds
 * the split-ring virtqueue object and optionally assigns its MSI-X
 * vector.
 *
 * On failure the queue is deactivated (PFN written back to 0) and the
 * contiguous block is released.
 */
static NTSTATUS vio_legacy_setup_vq(struct virtqueue **queue,
                                    VirtIODevice *vdev,
                                    VirtIOQueueInfo *info,
                                    unsigned index,
                                    u16 msix_vec)
{
    struct virtqueue *vq;
    unsigned long ring_size, heap_size;
    NTSTATUS status;

    /* Select the queue and query allocation parameters */
    status = vio_legacy_query_vq_alloc(vdev, index, &info->num, &ring_size, &heap_size);
    if (!NT_SUCCESS(status)) {
        return status;
    }

    info->queue = mem_alloc_contiguous_pages(vdev, ring_size);
    if (info->queue == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    /* activate the queue */
    iowrite32(vdev, (u32)(mem_get_physical_address(vdev, info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT),
        vdev->addr + VIRTIO_PCI_QUEUE_PFN);

    /* create the vring; control data lives right after the page-aligned ring */
    vq = vring_new_virtqueue_split(index, info->num,
        VIRTIO_PCI_VRING_ALIGN, vdev,
        info->queue, vp_notify,
        (u8 *)info->queue + ROUND_TO_PAGES(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN, false)));
    if (!vq) {
        status = STATUS_INSUFFICIENT_RESOURCES;
        goto err_activate_queue;
    }

    /* legacy devices are notified through the QUEUE_NOTIFY register */
    vq->notification_addr = (void *)(vdev->addr + VIRTIO_PCI_QUEUE_NOTIFY);

    if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
        msix_vec = vdev->device->set_queue_vector(vq, msix_vec);
        if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
            status = STATUS_DEVICE_BUSY;
            goto err_assign;
        }
    }

    *queue = vq;
    return STATUS_SUCCESS;

err_assign:
err_activate_queue:
    /* deactivate and free on any failure */
    iowrite32(vdev, 0, vdev->addr + VIRTIO_PCI_QUEUE_PFN);
    mem_free_contiguous_pages(vdev, info->queue);
    return status;
}
/* Deactivate and free legacy queue described by @info.
 *
 * Detaches the MSI-X vector first (if MSI-X is in use), deactivates the
 * queue by writing 0 to QUEUE_PFN, then releases the contiguous
 * ring/control block.
 */
static void vio_legacy_del_vq(VirtIOQueueInfo *info)
{
    struct virtqueue *vq = info->vq;
    VirtIODevice *vdev = vq->vdev;

    iowrite16(vdev, (u16)vq->index, vdev->addr + VIRTIO_PCI_QUEUE_SEL);

    if (vdev->msix_used) {
        iowrite16(vdev, VIRTIO_MSI_NO_VECTOR,
            vdev->addr + VIRTIO_MSI_QUEUE_VECTOR);
        /* Flush the write out to device */
        ioread8(vdev, vdev->addr + VIRTIO_PCI_ISR);
    }

    /* Select and deactivate the queue */
    iowrite32(vdev, 0, vdev->addr + VIRTIO_PCI_QUEUE_PFN);

    mem_free_contiguous_pages(vdev, info->queue);
}
/* Legacy (virtio 0.9) transport dispatch table, installed by
 * vio_legacy_initialize. get_config_generation is NULL: the legacy
 * interface has no config generation counter. */
static const struct virtio_device_ops virtio_pci_device_ops = {
    /* .get_config = */ vio_legacy_get_config,
    /* .set_config = */ vio_legacy_set_config,
    /* .get_config_generation = */ NULL,
    /* .get_status = */ vio_legacy_get_status,
    /* .set_status = */ vio_legacy_set_status,
    /* .reset = */ vio_legacy_reset,
    /* .get_features = */ vio_legacy_get_features,
    /* .set_features = */ vio_legacy_set_features,
    /* .set_config_vector = */ vio_legacy_set_config_vector,
    /* .set_queue_vector = */ vio_legacy_set_queue_vector,
    /* .query_queue_alloc = */ vio_legacy_query_vq_alloc,
    /* .setup_queue = */ vio_legacy_setup_vq,
    /* .delete_queue = */ vio_legacy_del_vq,
};
/* Legacy device initialization: map the whole of BAR 0 (the legacy I/O
 * window), record the ISR register address inside it and install the
 * legacy dispatch table. Returns STATUS_INSUFFICIENT_RESOURCES when the
 * BAR cannot be mapped. */
NTSTATUS vio_legacy_initialize(VirtIODevice *vdev)
{
    size_t length = pci_get_resource_len(vdev, 0);
    vdev->addr = (ULONG_PTR)pci_map_address_range(vdev, 0, 0, length);

    if (!vdev->addr) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    vdev->isr = (u8 *)vdev->addr + VIRTIO_PCI_ISR;

    vdev->device = &virtio_pci_device_ops;

    return STATUS_SUCCESS;
}

View file

@ -0,0 +1,597 @@
/*
* Virtio PCI driver - modern (virtio 1.0) device support
*
* Copyright IBM Corp. 2007
* Copyright Red Hat, Inc. 2014
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
* Rusty Russell <rusty@rustcorp.com.au>
* Michael S. Tsirkin <mst@redhat.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "osdep.h"
#define VIRTIO_PCI_NO_LEGACY
#include "virtio_pci.h"
#include "VirtIO.h"
#include "kdebugprint.h"
#include "virtio_ring.h"
#include "virtio_pci_common.h"
#include "windows/virtio_ring_allocation.h"
#include <stddef.h>
#ifdef WPP_EVENT_TRACING
#include "VirtIOPCIModern.tmh"
#endif
/* Map the BAR region described by the virtio capability at @cap_offset.
 *
 * Reads the capability's bar/offset/length fields from PCI config space,
 * then maps the window [start, start + min(size, remaining)) within that
 * region, validating that at least @minlen bytes fit and that the
 * resulting offset is @alignment-aligned. On success returns the mapped
 * virtual address and, when @len is non-NULL, stores the mapped length.
 * Returns NULL on any validation or mapping failure (logged).
 */
static void *vio_modern_map_capability(VirtIODevice *vdev, int cap_offset,
                                       size_t minlen, u32 alignment,
                                       u32 start, u32 size, size_t *len)
{
    u8 bar;
    u32 bar_offset, bar_length;
    void *addr;

    pci_read_config_byte(vdev, cap_offset + offsetof(struct virtio_pci_cap, bar), &bar);
    pci_read_config_dword(vdev, cap_offset + offsetof(struct virtio_pci_cap, offset), &bar_offset);
    pci_read_config_dword(vdev, cap_offset + offsetof(struct virtio_pci_cap, length), &bar_length);

    if (start + minlen > bar_length) {
        DPrintf(0, "bar %i cap is not large enough to map %zu bytes at offset %u\n", bar, minlen, start);
        return NULL;
    }

    bar_length -= start;
    bar_offset += start;

    if (bar_offset & (alignment - 1)) {
        DPrintf(0, "bar %i offset %u not aligned to %u\n", bar, bar_offset, alignment);
        return NULL;
    }

    /* clamp the mapping to the requested window size */
    if (bar_length > size) {
        bar_length = size;
    }

    if (len) {
        *len = bar_length;
    }

    if (bar_offset + minlen > pci_get_resource_len(vdev, bar)) {
        DPrintf(0, "bar %i is not large enough to map %zu bytes at offset %u\n", bar, minlen, bar_offset);
        return NULL;
    }

    addr = pci_map_address_range(vdev, bar, bar_offset, bar_length);
    if (!addr) {
        DPrintf(0, "unable to map %u bytes at bar %i offset %u\n", bar_length, bar, bar_offset);
    }
    return addr;
}
/* Convenience wrapper: map exactly @length bytes at the start of the
 * region described by the capability at @cap_offset. */
static void *vio_modern_map_simple_capability(VirtIODevice *vdev, int cap_offset, size_t length, u32 alignment)
{
    return vio_modern_map_capability(
        vdev,
        cap_offset,
        length,       // minlen
        alignment,
        0,            // offset
        (u32)length,  // size is equal to minlen
        NULL);        // not interested in the full length
}
/* Read a naturally sized (1/2/4 byte) field from the mapped device
 * config region. Validates that the config region exists and that the
 * access stays within its mapped length; invalid calls assert and read
 * nothing. */
static void vio_modern_get_config(VirtIODevice *vdev, unsigned offset,
                                  void *buf, unsigned len)
{
    if (!vdev->config) {
        ASSERT(!"Device has no config to read");
        return;
    }
    if (offset + len > vdev->config_len) {
        ASSERT(!"Can't read beyond the config length");
        return;
    }

    if (len == 1) {
        *(u8 *)buf = ioread8(vdev, vdev->config + offset);
    } else if (len == 2) {
        *(u16 *)buf = ioread16(vdev, vdev->config + offset);
    } else if (len == 4) {
        *(u32 *)buf = ioread32(vdev, vdev->config + offset);
    } else {
        ASSERT(!"Only 1, 2, 4 byte config reads are supported");
    }
}
/* Write a naturally sized (1/2/4 byte) field into the mapped device
 * config region. Validates that the config region exists and that the
 * access stays within its mapped length; invalid calls assert and write
 * nothing. */
static void vio_modern_set_config(VirtIODevice *vdev, unsigned offset,
                                  const void *buf, unsigned len)
{
    if (!vdev->config) {
        ASSERT(!"Device has no config to write");
        return;
    }
    if (offset + len > vdev->config_len) {
        ASSERT(!"Can't write beyond the config length");
        return;
    }

    switch (len) {
    case 1:
        iowrite8(vdev, *(u8 *)buf, vdev->config + offset);
        break;
    case 2:
        iowrite16(vdev, *(u16 *)buf, vdev->config + offset);
        break;
    case 4:
        iowrite32(vdev, *(u32 *)buf, vdev->config + offset);
        break;
    default:
        ASSERT(!"Only 1, 2, 4 byte config writes are supported");
    }
}
/* Read the config generation counter, used to detect torn multi-field
 * config reads. */
static u32 vio_modern_get_generation(VirtIODevice *vdev)
{
    return ioread8(vdev, &vdev->common->config_generation);
}
/* Read the device status byte from the common config structure. */
static u8 vio_modern_get_status(VirtIODevice *vdev)
{
    return ioread8(vdev, &vdev->common->device_status);
}
/* Write the device status byte. Writing 0 means reset and must go
 * through vio_modern_reset instead, hence the assertion. */
static void vio_modern_set_status(VirtIODevice *vdev, u8 status)
{
    /* We should never be setting status to 0. */
    ASSERT(status != 0);
    iowrite8(vdev, status, &vdev->common->device_status);
}
/* Reset the device and wait (sleeping 1 ms per poll) until it reports
 * the reset as complete. */
static void vio_modern_reset(VirtIODevice *vdev)
{
    /* 0 status means a reset. */
    iowrite8(vdev, 0, &vdev->common->device_status);

    /* After writing 0 to device_status, the driver MUST wait for a read of
     * device_status to return 0 before reinitializing the device.
     * This will flush out the status write, and flush in device writes,
     * including MSI-X interrupts, if any.
     */
    while (ioread8(vdev, &vdev->common->device_status)) {
        vdev_sleep(vdev, 1);
    }
}
/* Read the 64-bit device feature set through the 32-bit
 * device_feature_select window: select half 0, read, select half 1,
 * read, and combine. */
static u64 vio_modern_get_features(VirtIODevice *vdev)
{
    u64 features;

    iowrite32(vdev, 0, &vdev->common->device_feature_select);
    features = ioread32(vdev, &vdev->common->device_feature);
    iowrite32(vdev, 1, &vdev->common->device_feature_select);
    features |= ((u64)ioread32(vdev, &vdev->common->device_feature) << 32);

    return features;
}
/* Write the negotiated feature bits to a modern device through the
 * 32-bit guest_feature_select window (half 0 then half 1).
 *
 * The features must include VIRTIO_F_VERSION_1 — a device exposing the
 * modern interface without it is rejected with STATUS_INVALID_PARAMETER.
 */
static NTSTATUS vio_modern_set_features(VirtIODevice *vdev, u64 features)
{
    /* Give virtio_ring a chance to accept features. */
    vring_transport_features(vdev, &features);

    if (!virtio_is_feature_enabled(features, VIRTIO_F_VERSION_1)) {
        /* fixed: dropped a stray trailing argument that had no matching
         * conversion specifier in the format string */
        DPrintf(0, "virtio: device uses modern interface but does not have VIRTIO_F_VERSION_1\n");
        return STATUS_INVALID_PARAMETER;
    }

    iowrite32(vdev, 0, &vdev->common->guest_feature_select);
    iowrite32(vdev, (u32)features, &vdev->common->guest_feature);
    iowrite32(vdev, 1, &vdev->common->guest_feature_select);
    /* fixed: make the u64 -> u32 truncation explicit, mirroring the cast
     * used for the low half (silences implicit-conversion warnings) */
    iowrite32(vdev, (u32)(features >> 32), &vdev->common->guest_feature);

    return STATUS_SUCCESS;
}
/* Route configuration-change interrupts to MSI-X @vector and return the
 * vector the device accepted (VIRTIO_MSI_NO_VECTOR on failure). */
static u16 vio_modern_set_config_vector(VirtIODevice *vdev, u16 vector)
{
    /* Setup the vector used for configuration events */
    iowrite16(vdev, vector, &vdev->common->msix_config);
    /* Verify we had enough resources to assign the vector */
    /* Will also flush the write out to device */
    return ioread16(vdev, &vdev->common->msix_config);
}
/* Route this queue's interrupts to MSI-X @vector. The queue must be
 * selected through queue_select first; the read-back verifies acceptance
 * and flushes the write. */
static u16 vio_modern_set_queue_vector(struct virtqueue *vq, u16 vector)
{
    VirtIODevice *vdev = vq->vdev;
    volatile struct virtio_pci_common_cfg *cfg = vdev->common;

    iowrite16(vdev, (u16)vq->index, &cfg->queue_select);
    iowrite16(vdev, vector, &cfg->queue_msix_vector);
    return ioread16(vdev, &cfg->queue_msix_vector);
}
/* Page-rounded size of a ring with @num entries (split or packed). */
static size_t vring_pci_size(u16 num, bool packed)
{
    /* We only need a cacheline separation. */
    return (size_t)ROUND_TO_PAGES(vring_size(num, SMP_CACHE_BYTES, packed));
}
/* Report allocation requirements of modern queue @index: ring entry
 * count, contiguous ring bytes and heap bytes for the control block.
 *
 * Returns STATUS_NOT_FOUND for a nonexistent or size-0 queue and
 * STATUS_INVALID_PARAMETER when the reported size is not a power of two.
 */
static NTSTATUS vio_modern_query_vq_alloc(VirtIODevice *vdev,
                                          unsigned index,
                                          unsigned short *pNumEntries,
                                          unsigned long *pRingSize,
                                          unsigned long *pHeapSize)
{
    volatile struct virtio_pci_common_cfg *cfg = vdev->common;
    u16 num;

    if (index >= ioread16(vdev, &cfg->num_queues)) {
        return STATUS_NOT_FOUND;
    }

    /* Select the queue we're interested in */
    iowrite16(vdev, (u16)index, &cfg->queue_select);

    /* Check if queue is either not available or already active. */
    num = ioread16(vdev, &cfg->queue_size);
    /* QEMU has a bug where queues don't revert to inactive on device
     * reset. Skip checking the queue_enable field until it is fixed.
     */
    if (!num /*|| ioread16(vdev, &cfg->queue_enable)*/) {
        return STATUS_NOT_FOUND;
    }

    /* ring sizes are required to be powers of two */
    if (num & (num - 1)) {
        DPrintf(0, "%p: bad queue size %u", vdev, num);
        return STATUS_INVALID_PARAMETER;
    }

    *pNumEntries = num;
    *pRingSize = (unsigned long)vring_pci_size(num, vdev->packed_ring);
    *pHeapSize = vring_control_block_size(num, vdev->packed_ring);

    return STATUS_SUCCESS;
}
/* Allocate and activate modern queue @index.
 *
 * Allocates the contiguous ring pages (halving the requested entry count
 * until the allocation succeeds) and a separate heap block for the queue
 * control data, builds a split or packed virtqueue depending on the
 * negotiated features, programs the ring addresses into the common
 * config structure, resolves the queue's notification address, assigns
 * its MSI-X vector and finally enables the queue.
 *
 * All allocations are released on every failure path.
 */
static NTSTATUS vio_modern_setup_vq(struct virtqueue **queue,
                                    VirtIODevice *vdev,
                                    VirtIOQueueInfo *info,
                                    unsigned index,
                                    u16 msix_vec)
{
    volatile struct virtio_pci_common_cfg *cfg = vdev->common;
    struct virtqueue *vq;
    void *vq_addr;
    u16 off;
    unsigned long ring_size, heap_size;
    NTSTATUS status;

    /* select the queue and query allocation parameters */
    status = vio_modern_query_vq_alloc(vdev, index, &info->num, &ring_size, &heap_size);
    if (!NT_SUCCESS(status)) {
        return status;
    }

    /* get offset of notification word for this vq */
    off = ioread16(vdev, &cfg->queue_notify_off);

    /* try to allocate contiguous pages, scale down on failure */
    while (!(info->queue = mem_alloc_contiguous_pages(vdev, vring_pci_size(info->num, vdev->packed_ring)))) {
        if (info->num > 0) {
            info->num /= 2;
        } else {
            return STATUS_INSUFFICIENT_RESOURCES;
        }
    }

    vq_addr = mem_alloc_nonpaged_block(vdev, heap_size);
    if (vq_addr == NULL) {
        /* BUGFIX: the contiguous ring pages allocated above were leaked
         * on this path; release them before returning */
        mem_free_contiguous_pages(vdev, info->queue);
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    /* create the vring */
    if (vdev->packed_ring) {
        vq = vring_new_virtqueue_packed(index, info->num,
            SMP_CACHE_BYTES, vdev,
            info->queue, vp_notify, vq_addr);
    } else {
        vq = vring_new_virtqueue_split(index, info->num,
            SMP_CACHE_BYTES, vdev,
            info->queue, vp_notify, vq_addr);
    }
    if (!vq) {
        status = STATUS_INSUFFICIENT_RESOURCES;
        goto err_new_queue;
    }

    /* activate the queue: program entry count and the physical addresses
     * of the descriptor, available and used areas */
    iowrite16(vdev, info->num, &cfg->queue_size);
    iowrite64_twopart(vdev, mem_get_physical_address(vdev, info->queue),
        &cfg->queue_desc_lo, &cfg->queue_desc_hi);
    iowrite64_twopart(vdev, mem_get_physical_address(vdev, vq->avail_va),
        &cfg->queue_avail_lo, &cfg->queue_avail_hi);
    iowrite64_twopart(vdev, mem_get_physical_address(vdev, vq->used_va),
        &cfg->queue_used_lo, &cfg->queue_used_hi);

    if (vdev->notify_base) {
        /* offset should not wrap */
        if ((u64)off * vdev->notify_offset_multiplier + 2
            > vdev->notify_len) {
            DPrintf(0,
                "%p: bad notification offset %u (x %u) "
                "for queue %u > %zd",
                vdev,
                off, vdev->notify_offset_multiplier,
                index, vdev->notify_len);
            status = STATUS_INVALID_PARAMETER;
            goto err_map_notify;
        }
        vq->notification_addr = (void *)(vdev->notify_base +
            off * vdev->notify_offset_multiplier);
    } else {
        /* notify region too large to pre-map: map this queue's 2-byte
         * notification word individually */
        vq->notification_addr = vio_modern_map_capability(vdev,
            vdev->notify_map_cap, 2, 2,
            off * vdev->notify_offset_multiplier, 2,
            NULL);
    }

    if (!vq->notification_addr) {
        status = STATUS_INSUFFICIENT_RESOURCES;
        goto err_map_notify;
    }

    if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
        msix_vec = vdev->device->set_queue_vector(vq, msix_vec);
        if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
            status = STATUS_DEVICE_BUSY;
            goto err_assign_vector;
        }
    }

    /* enable the queue */
    iowrite16(vdev, 1, &vdev->common->queue_enable);

    *queue = vq;
    return STATUS_SUCCESS;

err_assign_vector:
err_map_notify:
    virtqueue_shutdown(vq);
err_new_queue:
    mem_free_nonpaged_block(vdev, vq_addr);
    mem_free_contiguous_pages(vdev, info->queue);
    return status;
}
/* Deactivate and free modern queue described by @info: detach the MSI-X
 * vector (if used), shut down the ring, then release the control block
 * and the contiguous ring pages. Note that mem_free_nonpaged_block frees
 * the virtqueue object itself, so callers must not touch vq afterwards. */
static void vio_modern_del_vq(VirtIOQueueInfo *info)
{
    struct virtqueue *vq = info->vq;
    VirtIODevice *vdev = vq->vdev;

    iowrite16(vdev, (u16)vq->index, &vdev->common->queue_select);

    if (vdev->msix_used) {
        iowrite16(vdev, VIRTIO_MSI_NO_VECTOR, &vdev->common->queue_msix_vector);
        /* Flush the write out to device */
        ioread16(vdev, &vdev->common->queue_msix_vector);
    }

    virtqueue_shutdown(vq);

    mem_free_nonpaged_block(vdev, vq);
    mem_free_contiguous_pages(vdev, info->queue);
}
/* Modern (virtio 1.0+) transport dispatch table, installed by
 * vio_modern_initialize. */
static const struct virtio_device_ops virtio_pci_device_ops = {
    /* .get_config = */ vio_modern_get_config,
    /* .set_config = */ vio_modern_set_config,
    /* .get_config_generation = */ vio_modern_get_generation,
    /* .get_status = */ vio_modern_get_status,
    /* .set_status = */ vio_modern_set_status,
    /* .reset = */ vio_modern_reset,
    /* .get_features = */ vio_modern_get_features,
    /* .set_features = */ vio_modern_set_features,
    /* .set_config_vector = */ vio_modern_set_config_vector,
    /* .set_queue_vector = */ vio_modern_set_queue_vector,
    /* .query_queue_alloc = */ vio_modern_query_vq_alloc,
    /* .setup_queue = */ vio_modern_setup_vq,
    /* .delete_queue = */ vio_modern_del_vq,
};
/* Walk the PCI capability chain starting from the pointer stored at
 * config-space location @offset and return the offset of the next
 * vendor-specific capability, or 0 if none is found. */
static u8 find_next_pci_vendor_capability(VirtIODevice *vdev, u8 offset)
{
    int remaining;

    /* Load the first link of the chain from the given pointer location. */
    if (pci_read_config_byte(vdev, offset, &offset) != 0) {
        return 0;
    }

    /* Bound the walk at 48 links so a looped or corrupt list cannot hang
     * us; valid capability offsets start at 0x40. */
    for (remaining = 48; remaining > 0 && offset >= 0x40; remaining--) {
        u8 id = 0;

        offset &= ~3;
        if (pci_read_config_byte(vdev, offset + offsetof(PCI_CAPABILITIES_HEADER,
            CapabilityID), &id) != 0) {
            return 0;
        }
        if (id == 0xFF) {
            return 0;
        }
        if (id == PCI_CAPABILITY_ID_VENDOR_SPECIFIC) {
            return offset;
        }
        if (pci_read_config_byte(vdev, offset + offsetof(PCI_CAPABILITIES_HEADER,
            Next), &offset) != 0) {
            return 0;
        }
    }
    return 0;
}
/* Locate the first vendor-specific capability of the device, or return 0
 * when the device has no capability list or no vendor capability. */
static u8 find_first_pci_vendor_capability(VirtIODevice *vdev)
{
    u8 hdr_type, offset;
    u16 status;

    if (pci_read_config_byte(vdev, offsetof(PCI_COMMON_HEADER, HeaderType), &hdr_type) != 0) {
        return 0;
    }
    if (pci_read_config_word(vdev, offsetof(PCI_COMMON_HEADER, Status), &status) != 0) {
        return 0;
    }
    /* No capability list bit -> nothing to walk. */
    if ((status & PCI_STATUS_CAPABILITIES_LIST) == 0) {
        return 0;
    }

    /* The capabilities pointer lives at a header-type specific location. */
    if ((hdr_type & ~PCI_MULTIFUNCTION) == PCI_BRIDGE_TYPE) {
        offset = offsetof(PCI_COMMON_HEADER, u.type1.CapabilitiesPtr);
    } else if ((hdr_type & ~PCI_MULTIFUNCTION) == PCI_CARDBUS_BRIDGE_TYPE) {
        offset = offsetof(PCI_COMMON_HEADER, u.type2.CapabilitiesPtr);
    } else {
        offset = offsetof(PCI_COMMON_HEADER, u.type0.CapabilitiesPtr);
    }

    if (offset != 0) {
        offset = find_next_pci_vendor_capability(vdev, offset);
    }
    return offset;
}
/* Populate Offsets with virtio vendor capability offsets within the PCI config space.
 * Offsets is indexed by the capability's cfg_type; entries whose BAR is
 * out of range, whose cfg_type exceeds nOffsets, or whose BAR has zero
 * length are skipped. Later capabilities of the same cfg_type overwrite
 * earlier ones. */
static void find_pci_vendor_capabilities(VirtIODevice *vdev, int *Offsets, size_t nOffsets)
{
    u8 offset = find_first_pci_vendor_capability(vdev);
    while (offset > 0) {
        u8 cfg_type, bar;
        pci_read_config_byte(vdev, offset + offsetof(struct virtio_pci_cap, cfg_type), &cfg_type);
        pci_read_config_byte(vdev, offset + offsetof(struct virtio_pci_cap, bar), &bar);

        if (bar < PCI_TYPE0_ADDRESSES &&
            cfg_type < nOffsets &&
            pci_get_resource_len(vdev, bar) > 0) {
            Offsets[cfg_type] = offset;
        }

        offset = find_next_pci_vendor_capability(vdev, offset + offsetof(PCI_CAPABILITIES_HEADER, Next));
    }
}
/* Modern device initialization.
 *
 * Scans the PCI vendor capabilities for the virtio 1.0 structures, then
 * maps the common config, ISR, notification and (optional) device config
 * regions and installs the modern dispatch table.
 *
 * Returns STATUS_DEVICE_NOT_CONNECTED when no common-config capability is
 * present (caller should fall back to the legacy transport) and
 * STATUS_INVALID_PARAMETER when a required capability is missing or a
 * mapping fails.
 */
NTSTATUS vio_modern_initialize(VirtIODevice *vdev)
{
    int capabilities[VIRTIO_PCI_CAP_PCI_CFG];

    u32 notify_length;
    u32 notify_offset;

    RtlZeroMemory(capabilities, sizeof(capabilities));
    find_pci_vendor_capabilities(vdev, capabilities, VIRTIO_PCI_CAP_PCI_CFG);

    /* Check for a common config, if not found use legacy mode */
    if (!capabilities[VIRTIO_PCI_CAP_COMMON_CFG]) {
        DPrintf(0, "%s(%p): device not found\n", __FUNCTION__, vdev);
        return STATUS_DEVICE_NOT_CONNECTED;
    }

    /* Check isr and notify caps, if not found fail */
    if (!capabilities[VIRTIO_PCI_CAP_ISR_CFG] || !capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG]) {
        DPrintf(0, "%s(%p): missing capabilities %i/%i/%i\n",
            __FUNCTION__, vdev,
            capabilities[VIRTIO_PCI_CAP_COMMON_CFG],
            capabilities[VIRTIO_PCI_CAP_ISR_CFG],
            capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG]);
        return STATUS_INVALID_PARAMETER;
    }

    /* Map bars according to the capabilities */
    vdev->common = vio_modern_map_simple_capability(vdev,
        capabilities[VIRTIO_PCI_CAP_COMMON_CFG],
        sizeof(struct virtio_pci_common_cfg), 4);
    if (!vdev->common) {
        return STATUS_INVALID_PARAMETER;
    }

    vdev->isr = vio_modern_map_simple_capability(vdev,
        capabilities[VIRTIO_PCI_CAP_ISR_CFG],
        sizeof(u8), 1);
    if (!vdev->isr) {
        return STATUS_INVALID_PARAMETER;
    }

    /* Read notify_off_multiplier from config space. */
    pci_read_config_dword(vdev,
        capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG] + offsetof(struct virtio_pci_notify_cap,
        notify_off_multiplier),
        &vdev->notify_offset_multiplier);

    /* Read notify length and offset from config space. */
    pci_read_config_dword(vdev,
        capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG] + offsetof(struct virtio_pci_notify_cap,
        cap.length),
        &notify_length);
    pci_read_config_dword(vdev,
        capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG] + offsetof(struct virtio_pci_notify_cap,
        cap.offset),
        &notify_offset);

    /* Map the notify capability if it's small enough.
     * Otherwise, map each VQ individually later.
     */
    if (notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
        vdev->notify_base = vio_modern_map_capability(vdev,
            capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG], 2, 2,
            0, notify_length,
            &vdev->notify_len);
        if (!vdev->notify_base) {
            return STATUS_INVALID_PARAMETER;
        }
    } else {
        vdev->notify_map_cap = capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG];
    }

    /* Map the device config capability, the PAGE_SIZE size is a guess */
    if (capabilities[VIRTIO_PCI_CAP_DEVICE_CFG]) {
        vdev->config = vio_modern_map_capability(vdev,
            capabilities[VIRTIO_PCI_CAP_DEVICE_CFG], 0, 4,
            0, PAGE_SIZE,
            &vdev->config_len);
        if (!vdev->config) {
            return STATUS_INVALID_PARAMETER;
        }
    }

    vdev->device = &virtio_pci_device_ops;

    return STATUS_SUCCESS;
}

View file

@ -0,0 +1,651 @@
/*
* Packed virtio ring manipulation routines
*
* Copyright 2019 Red Hat, Inc.
*
* Authors:
* Yuri Benditovich <ybendito@redhat.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "osdep.h"
#include "virtio_pci.h"
#include "VirtIO.h"
#include "kdebugprint.h"
#include "virtio_ring.h"
#include "windows/virtio_ring_allocation.h"
#include <pshpack1.h>
/* On-ring structures shared with the device; byte-packed (pshpack1 above)
 * so their layout matches the wire format exactly. */
struct vring_packed_desc_event {
    /* Descriptor Ring Change Event Offset/Wrap Counter. */
    __le16 off_wrap;
    /* Descriptor Ring Change Event Flags. */
    __le16 flags;
};
struct vring_packed_desc {
    /* Buffer Address. */
    __virtio64 addr;
    /* Buffer Length. */
    __le32 len;
    /* Buffer ID. */
    __le16 id;
    /* The flags depending on descriptor type. */
    __le16 flags;
};
#include <poppack.h>
/* Bug-check the machine if a ring invariant is violated.
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement; the original bare compound statement followed by the
 * caller's ';' breaks unbraced if/else chains. */
#define BUG_ON(condition) do { if (condition) { KeBugCheck(0xE0E1E2E3); } } while (0)
/* Log a fatal ring inconsistency for the given queue, then bug-check.
 * NOTE(review): this is always fatal (BUG_ON(true)); statements that
 * follow it in callers (e.g. "return NULL;") are never reached. */
#define BAD_RING(vq, fmt, ...) \
    do { \
        DPrintf(0, "%s: queue %d: " fmt, __FUNCTION__, vq->vq.index, __VA_ARGS__); \
        BUG_ON(true); \
    } while (0)
/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT 1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT 4
/*
 * Mark a descriptor as available or used in packed ring.
 * Notice: they are defined as shifts instead of shifted values.
 */
#define VRING_PACKED_DESC_F_AVAIL 7
#define VRING_PACKED_DESC_F_USED 15
/* Enable events in packed ring. */
#define VRING_PACKED_EVENT_FLAG_ENABLE 0x0
/* Disable events in packed ring. */
#define VRING_PACKED_EVENT_FLAG_DISABLE 0x1
/*
 * Enable events for a specific descriptor in packed ring.
 * (as specified by Descriptor Ring Change Event Offset/Wrap Counter).
 * Only valid if VIRTIO_RING_F_EVENT_IDX has been negotiated.
 */
#define VRING_PACKED_EVENT_FLAG_DESC 0x2
/*
 * Wrap counter bit shift in event suppression structure
 * of packed ring.
 */
#define VRING_PACKED_EVENT_F_WRAP_CTR 15
/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event?
 */
/* Event-index notification test: given the other side's requested
 * event_idx and our index having advanced from old to new_idx, decide
 * whether a notification is due.  Free-running u16 arithmetic makes
 * this wrap-safe.  (Same hold-off scheme as Xen's req_event/req_prod,
 * offset by one; virtio event indexes start at 0, Xen's at 1.) */
static inline bool vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
    /* How far we advanced in total, and how far past the event point. */
    __u16 advanced = (__u16)(new_idx - old);
    __u16 past_event = (__u16)(new_idx - event_idx - 1);
    return past_event < advanced;
}
/* Driver-private bookkeeping for one buffer ID (not visible to device). */
struct vring_desc_state_packed {
    void *data; /* Data for callback. */
    u16 num; /* Descriptor list length. */
    u16 next; /* The next desc state in a list. */
    u16 last; /* The last desc state in a list. */
};
/* Driver-side control block for one packed virtqueue; embeds the
 * generic struct virtqueue as its first member so the two can be cast
 * back and forth (see the packedvq() macro below). */
struct virtqueue_packed {
    struct virtqueue vq;
    /* Number we've added since last sync. */
    unsigned int num_added;
    /* Head of free buffer list. */
    unsigned int free_head;
    /* Number of free descriptors */
    unsigned int num_free;
    /* Last used index we've seen. */
    u16 last_used_idx;
    /* Avail used flags. */
    u16 avail_used_flags;
    struct
    {
        /* Driver ring wrap counter. */
        bool avail_wrap_counter;
        /* Device ring wrap counter. */
        bool used_wrap_counter;
        /* Index of the next avail descriptor. */
        u16 next_avail_idx;
        /*
         * Last written value to driver->flags in
         * guest byte order.
         */
        u16 event_flags_shadow;
        struct {
            unsigned int num;
            struct vring_packed_desc *desc;
            struct vring_packed_desc_event *driver;
            struct vring_packed_desc_event *device;
        } vring;
        /* Per-descriptor state. */
        struct vring_desc_state_packed *desc_state;
    } packed;
    /* Flexible array: qsize entries, see vring_control_block_size_packed. */
    struct vring_desc_state_packed desc_states[];
};
#define packedvq(vq) ((struct virtqueue_packed *)vq)
/* Size of the driver-private control block for a packed queue of qsize
 * entries: the virtqueue_packed header plus the trailing flexible array
 * of per-descriptor state. */
unsigned int vring_control_block_size_packed(u16 qsize)
{
    unsigned int header_bytes = sizeof(struct virtqueue_packed);
    unsigned int state_bytes = sizeof(struct vring_desc_state_packed);
    return header_bytes + state_bytes * qsize;
}
/* Bytes of shared (device-visible) memory needed for a packed queue of
 * num entries.
 * NOTE(review): the align parameter is accepted for signature parity
 * with the split-ring vring_size_split but is not used here — TODO
 * confirm no caller expects alignment padding to be included. */
unsigned long vring_size_packed(unsigned int num, unsigned long align)
{
    /* array of descriptors */
    unsigned long res = num * sizeof(struct vring_packed_desc);
    /* driver and device event */
    res += 2 * sizeof(struct vring_packed_desc_event);
    return res;
}
/* Exposes a scatter-gather buffer list to the device on a packed ring.
 * Either writes all entries into the caller-supplied indirect page and
 * publishes a single INDIRECT descriptor, or consumes out+in ring
 * descriptors directly.  Returns 0 on success, -ENOSPC when the ring
 * has too few free descriptors.  The statement order around
 * KeMemoryBarrier() is part of the ring protocol — the head
 * descriptor's flags must be written last. */
static int virtqueue_add_buf_packed(
    struct virtqueue *_vq, /* the queue */
    struct scatterlist sg[], /* sg array of length out + in */
    unsigned int out, /* number of driver->device buffer descriptors in sg */
    unsigned int in, /* number of device->driver buffer descriptors in sg */
    void *opaque, /* later returned from virtqueue_get_buf */
    void *va_indirect, /* VA of the indirect page or NULL */
    ULONGLONG phys_indirect) /* PA of the indirect page or 0 */
{
    struct virtqueue_packed *vq = packedvq(_vq);
    unsigned int descs_used;
    struct vring_packed_desc *desc;
    u16 head, id, i;
    descs_used = out + in;
    head = vq->packed.next_avail_idx;
    id = (u16)vq->free_head;
    BUG_ON(descs_used == 0);
    BUG_ON(id >= vq->packed.vring.num);
    if (va_indirect && vq->num_free > 0) {
        /* Indirect path: the chain lives in the side page; only one
         * ring descriptor (and one buffer ID) is consumed. */
        desc = va_indirect;
        for (i = 0; i < descs_used; i++) {
            desc[i].flags = i < out ? 0 : VRING_DESC_F_WRITE;
            desc[i].addr = sg[i].physAddr.QuadPart;
            desc[i].len = sg[i].length;
        }
        vq->packed.vring.desc[head].addr = phys_indirect;
        vq->packed.vring.desc[head].len = descs_used * sizeof(struct vring_packed_desc);
        vq->packed.vring.desc[head].id = id;
        /* Descriptor body must be visible before flags mark it avail. */
        KeMemoryBarrier();
        vq->packed.vring.desc[head].flags = VRING_DESC_F_INDIRECT | vq->avail_used_flags;
        DPrintf(5, "Added buffer head %i to Q%d\n", head, vq->vq.index);
        head++;
        if (head >= vq->packed.vring.num) {
            /* Wrapped: toggle our wrap counter and flip the sense of
             * the AVAIL/USED bits written into future descriptors. */
            head = 0;
            vq->packed.avail_wrap_counter ^= 1;
            vq->avail_used_flags ^=
                1 << VRING_PACKED_DESC_F_AVAIL |
                1 << VRING_PACKED_DESC_F_USED;
        }
        vq->packed.next_avail_idx = head;
        /* We're using some buffers from the free list. */
        vq->num_free -= 1;
        vq->num_added += 1;
        vq->free_head = vq->packed.desc_state[id].next;
        /* Store token and indirect buffer state. */
        vq->packed.desc_state[id].num = 1;
        vq->packed.desc_state[id].data = opaque;
        vq->packed.desc_state[id].last = id;
    } else {
        unsigned int n;
        u16 curr, prev, head_flags;
        if (vq->num_free < descs_used) {
            DPrintf(6, "Can't add buffer to Q%d\n", vq->vq.index);
            return -ENOSPC;
        }
        desc = vq->packed.vring.desc;
        i = head;
        curr = id;
        /* Direct path: one ring descriptor per sg entry; all carry the
         * same buffer ID (the head's). */
        for (n = 0; n < descs_used; n++) {
            u16 flags = vq->avail_used_flags;
            flags |= n < out ? 0 : VRING_DESC_F_WRITE;
            if (n != descs_used - 1) {
                flags |= VRING_DESC_F_NEXT;
            }
            desc[i].addr = sg[n].physAddr.QuadPart;
            desc[i].len = sg[n].length;
            desc[i].id = id;
            if (n == 0) {
                /* Head flags are deferred until after the barrier below;
                 * descs_used >= 1, so head_flags is always assigned. */
                head_flags = flags;
            }
            else {
                desc[i].flags = flags;
            }
            prev = curr;
            curr = vq->packed.desc_state[curr].next;
            if (++i >= vq->packed.vring.num) {
                i = 0;
                vq->avail_used_flags ^=
                    1 << VRING_PACKED_DESC_F_AVAIL |
                    1 << VRING_PACKED_DESC_F_USED;
            }
        }
        if (i < head)
            vq->packed.avail_wrap_counter ^= 1;
        /* We're using some buffers from the free list. */
        vq->num_free -= descs_used;
        /* Update free pointer */
        vq->packed.next_avail_idx = i;
        vq->free_head = curr;
        /* Store token. */
        vq->packed.desc_state[id].num = (u16)descs_used;
        vq->packed.desc_state[id].data = opaque;
        vq->packed.desc_state[id].last = prev;
        /*
         * A driver MUST NOT make the first descriptor in the list
         * available before all subsequent descriptors comprising
         * the list are made available.
         */
        KeMemoryBarrier();
        vq->packed.vring.desc[head].flags = head_flags;
        vq->num_added += descs_used;
        DPrintf(5, "Added buffer head @%i+%d to Q%d\n", head, descs_used, vq->vq.index);
    }
    return 0;
}
/* Return the descriptor chain owning buffer ID id to the free list and
 * clear its opaque token.  Touches only driver-private state, never the
 * shared ring. */
static void detach_buf_packed(struct virtqueue_packed *vq, unsigned int id)
{
    struct vring_desc_state_packed *ds = &vq->packed.desc_state[id];
    /* Drop the caller's token so the slot reads as unused. */
    ds->data = NULL;
    /* Push the chain onto the head of the free list: link its tail to
     * the old head, then make id the new head. */
    vq->packed.desc_state[ds->last].next = (u16)vq->free_head;
    vq->free_head = id;
    vq->num_free += ds->num;
}
/* Returns (and detaches) the token of one still-outstanding buffer, or
 * NULL when none remain.  Used during teardown: the caller invokes this
 * repeatedly to reclaim every buffer the device never returned. */
static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    unsigned int i;
    void *buf;
    /* Scan per-descriptor state for any slot still holding a token. */
    for (i = 0; i < vq->packed.vring.num; i++) {
        if (!vq->packed.desc_state[i].data)
            continue;
        /* detach_buf clears data, so grab it now. */
        buf = vq->packed.desc_state[i].data;
        detach_buf_packed(vq, i);
        return buf;
    }
    /* That should have freed everything. */
    BUG_ON(vq->num_free != vq->packed.vring.num);
    return NULL;
}
/* Suppress device->driver notifications for this queue by publishing
 * VRING_PACKED_EVENT_FLAG_DISABLE in the driver event area.  No-op if
 * already disabled (event_flags_shadow caches the last written value). */
static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
        return; /* already published */
    }
    vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
    vq->packed.vring.driver->flags = vq->packed.event_flags_shadow;
}
/* Check whether the descriptor at idx has been consumed by the device.
 * Per the packed-ring protocol a slot is "used" when its AVAIL and USED
 * flag bits are equal to each other and both match the expected used
 * wrap counter. */
static inline bool is_used_desc_packed(const struct virtqueue_packed *vq,
    u16 idx, bool used_wrap_counter)
{
    u16 flags = vq->packed.vring.desc[idx].flags;
    bool avail_bit = (flags & (1 << VRING_PACKED_DESC_F_AVAIL)) != 0;
    bool used_bit = (flags & (1 << VRING_PACKED_DESC_F_USED)) != 0;
    return (avail_bit == used_bit) && (used_bit == used_wrap_counter);
}
/* Polls whether the descriptor identified by off_wrap (index in the low
 * 15 bits, wrap counter in bit VRING_PACKED_EVENT_F_WRAP_CTR) has been
 * used.  off_wrap is the value previously returned by
 * virtqueue_enable_cb_prepare_packed. */
static inline bool virtqueue_poll_packed(struct virtqueue_packed *vq, u16 off_wrap)
{
    bool wrap_counter;
    u16 used_idx;
    /* Order reads of the ring after the caller's enable_cb_prepare. */
    KeMemoryBarrier();
    wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
    used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
    return is_used_desc_packed(vq, used_idx, wrap_counter);
}
/* Re-enables notifications for this queue and returns an off_wrap
 * snapshot (last_used_idx combined with the used wrap counter) that the
 * caller passes to virtqueue_poll_packed to detect buffers that arrived
 * in the meantime.  With event suppression negotiated it arms a
 * per-descriptor event (FLAG_DESC); otherwise plain FLAG_ENABLE. */
static inline unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue_packed *vq)
{
    bool event_suppression_enabled = vq->vq.vdev->event_suppression_enabled;
    /*
     * We optimistically turn back on interrupts, then check if there was
     * more to do.
     */
    if (event_suppression_enabled) {
        vq->packed.vring.driver->off_wrap =
            vq->last_used_idx |
            (vq->packed.used_wrap_counter <<
                VRING_PACKED_EVENT_F_WRAP_CTR);
        /*
         * We need to update event offset and event wrap
         * counter first before updating event flags.
         */
        KeMemoryBarrier();
    }
    if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
        vq->packed.event_flags_shadow = event_suppression_enabled ?
            VRING_PACKED_EVENT_FLAG_DESC :
            VRING_PACKED_EVENT_FLAG_ENABLE;
        vq->packed.vring.driver->flags = vq->packed.event_flags_shadow;
    }
    return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
        VRING_PACKED_EVENT_F_WRAP_CTR);
}
static bool virtqueue_enable_cb_packed(struct virtqueue *_vq)
{
struct virtqueue_packed *vq = packedvq(_vq);
unsigned last_used_idx = virtqueue_enable_cb_prepare_packed(vq);
return !virtqueue_poll_packed(vq, (u16)last_used_idx);
}
/* Re-enables notifications, but (with event suppression) only after
 * roughly 3/4 of the currently outstanding buffers have been returned.
 * Returns false when a used buffer is already pending (caller should
 * poll), true when it is safe to wait for the delayed event. */
static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    bool event_suppression_enabled = vq->vq.vdev->event_suppression_enabled;
    u16 used_idx, wrap_counter;
    u16 bufs;
    /*
     * We optimistically turn back on interrupts, then check if there was
     * more to do.
     */
    if (event_suppression_enabled) {
        /* TODO: tune this threshold */
        bufs = (vq->packed.vring.num - vq->num_free) * 3 / 4;
        wrap_counter = vq->packed.used_wrap_counter;
        used_idx = vq->last_used_idx + bufs;
        if (used_idx >= vq->packed.vring.num) {
            /* Event point wraps past the ring end. */
            used_idx -= (u16)vq->packed.vring.num;
            wrap_counter ^= 1;
        }
        vq->packed.vring.driver->off_wrap = used_idx |
            (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR);
        /*
         * We need to update event offset and event wrap
         * counter first before updating event flags.
         */
        KeMemoryBarrier();
    }
    if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
        vq->packed.event_flags_shadow = event_suppression_enabled ?
            VRING_PACKED_EVENT_FLAG_DESC :
            VRING_PACKED_EVENT_FLAG_ENABLE;
        vq->packed.vring.driver->flags = vq->packed.event_flags_shadow;
    }
    /*
     * We need to update event suppression structure first
     * before re-checking for more used buffers.
     */
    KeMemoryBarrier();
    if (is_used_desc_packed(vq,
        vq->last_used_idx,
        vq->packed.used_wrap_counter)) {
        return false;
    }
    return true;
}
/* Reports whether device->driver notifications are currently enabled
 * for this queue.
 * Fix(review): the imported code returned (flags & DISABLE), i.e. TRUE
 * when interrupts were *disabled* — the opposite sense of the
 * split-ring virtqueue_is_interrupt_enabled_split op, which callers
 * reach through the same function pointer.  Return the "enabled" sense
 * so both ring layouts agree. */
static BOOLEAN virtqueue_is_interrupt_enabled_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    return !(vq->packed.event_flags_shadow & VRING_PACKED_EVENT_FLAG_DISABLE);
}
/* Re-initializes this queue in place: zeroes the shared ring memory and
 * rebuilds the control block over the same pages via
 * vring_new_virtqueue_packed (return value intentionally ignored — it
 * is the same control block, _vq). */
static void virtqueue_shutdown_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    unsigned int num = vq->packed.vring.num;
    void *pages = vq->packed.vring.desc;
    /* Same alignment rule used at creation time: legacy devices (with
     * a configured address) use PAGE_SIZE, modern ones SMP_CACHE_BYTES.
     * NOTE(review): presumably mirrors the allocation path — confirm. */
    unsigned int vring_align = _vq->vdev->addr ? PAGE_SIZE : SMP_CACHE_BYTES;
    RtlZeroMemory(pages, vring_size_packed(num, vring_align));
    vring_new_virtqueue_packed(
        _vq->index,
        num,
        vring_align,
        _vq->vdev,
        pages,
        _vq->notification_cb,
        _vq);
}
static inline bool more_used_packed(const struct virtqueue_packed *vq)
{
return is_used_desc_packed(vq, vq->last_used_idx,
vq->packed.used_wrap_counter);
}
/* Fetches one device-returned buffer: returns the opaque token passed
 * to virtqueue_add_buf_packed and stores the byte count written by the
 * device in *len, or returns NULL when nothing is pending.  Invalid
 * ring state triggers BAD_RING (fatal). */
static void *virtqueue_get_buf_packed(
    struct virtqueue *_vq, /* the queue */
    unsigned int *len) /* number of bytes returned by the device */
{
    struct virtqueue_packed *vq = packedvq(_vq);
    u16 last_used, id;
    void *ret;
    if (!more_used_packed(vq)) {
        DPrintf(6, "%s: No more buffers in queue\n", __FUNCTION__);
        return NULL;
    }
    /* Only get used elements after they have been exposed by host. */
    KeMemoryBarrier();
    last_used = vq->last_used_idx;
    id = vq->packed.vring.desc[last_used].id;
    *len = vq->packed.vring.desc[last_used].len;
    if (id >= vq->packed.vring.num) {
        BAD_RING(vq, "id %u out of range\n", id);
        return NULL;
    }
    if (!vq->packed.desc_state[id].data) {
        BAD_RING(vq, "id %u is not a head!\n", id);
        return NULL;
    }
    /* detach_buf_packed clears data, so grab it now. */
    ret = vq->packed.desc_state[id].data;
    detach_buf_packed(vq, id);
    /* detach_buf_packed clears only .data; .num is still valid here and
     * tells us how many ring slots this chain occupied. */
    vq->last_used_idx += vq->packed.desc_state[id].num;
    if (vq->last_used_idx >= vq->packed.vring.num) {
        vq->last_used_idx -= (u16)vq->packed.vring.num;
        vq->packed.used_wrap_counter ^= 1;
    }
    /*
     * If we expect an interrupt for the next entry, tell host
     * by writing event index and flush out the write before
     * the read in the next get_buf call.
     */
    if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC) {
        vq->packed.vring.driver->off_wrap = vq->last_used_idx |
            ((u16)vq->packed.used_wrap_counter <<
                VRING_PACKED_EVENT_F_WRAP_CTR);
        KeMemoryBarrier();
    }
    return ret;
}
static BOOLEAN virtqueue_has_buf_packed(struct virtqueue *_vq)
{
struct virtqueue_packed *vq = packedvq(_vq);
return more_used_packed(vq);
}
/* Decides whether the device must be notified after recently added
 * buffers.  Reads the device event area atomically (both 16-bit fields
 * in one 32-bit load via the union) and applies the event-index
 * hold-off test when FLAG_DESC is armed. */
static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    u16 new, old, off_wrap, flags, wrap_counter, event_idx;
    bool needs_kick;
    union {
        struct {
            __le16 off_wrap;
            __le16 flags;
        };
        u32 value32;
    } snapshot;
    /*
     * We need to expose the new flags value before checking notification
     * suppressions.
     */
    KeMemoryBarrier();
    old = vq->packed.next_avail_idx - vq->num_added;
    new = vq->packed.next_avail_idx;
    vq->num_added = 0;
    /* Single 32-bit read so off_wrap and flags are mutually consistent. */
    snapshot.value32 = *(u32 *)vq->packed.vring.device;
    flags = snapshot.flags;
    if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
        /* Simple enable/disable mode: kick unless suppressed. */
        needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
        goto out;
    }
    off_wrap = snapshot.off_wrap;
    wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
    event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
    if (wrap_counter != vq->packed.avail_wrap_counter)
        event_idx -= (u16)vq->packed.vring.num;
    needs_kick = vring_need_event(event_idx, new, old);
out:
    return needs_kick;
}
/* Notifies the device unconditionally, bypassing the event-suppression
 * test, and resets the pending-add counter (as kick_prepare would). */
static void virtqueue_kick_always_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    /* Make all prior ring writes visible to the device before kicking. */
    KeMemoryBarrier();
    vq->num_added = 0;
    virtqueue_notify(_vq);
}
/* Initializes a new virtqueue using already allocated memory.
 * 'pages' holds the shared ring (descriptors followed by the driver and
 * device event structures); 'control' holds the driver-private
 * virtqueue_packed block (see vring_control_block_size_packed).
 * Returns the embedded generic struct virtqueue. */
struct virtqueue *vring_new_virtqueue_packed(
    unsigned int index, /* virtqueue index */
    unsigned int num, /* virtqueue size (always a power of 2) */
    unsigned int vring_align, /* vring alignment requirement */
    VirtIODevice *vdev, /* the virtio device owning the queue */
    void *pages, /* vring memory */
    void(*notify)(struct virtqueue *), /* notification callback */
    void *control) /* virtqueue memory */
{
    struct virtqueue_packed *vq = packedvq(control);
    unsigned int i;
    vq->vq.vdev = vdev;
    vq->vq.notification_cb = notify;
    vq->vq.index = index;
    /* Event structures sit immediately after the descriptor array:
     * driver event first, then device event. */
    vq->vq.avail_va = (u8 *)pages + num * sizeof(struct vring_packed_desc);
    vq->vq.used_va = (u8 *)vq->vq.avail_va + sizeof(struct vring_packed_desc_event);
    /* initialize the ring */
    vq->packed.vring.num = num;
    vq->packed.vring.desc = pages;
    vq->packed.vring.driver = vq->vq.avail_va;
    vq->packed.vring.device = vq->vq.used_va;
    vq->num_free = num;
    vq->free_head = 0;
    vq->num_added = 0;
    /* Both wrap counters start at 1 per the packed-ring protocol. */
    vq->packed.avail_wrap_counter = 1;
    vq->packed.used_wrap_counter = 1;
    vq->last_used_idx = 0;
    vq->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
    vq->packed.next_avail_idx = 0;
    vq->packed.event_flags_shadow = 0;
    vq->packed.desc_state = vq->desc_states;
    RtlZeroMemory(vq->packed.desc_state, num * sizeof(*vq->packed.desc_state));
    /* Thread the free list through desc_state[].next. */
    for (i = 0; i < num - 1; i++) {
        vq->packed.desc_state[i].next = i + 1;
    }
    /* Wire up the packed implementations of the generic virtqueue ops. */
    vq->vq.add_buf = virtqueue_add_buf_packed;
    vq->vq.detach_unused_buf = virtqueue_detach_unused_buf_packed;
    vq->vq.disable_cb = virtqueue_disable_cb_packed;
    vq->vq.enable_cb = virtqueue_enable_cb_packed;
    vq->vq.enable_cb_delayed = virtqueue_enable_cb_delayed_packed;
    vq->vq.get_buf = virtqueue_get_buf_packed;
    vq->vq.has_buf = virtqueue_has_buf_packed;
    vq->vq.is_interrupt_enabled = virtqueue_is_interrupt_enabled_packed;
    vq->vq.kick_always = virtqueue_kick_always_packed;
    vq->vq.kick_prepare = virtqueue_kick_prepare_packed;
    vq->vq.shutdown = virtqueue_shutdown_packed;
    return &vq->vq;
}

View file

@ -0,0 +1,562 @@
/*
* Virtio ring manipulation routines
*
* Copyright 2017 Red Hat, Inc.
*
* Authors:
* Ladi Prosek <lprosek@redhat.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "osdep.h"
#include "virtio_pci.h"
#include "VirtIO.h"
#include "kdebugprint.h"
#include "virtio_ring.h"
#include "windows/virtio_ring_allocation.h"
/* Ring indexes are free-running; mask them to the (power-of-2) ring size. */
#define DESC_INDEX(num, i) ((i) & ((num) - 1))
/* This marks a buffer as continuing via the next field. */
#define VIRTQ_DESC_F_NEXT 1
/* This marks a buffer as write-only (otherwise read-only). */
#define VIRTQ_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
#define VIRTQ_DESC_F_INDIRECT 4
/* The Host uses this in used->flags to advise the Guest: don't kick me when
 * you add a buffer. It's unreliable, so it's simply an optimization. Guest
 * will still kick if it's out of buffers. */
#define VIRTQ_USED_F_NO_NOTIFY 1
/* The Guest uses this in avail->flags to advise the Host: don't interrupt me
 * when you consume a buffer. It's unreliable, so it's simply an
 * optimization. */
#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
#pragma warning (push)
#pragma warning (disable:4200) /* nonstandard: zero-sized array in struct */
#include <pshpack1.h>
/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
struct vring_desc {
    /* Address (guest-physical). */
    __virtio64 addr;
    /* Length. */
    __virtio32 len;
    /* The flags as indicated above. */
    __virtio16 flags;
    /* We chain unused descriptors via this, too */
    __virtio16 next;
};
/* Driver-written ring of available descriptor heads. */
struct vring_avail {
    __virtio16 flags;
    __virtio16 idx;
    __virtio16 ring[];
};
/* u32 is used here for ids for padding reasons. */
struct vring_used_elem {
    /* Index of start of used descriptor chain. */
    __virtio32 id;
    /* Total length of the descriptor chain which was used (written to) */
    __virtio32 len;
};
/* Device-written ring of consumed descriptor chains. */
struct vring_used {
    __virtio16 flags;
    __virtio16 idx;
    struct vring_used_elem ring[];
};
#include <poppack.h>
/* Alignment requirements for vring elements.
 * When using pre-virtio 1.0 layout, these fall out naturally.
 */
#define VRING_AVAIL_ALIGN_SIZE 2
#define VRING_USED_ALIGN_SIZE 4
#define VRING_DESC_ALIGN_SIZE 16
/* The standard layout for the ring is a continuous chunk of memory which looks
 * like this. We assume num is a power of 2.
 *
 * struct vring
 * {
 *      // The actual descriptors (16 bytes each)
 *      struct vring_desc desc[num];
 *
 *      // A ring of available descriptor heads with free-running index.
 *      __virtio16 avail_flags;
 *      __virtio16 avail_idx;
 *      __virtio16 available[num];
 *      __virtio16 used_event_idx;
 *
 *      // Padding to the next align boundary.
 *      char pad[];
 *
 *      // A ring of used descriptor heads with free-running index.
 *      __virtio16 used_flags;
 *      __virtio16 used_idx;
 *      struct vring_used_elem used[num];
 *      __virtio16 avail_event_idx;
 * };
 */
/* We publish the used event index at the end of the available ring, and vice
 * versa. They are at the end for backwards compatibility. */
/* Driver-side view of one split ring: sizes plus pointers into the
 * shared memory laid out as in the comment above. */
struct vring {
    unsigned int num;
    struct vring_desc *desc;
    struct vring_avail *avail;
    struct vring_used *used;
};
/* EVENT_IDX accessors: the used event lives after avail->ring[num],
 * the avail event after used->ring[num] (see layout comment). */
#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr) (*(__virtio16 *)&(vr)->used->ring[(vr)->num])
/* Points the vring members into the caller-provided contiguous buffer p:
 * descriptors first, avail ring after them, used ring at the next
 * align-boundary past the avail ring (align must be a power of 2). */
static inline void vring_init(struct vring *vr, unsigned int num, void *p,
    unsigned long align)
{
    vr->num = num;
    vr->desc = (struct vring_desc *)p;
    vr->avail = (struct vring_avail *)((__u8 *)p + num * sizeof(struct vring_desc));
    /* &avail->ring[num] + sizeof(__virtio16) skips past used_event_idx,
     * then round up to the alignment boundary. */
    vr->used = (struct vring_used *)(((ULONG_PTR)&vr->avail->ring[num] + sizeof(__virtio16)
        + align - 1) & ~((ULONG_PTR)align - 1));
}
/* Total shared-memory bytes for a split ring of num entries: descriptor
 * table + avail ring (+used_event), padded to align, then the used ring
 * (+avail_event).  Mirrors the vring_init layout. */
static inline unsigned vring_size_split(unsigned int num, unsigned long align)
{
#pragma warning (push)
#pragma warning (disable:4319) /* zero-extending '~' result; intentional */
    return ((sizeof(struct vring_desc) * num + sizeof(__virtio16) * (3 + num)
        + align - 1) & ~(align - 1))
        + sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num;
#pragma warning(pop)
}
/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
/* Event-index notification test: given the other side's requested
 * event_idx and our index having advanced from old to new_idx, decide
 * whether a notification is due.  Free-running u16 arithmetic makes
 * this wrap-safe.  (Same hold-off scheme as Xen's req_event/req_prod,
 * offset by one; virtio event indexes start at 0, Xen's at 1.) */
static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
    /* How far we advanced in total, and how far past the event point. */
    __u16 advanced = (__u16)(new_idx - old);
    __u16 past_event = (__u16)(new_idx - event_idx - 1);
    return past_event < advanced;
}
/* Driver-side control block for one split virtqueue; embeds the generic
 * struct virtqueue as its first member (cast via splitvq() below). */
struct virtqueue_split {
    struct virtqueue vq;
    struct vring vring;
    /* Driver-owned shadow of avail->flags and avail->idx; the shared
     * ring copies are updated from these. */
    struct {
        u16 flags;
        u16 idx;
    } master_vring_avail;
    /* Count and head of the free-descriptor list (threaded through
     * desc[].next). */
    unsigned int num_unused;
    unsigned int num_added_since_kick;
    u16 first_unused;
    u16 last_used;
    /* Flexible array: one caller token per descriptor index. */
    void *opaque[];
};
#define splitvq(vq) ((struct virtqueue_split *)vq)
#pragma warning (pop)
/* Pops the head of the unused-descriptor free list and returns its
 * index.  The caller must guarantee at least one descriptor is free. */
static inline u16 get_unused_desc(struct virtqueue_split *vq)
{
    u16 head = vq->first_unused;
    ASSERT(vq->num_unused > 0);
    /* The free list is threaded through desc[].next. */
    vq->first_unused = vq->vring.desc[head].next;
    vq->num_unused--;
    return head;
}
/* Marks the descriptor chain starting at index idx as unused */
static inline void put_unused_desc_chain(struct virtqueue_split *vq, u16 idx)
{
    u16 start = idx;
    /* Drop the caller's token for the chain head. */
    vq->opaque[idx] = NULL;
    /* Walk to the chain's tail, counting the interior links back into
     * the free pool. */
    while (vq->vring.desc[idx].flags & VIRTQ_DESC_F_NEXT) {
        idx = vq->vring.desc[idx].next;
        vq->num_unused++;
    }
    /* Relink: tail now points at the old free-list head, and the chain
     * head becomes the new free-list head. */
    vq->vring.desc[idx].flags = VIRTQ_DESC_F_NEXT;
    vq->vring.desc[idx].next = vq->first_unused;
    vq->num_unused++;
    vq->first_unused = start;
}
/* Adds a buffer to a virtqueue, returns 0 on success, negative number on error */
static int virtqueue_add_buf_split(
    struct virtqueue *_vq, /* the queue */
    struct scatterlist sg[], /* sg array of length out + in */
    unsigned int out, /* number of driver->device buffer descriptors in sg */
    unsigned int in, /* number of device->driver buffer descriptors in sg */
    void *opaque, /* later returned from virtqueue_get_buf */
    void *va_indirect, /* VA of the indirect page or NULL */
    ULONGLONG phys_indirect) /* PA of the indirect page or 0 */
{
    struct virtqueue_split *vq = splitvq(_vq);
    struct vring *vring = &vq->vring;
    unsigned int i;
    u16 idx;
    if (va_indirect && (out + in) > 1 && vq->num_unused > 0) {
        /* Use one indirect descriptor */
        struct vring_desc *desc = (struct vring_desc *)va_indirect;
        /* Build the whole chain in the side page, linked 0..n-1. */
        for (i = 0; i < out + in; i++) {
            desc[i].flags = (i < out ? 0 : VIRTQ_DESC_F_WRITE);
            desc[i].flags |= VIRTQ_DESC_F_NEXT;
            desc[i].addr = sg[i].physAddr.QuadPart;
            desc[i].len = sg[i].length;
            desc[i].next = (u16)i + 1;
        }
        /* Terminate the chain at the last entry. */
        desc[i - 1].flags &= ~VIRTQ_DESC_F_NEXT;
        idx = get_unused_desc(vq);
        vq->vring.desc[idx].flags = VIRTQ_DESC_F_INDIRECT;
        vq->vring.desc[idx].addr = phys_indirect;
        vq->vring.desc[idx].len = i * sizeof(struct vring_desc);
        vq->opaque[idx] = opaque;
    } else {
        u16 last_idx;
        /* Use out + in regular descriptors */
        if (out + in > vq->num_unused) {
            return -ENOSPC;
        }
        /* First descriptor */
        idx = last_idx = get_unused_desc(vq);
        vq->opaque[idx] = opaque;
        vring->desc[idx].addr = sg[0].physAddr.QuadPart;
        vring->desc[idx].len = sg[0].length;
        vring->desc[idx].flags = VIRTQ_DESC_F_NEXT;
        if (out == 0) {
            vring->desc[idx].flags |= VIRTQ_DESC_F_WRITE;
        }
        /* get_unused_desc advanced first_unused, so this links to the
         * descriptor the next loop iteration will pop. */
        vring->desc[idx].next = vq->first_unused;
        /* The rest of descriptors */
        for (i = 1; i < out + in; i++) {
            last_idx = get_unused_desc(vq);
            vring->desc[last_idx].addr = sg[i].physAddr.QuadPart;
            vring->desc[last_idx].len = sg[i].length;
            vring->desc[last_idx].flags = VIRTQ_DESC_F_NEXT;
            if (i >= out) {
                vring->desc[last_idx].flags |= VIRTQ_DESC_F_WRITE;
            }
            vring->desc[last_idx].next = vq->first_unused;
        }
        /* Terminate the chain at the final descriptor. */
        vring->desc[last_idx].flags &= ~VIRTQ_DESC_F_NEXT;
    }
    /* Write the first descriptor into the available ring */
    vring->avail->ring[DESC_INDEX(vring->num, vq->master_vring_avail.idx)] = idx;
    /* The ring entry must be visible before the index update publishes it. */
    KeMemoryBarrier();
    vring->avail->idx = ++vq->master_vring_avail.idx;
    vq->num_added_since_kick++;
    return 0;
}
/* Gets the opaque pointer associated with a returned buffer, or NULL if no buffer is available */
static void *virtqueue_get_buf_split(
    struct virtqueue *_vq, /* the queue */
    unsigned int *len) /* number of bytes returned by the device */
{
    struct virtqueue_split *vq = splitvq(_vq);
    void *opaque;
    u16 idx;
    if (vq->last_used == (int)vq->vring.used->idx) {
        /* No descriptor index in the used ring */
        return NULL;
    }
    /* Read the used element only after observing the index update. */
    KeMemoryBarrier();
    idx = DESC_INDEX(vq->vring.num, vq->last_used);
    *len = vq->vring.used->ring[idx].len;
    /* Get the first used descriptor */
    idx = (u16)vq->vring.used->ring[idx].id;
    opaque = vq->opaque[idx];
    /* Put all descriptors back to the free list */
    put_unused_desc_chain(vq, idx);
    vq->last_used++;
    /* With EVENT_IDX armed, republish the consume position so the
     * device knows when the next interrupt is wanted. */
    if (_vq->vdev->event_suppression_enabled && virtqueue_is_interrupt_enabled(_vq)) {
        vring_used_event(&vq->vring) = vq->last_used;
        KeMemoryBarrier();
    }
    ASSERT(opaque != NULL);
    return opaque;
}
/* Returns true if at least one returned buffer is available, false otherwise */
static BOOLEAN virtqueue_has_buf_split(struct virtqueue *_vq)
{
struct virtqueue_split *vq = splitvq(_vq);
return (vq->last_used != vq->vring.used->idx);
}
/* Returns true if the device should be notified, false otherwise */
static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
    struct virtqueue_split *vq = splitvq(_vq);
    bool wrap_around;
    u16 old, new;
    /* Expose our avail updates before reading device suppression state. */
    KeMemoryBarrier();
    /* More than 2^16 additions since the last kick would alias in u16
     * arithmetic, so force a notification in that case. */
    wrap_around = (vq->num_added_since_kick >= (1 << 16));
    old = (u16)(vq->master_vring_avail.idx - vq->num_added_since_kick);
    new = vq->master_vring_avail.idx;
    vq->num_added_since_kick = 0;
    if (_vq->vdev->event_suppression_enabled) {
        /* EVENT_IDX: notify only when we crossed the device's event point. */
        return wrap_around || (bool)vring_need_event(vring_avail_event(&vq->vring), new, old);
    } else {
        /* Legacy: notify unless the device asked not to be kicked. */
        return !(vq->vring.used->flags & VIRTQ_USED_F_NO_NOTIFY);
    }
}
/* Notifies the device even if it's not necessary according to the event suppression logic */
static void virtqueue_kick_always_split(struct virtqueue *_vq)
{
    struct virtqueue_split *vq = splitvq(_vq);
    /* Make all prior ring writes visible to the device before kicking. */
    KeMemoryBarrier();
    vq->num_added_since_kick = 0;
    virtqueue_notify(_vq);
}
/* Enables interrupts on a virtqueue and returns false if the queue has at least one returned
 * buffer available to be fetched by virtqueue_get_buf, true otherwise */
static bool virtqueue_enable_cb_split(struct virtqueue *_vq)
{
    struct virtqueue_split *vq = splitvq(_vq);
    if (!virtqueue_is_interrupt_enabled(_vq)) {
        vq->master_vring_avail.flags &= ~VIRTQ_AVAIL_F_NO_INTERRUPT;
        /* With EVENT_IDX the shared flags field is ignored by the
         * device, so only legacy mode publishes it. */
        if (!_vq->vdev->event_suppression_enabled)
        {
            vq->vring.avail->flags = vq->master_vring_avail.flags;
        }
    }
    /* Ask for an event at our current consume position. */
    vring_used_event(&vq->vring) = vq->last_used;
    KeMemoryBarrier();
    /* Report whether a buffer arrived before interrupts were re-armed. */
    return (vq->last_used == vq->vring.used->idx);
}
/* Enables interrupts on a virtqueue after ~3/4 of the currently pushed buffers have been
 * returned; returns true if no more than that threshold of buffers is already
 * pending (safe to wait), false otherwise (caller should poll). */
static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
    struct virtqueue_split *vq = splitvq(_vq);
    u16 bufs;
    if (!virtqueue_is_interrupt_enabled(_vq)) {
        vq->master_vring_avail.flags &= ~VIRTQ_AVAIL_F_NO_INTERRUPT;
        /* With EVENT_IDX the shared flags field is ignored by the
         * device, so only legacy mode publishes it. */
        if (!_vq->vdev->event_suppression_enabled)
        {
            vq->vring.avail->flags = vq->master_vring_avail.flags;
        }
    }
    /* Note that 3/4 is an arbitrary threshold */
    bufs = (u16)(vq->master_vring_avail.idx - vq->last_used) * 3 / 4;
    vring_used_event(&vq->vring) = vq->last_used + bufs;
    KeMemoryBarrier();
    return ((vq->vring.used->idx - vq->last_used) <= bufs);
}
/* Disables interrupts on a virtqueue */
static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
    struct virtqueue_split *vq = splitvq(_vq);
    if (virtqueue_is_interrupt_enabled(_vq)) {
        vq->master_vring_avail.flags |= VIRTQ_AVAIL_F_NO_INTERRUPT;
        /* With EVENT_IDX the shared flags field is ignored by the
         * device; the shadow flag alone records the disabled state. */
        if (!_vq->vdev->event_suppression_enabled)
        {
            vq->vring.avail->flags = vq->master_vring_avail.flags;
        }
    }
}
/* Returns TRUE when this queue currently allows device interrupts,
 * i.e. VIRTQ_AVAIL_F_NO_INTERRUPT is clear in the driver's shadow
 * avail flags. */
static BOOLEAN virtqueue_is_interrupt_enabled_split(struct virtqueue *_vq)
{
    struct virtqueue_split *vq = splitvq(_vq);
    return (vq->master_vring_avail.flags & VIRTQ_AVAIL_F_NO_INTERRUPT) == 0;
}
/* Re-initializes an already initialized virtqueue */
static void virtqueue_shutdown_split(struct virtqueue *_vq)
{
    struct virtqueue_split *vq = splitvq(_vq);
    unsigned int num = vq->vring.num;
    void *pages = vq->vring.desc;
    /* Same alignment rule used at creation time: legacy devices (with a
     * configured address) use PAGE_SIZE, modern ones SMP_CACHE_BYTES.
     * NOTE(review): presumably mirrors the allocation path — confirm. */
    unsigned int vring_align = _vq->vdev->addr ? PAGE_SIZE : SMP_CACHE_BYTES;
    /* Zero the shared ring and rebuild the control block in place. */
    RtlZeroMemory(pages, vring_size_split(num, vring_align));
    (void)vring_new_virtqueue_split(
        _vq->index,
        vq->vring.num,
        vring_align,
        _vq->vdev,
        pages,
        _vq->notification_cb,
        vq);
}
/* Gets the opaque pointer associated with a not-yet-returned buffer, or NULL if no buffer is available
 * to aid drivers with cleaning up all data on virtqueue shutdown.
 * Each call detaches at most one buffer; callers loop until NULL. */
static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
    struct virtqueue_split *vq = splitvq(_vq);
    u16 idx;
    void *opaque = NULL;
    /* Scan the per-descriptor opaque table for the first live entry. */
    for (idx = 0; idx < (u16)vq->vring.num; idx++) {
        opaque = vq->opaque[idx];
        if (opaque) {
            /* Return the descriptor chain headed at idx to the free list. */
            put_unused_desc_chain(vq, idx);
            /* Withdraw the buffer from the avail ring by rolling the shadow
             * avail index back and publishing the new value to the device. */
            vq->vring.avail->idx = --vq->master_vring_avail.idx;
            break;
        }
    }
    return opaque;
}
/* Returns the size in bytes of the virtqueue control structure, including
 * the additional per-descriptor bookkeeping that follows it, for a queue
 * of the given size and layout (packed vs. split). */
unsigned int vring_control_block_size(u16 qsize, bool packed)
{
    if (packed) {
        return vring_control_block_size_packed(qsize);
    }
    /* Split layout: control struct plus one opaque pointer per descriptor. */
    return sizeof(struct virtqueue_split) + qsize * sizeof(void *);
}
/* Initializes a new split-ring virtqueue using already allocated memory.
 * 'pages' must hold at least vring_size_split(num, vring_align) bytes and
 * 'control' at least vring_control_block_size(num, false) bytes.
 * Returns the embedded struct virtqueue, or NULL if num is not a power of 2.
 * Note: this function does not zero 'pages'; it assumes the caller provides
 * zero-initialized ring memory (the shutdown path zeroes it explicitly) --
 * TODO confirm for the initial-setup path. */
struct virtqueue *vring_new_virtqueue_split(
    unsigned int index, /* virtqueue index */
    unsigned int num, /* virtqueue size (always a power of 2) */
    unsigned int vring_align, /* vring alignment requirement */
    VirtIODevice *vdev, /* the virtio device owning the queue */
    void *pages, /* vring memory */
    void(*notify)(struct virtqueue *), /* notification callback */
    void *control) /* virtqueue memory */
{
    struct virtqueue_split *vq = splitvq(control);
    u16 i;
    /* Power-of-2 check: DESC_INDEX masks by the ring size, so a non-zero
     * result for (num, num) means num has more than one bit set. */
    if (DESC_INDEX(num, num) != 0) {
        DPrintf(0, "Virtqueue length %u is not a power of 2\n", num);
        return NULL;
    }
    /* Clear the control block including the trailing opaque-pointer array. */
    RtlZeroMemory(vq, sizeof(*vq) + num * sizeof(void *));
    vring_init(&vq->vring, num, pages, vring_align);
    vq->vq.vdev = vdev;
    vq->vq.notification_cb = notify;
    vq->vq.index = index;
    /* Build a linked list of unused descriptors; the last descriptor's
     * flags/next stay zero (from the zeroed ring memory), ending the list. */
    vq->num_unused = num;
    vq->first_unused = 0;
    for (i = 0; i < num - 1; i++) {
        vq->vring.desc[i].flags = VIRTQ_DESC_F_NEXT;
        vq->vring.desc[i].next = i + 1;
    }
    vq->vq.avail_va = vq->vring.avail;
    vq->vq.used_va = vq->vring.used;
    /* Wire up the split-ring implementations of the virtqueue operations. */
    vq->vq.add_buf = virtqueue_add_buf_split;
    vq->vq.detach_unused_buf = virtqueue_detach_unused_buf_split;
    vq->vq.disable_cb = virtqueue_disable_cb_split;
    vq->vq.enable_cb = virtqueue_enable_cb_split;
    vq->vq.enable_cb_delayed = virtqueue_enable_cb_delayed_split;
    vq->vq.get_buf = virtqueue_get_buf_split;
    vq->vq.has_buf = virtqueue_has_buf_split;
    vq->vq.is_interrupt_enabled = virtqueue_is_interrupt_enabled_split;
    vq->vq.kick_always = virtqueue_kick_always_split;
    vq->vq.kick_prepare = virtqueue_kick_prepare_split;
    vq->vq.shutdown = virtqueue_shutdown_split;
    return &vq->vq;
}
/* Negotiates virtio transport features: clears every transport feature bit
 * that this implementation does not support, leaving only the whitelisted
 * ones as offered by the device. */
void vring_transport_features(
    VirtIODevice *vdev,
    u64 *features) /* points to device features on entry and driver accepted features on return */
{
    unsigned int bit;
    for (bit = VIRTIO_TRANSPORT_F_START; bit < VIRTIO_TRANSPORT_F_END; bit++) {
        switch (bit) {
        case VIRTIO_RING_F_INDIRECT_DESC:
        case VIRTIO_RING_F_EVENT_IDX:
        case VIRTIO_F_VERSION_1:
            /* Supported: keep the bit exactly as the device offered it. */
            break;
        default:
            virtio_feature_disable(*features, bit);
            break;
        }
    }
}
/* Returns the max number of scatter-gather elements that fit in an indirect
 * descriptor page (one struct vring_desc per element).
 * Fix: declare the parameter list as (void) -- in C, empty parentheses
 * declare a function with an unspecified argument list, not a prototype. */
u32 virtio_get_indirect_page_capacity(void)
{
    return PAGE_SIZE / sizeof(struct vring_desc);
}
/* Returns the number of bytes of ring memory required for a queue of the
 * given size and alignment, dispatching on the ring layout. */
unsigned long vring_size(unsigned int num, unsigned long align, bool packed)
{
    return packed ? vring_size_packed(num, align)
                  : vring_size_split(num, align);
}

View file

@ -0,0 +1,11 @@
#pragma once
/* Global debug verbosity: messages with Level > virtioDebugLevel are dropped. */
extern int virtioDebugLevel;
/* Non-zero enables debug printing altogether (boolean despite the int type). */
extern int bDebugPrint;
/* Signature of the platform-supplied printf-style debug output routine. */
typedef void (*tDebugPrintFunc)(const char *format, ...);
extern tDebugPrintFunc VirtioDebugPrintProc;
/* Conditional debug print; the inverted if {} else form keeps the macro safe
 * inside un-braced if/else statements (no dangling-else at the call site). */
#define DPrintf(Level, MSG, ...) if ((!bDebugPrint) || Level > virtioDebugLevel) {} else VirtioDebugPrintProc(MSG, __VA_ARGS__)
/* Trace helpers: function entry, and exit with an NDIS status code
 * (failures are promoted to level 0 so they are always printed). */
#define DEBUG_ENTRY(level) DPrintf(level, "[%s]=>\n", __FUNCTION__)
#define DEBUG_EXIT_STATUS(level, status) DPrintf((status == NDIS_STATUS_SUCCESS ? level : 0), "[%s]<=0x%X\n", __FUNCTION__, (status))

View file

@ -0,0 +1,19 @@
#ifndef _LINUX_TYPES_H
#define _LINUX_TYPES_H
/* Minimal Linux-style fixed-width type shim for the imported virtio code.
 * 'unsigned long' is 32 bits on all Windows/ReactOS targets (ILP32/LLP64),
 * so u32/__u32/__le32 are genuinely 32-bit here. The __le* names document
 * little-endian wire format; x86 is little-endian, so no byte swapping is
 * performed anywhere in this driver. */
#define __bitwise__
// #define __attribute__(x)
#define u8 unsigned char
#define u16 unsigned short
#define u32 unsigned long
#define u64 ULONGLONG
#define __u8 unsigned char
#define __u16 unsigned short
#define __le16 unsigned short
#define __u32 unsigned long
#define __le32 unsigned long
#define __u64 ULONGLONG
#endif /* _LINUX_TYPES_H */

View file

@ -0,0 +1,73 @@
#ifndef _UAPI_LINUX_VIRTIO_CONFIG_H
#define _UAPI_LINUX_VIRTIO_CONFIG_H
/* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
* anyone can use the definitions to implement compatible drivers/servers.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of IBM nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. */
/* Virtio devices use a standardized configuration space to define their
* features and pass configuration information, but each implementation can
* store and access that space differently. */
/* Status byte for guest to report progress, and synchronize features. */
/* We have seen device and processed generic fields (VIRTIO_CONFIG_F_VIRTIO) */
#define VIRTIO_CONFIG_S_ACKNOWLEDGE 1
/* We have found a driver for the device. */
#define VIRTIO_CONFIG_S_DRIVER 2
/* Driver has used its parts of the config, and is happy */
#define VIRTIO_CONFIG_S_DRIVER_OK 4
/* Driver has finished configuring features */
#define VIRTIO_CONFIG_S_FEATURES_OK 8
/* Device entered invalid state, driver SHOULD reset it */
#define VIRTIO_CONFIG_S_NEEDS_RESET 0x40
/* We've given up on this device. */
#define VIRTIO_CONFIG_S_FAILED 0x80
/* virtio library features bits */
/* Some virtio feature bits (currently bits 28 through 31) are reserved for the
* transport being used (eg. virtio_ring), the rest are per-device feature
* bits. */
#define VIRTIO_TRANSPORT_F_START 28
#define VIRTIO_TRANSPORT_F_END 32
/* Do we get callbacks when the ring is completely used, even if we've
* suppressed them? */
#define VIRTIO_F_NOTIFY_ON_EMPTY 24
/* Can the device handle any descriptor layout? */
#define VIRTIO_F_ANY_LAYOUT 27
/* v1.0 compliant. */
#define VIRTIO_F_VERSION_1 32
#define VIRTIO_F_IOMMU_PLATFORM 33
/* This feature indicates support for the packed virtqueue layout. */
#define VIRTIO_F_RING_PACKED 34
// if this number is not equal to desc size, queue creation fails
#define SIZE_OF_SINGLE_INDIRECT_DESC 16
#endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */

View file

@ -0,0 +1,47 @@
#ifndef _UAPI_LINUX_VIRTIO_TYPES_H
#define _UAPI_LINUX_VIRTIO_TYPES_H
/* Type definitions for virtio implementations.
*
* This header is BSD licensed so anyone can use the definitions to implement
* compatible drivers/servers.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of IBM nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Copyright (C) 2014 Red Hat, Inc.
* Author: Michael S. Tsirkin <mst@redhat.com>
*/
#include "linux/types.h"
/*
* __virtio{16,32,64} have the following meaning:
* - __u{16,32,64} for virtio devices in legacy mode, accessed in native endian
* - __le{16,32,64} for standard-compliant virtio devices
*/
typedef __u16 __bitwise__ __virtio16;
typedef __u32 __bitwise__ __virtio32;
typedef __u64 __bitwise__ __virtio64;
#endif /* _UAPI_LINUX_VIRTIO_TYPES_H */

View file

@ -0,0 +1,39 @@
//////////////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2007 Qumranet All Rights Reserved
//
// Module Name:
// osdep.h
//
// Abstract:
// Windows OS dependent definitions of data types
//
// Author:
// Yan Vugenfirer - February 2007.
//
//////////////////////////////////////////////////////////////////////////////////////////
#pragma once
#include <ntddk.h>
#ifdef __REACTOS__
#ifdef __GNUC__
/* GCC builds: redefine FORCEINLINE with the GCC always_inline attribute. */
#undef FORCEINLINE
#define FORCEINLINE __attribute__((__always_inline__))
#endif
#endif
#ifndef __REACTOS__
/* Minimal errno-style code used by VirtioLib error paths. */
#define ENOSPC 1
#endif
#if !defined(__cplusplus) && !defined(bool)
// Important note: in MSFT C++ bool length is 1 bytes
// C++ does not define length of bool
// inconsistent definition of 'bool' may create compatibility problems
#define bool u8
#define false FALSE
#define true TRUE
#endif
/* Assumed CPU cache line size; used as the vring alignment for modern
 * (non-legacy) virtio devices. */
#define SMP_CACHE_BYTES 64

View file

@ -0,0 +1,392 @@
/*
* Virtio PCI driver
*
* This module allows virtio devices to be used over a virtual PCI device.
* This can be used with QEMU based VMMs like KVM or Xen.
*
* Copyright IBM Corp. 2007
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This header is BSD licensed so anyone can use the definitions to implement
* compatible drivers/servers.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of IBM nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _LINUX_VIRTIO_PCI_H
#define _LINUX_VIRTIO_PCI_H
#include "linux/types.h"
#include "linux/virtio_config.h"
#ifndef VIRTIO_PCI_NO_LEGACY
/* A 32-bit r/o bitmask of the features supported by the host */
#define VIRTIO_PCI_HOST_FEATURES 0
/* A 32-bit r/w bitmask of features activated by the guest */
#define VIRTIO_PCI_GUEST_FEATURES 4
/* A 32-bit r/w PFN for the currently selected queue */
#define VIRTIO_PCI_QUEUE_PFN 8
/* A 16-bit r/o queue size for the currently selected queue */
#define VIRTIO_PCI_QUEUE_NUM 12
/* A 16-bit r/w queue selector */
#define VIRTIO_PCI_QUEUE_SEL 14
/* A 16-bit r/w queue notifier */
#define VIRTIO_PCI_QUEUE_NOTIFY 16
/* An 8-bit device status register. */
#define VIRTIO_PCI_STATUS 18
/* An 8-bit r/o interrupt status register. Reading the value will return the
* current contents of the ISR and will also clear it. This is effectively
* a read-and-acknowledge. */
#define VIRTIO_PCI_ISR 19
/* MSI-X registers: only enabled if MSI-X is enabled. */
/* A 16-bit vector for configuration changes. */
#define VIRTIO_MSI_CONFIG_VECTOR 20
/* A 16-bit vector for selected queue notifications. */
#define VIRTIO_MSI_QUEUE_VECTOR 22
/* The remaining space is defined by each driver as the per-driver
* configuration space */
#define VIRTIO_PCI_CONFIG_OFF(msix_enabled) ((msix_enabled) ? 24 : 20)
/* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */
#define VIRTIO_PCI_CONFIG(msix_enabled) VIRTIO_PCI_CONFIG_OFF(msix_enabled)
/* How many bits to shift physical queue address written to QUEUE_PFN.
* 12 is historical, and due to x86 page size. */
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12
/* The alignment to use between consumer and producer parts of vring.
* x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN 4096
#endif /* VIRTIO_PCI_NO_LEGACY */
/* The bit of the ISR which indicates a device configuration change. */
#define VIRTIO_PCI_ISR_CONFIG 0x2
/* Vector value used to disable MSI for queue */
#define VIRTIO_MSI_NO_VECTOR 0xffff
/* IDs for different capabilities. Must all exist. */
/* Common configuration */
#define VIRTIO_PCI_CAP_COMMON_CFG 1
/* Notifications */
#define VIRTIO_PCI_CAP_NOTIFY_CFG 2
/* ISR access */
#define VIRTIO_PCI_CAP_ISR_CFG 3
/* Device specific configuration */
#define VIRTIO_PCI_CAP_DEVICE_CFG 4
/* PCI configuration access */
#define VIRTIO_PCI_CAP_PCI_CFG 5
/* This is the PCI capability header: */
struct virtio_pci_cap {
__u8 cap_vndr; /* Generic PCI field: PCI_CAPABILITY_ID_VENDOR_SPECIFIC */
__u8 cap_next; /* Generic PCI field: next ptr. */
__u8 cap_len; /* Generic PCI field: capability length */
__u8 cfg_type; /* Identifies the structure. */
__u8 bar; /* Where to find it. */
__u8 padding[3]; /* Pad to full dword. */
__le32 offset; /* Offset within bar. */
__le32 length; /* Length of the structure, in bytes. */
};
struct virtio_pci_notify_cap {
struct virtio_pci_cap cap;
__le32 notify_off_multiplier; /* Multiplier for queue_notify_off. */
};
/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */
struct virtio_pci_common_cfg {
/* About the whole device. */
__le32 device_feature_select; /* read-write */
__le32 device_feature; /* read-only */
__le32 guest_feature_select; /* read-write */
__le32 guest_feature; /* read-write */
__le16 msix_config; /* read-write */
__le16 num_queues; /* read-only */
__u8 device_status; /* read-write */
__u8 config_generation; /* read-only */
/* About a specific virtqueue. */
__le16 queue_select; /* read-write */
__le16 queue_size; /* read-write, power of 2. */
__le16 queue_msix_vector; /* read-write */
__le16 queue_enable; /* read-write */
__le16 queue_notify_off; /* read-only */
__le32 queue_desc_lo; /* read-write */
__le32 queue_desc_hi; /* read-write */
__le32 queue_avail_lo; /* read-write */
__le32 queue_avail_hi; /* read-write */
__le32 queue_used_lo; /* read-write */
__le32 queue_used_hi; /* read-write */
};
#define MAX_QUEUES_PER_DEVICE_DEFAULT 8
typedef struct virtio_queue_info
{
/* the actual virtqueue */
struct virtqueue *vq;
/* the number of entries in the queue */
u16 num;
/* the virtual address of the ring queue */
void *queue;
} VirtIOQueueInfo;
typedef struct virtio_system_ops {
// device register access
u8 (*vdev_read_byte)(ULONG_PTR ulRegister);
u16 (*vdev_read_word)(ULONG_PTR ulRegister);
u32 (*vdev_read_dword)(ULONG_PTR ulRegister);
void (*vdev_write_byte)(ULONG_PTR ulRegister, u8 bValue);
void (*vdev_write_word)(ULONG_PTR ulRegister, u16 wValue);
void (*vdev_write_dword)(ULONG_PTR ulRegister, u32 ulValue);
// memory management
void *(*mem_alloc_contiguous_pages)(void *context, size_t size);
void (*mem_free_contiguous_pages)(void *context, void *virt);
ULONGLONG (*mem_get_physical_address)(void *context, void *virt);
void *(*mem_alloc_nonpaged_block)(void *context, size_t size);
void (*mem_free_nonpaged_block)(void *context, void *addr);
// PCI config space access
int (*pci_read_config_byte)(void *context, int where, u8 *bVal);
int (*pci_read_config_word)(void *context, int where, u16 *wVal);
int (*pci_read_config_dword)(void *context, int where, u32 *dwVal);
// PCI resource handling
size_t (*pci_get_resource_len)(void *context, int bar);
void *(*pci_map_address_range)(void *context, int bar, size_t offset, size_t maxlen);
// misc
u16 (*vdev_get_msix_vector)(void *context, int queue);
void (*vdev_sleep)(void *context, unsigned int msecs);
} VirtIOSystemOps;
struct virtio_device;
typedef struct virtio_device VirtIODevice;
struct virtio_device_ops
{
// read/write device config and read config generation counter
void (*get_config)(VirtIODevice *vdev, unsigned offset, void *buf, unsigned len);
void (*set_config)(VirtIODevice *vdev, unsigned offset, const void *buf, unsigned len);
u32 (*get_config_generation)(VirtIODevice *vdev);
// read/write device status byte and reset the device
u8 (*get_status)(VirtIODevice *vdev);
void (*set_status)(VirtIODevice *vdev, u8 status);
void (*reset)(VirtIODevice *vdev);
// get/set device feature bits
u64 (*get_features)(VirtIODevice *vdev);
NTSTATUS (*set_features)(VirtIODevice *vdev, u64 features);
// set config/queue MSI interrupt vector, returns the new vector
u16 (*set_config_vector)(VirtIODevice *vdev, u16 vector);
u16 (*set_queue_vector)(struct virtqueue *vq, u16 vector);
// query virtual queue size and memory requirements
NTSTATUS (*query_queue_alloc)(VirtIODevice *vdev,
unsigned index, unsigned short *pNumEntries,
unsigned long *pRingSize,
unsigned long *pHeapSize);
// allocate and initialize a queue
NTSTATUS (*setup_queue)(struct virtqueue **queue,
VirtIODevice *vdev, VirtIOQueueInfo *info,
unsigned idx, u16 msix_vec);
// tear down and deallocate a queue
void (*delete_queue)(VirtIOQueueInfo *info);
};
struct virtio_device
{
// the I/O port BAR of the PCI device (legacy virtio devices only)
ULONG_PTR addr;
// true if the device uses MSI interrupts
bool msix_used;
// true if the VIRTIO_RING_F_EVENT_IDX feature flag has been negotiated
bool event_suppression_enabled;
// true if the VIRTIO_F_RING_PACKED feature flag has been negotiated
bool packed_ring;
// internal device operations, implemented separately for legacy and modern
const struct virtio_device_ops *device;
// external callbacks implemented separately by different driver model drivers
const struct virtio_system_ops *system;
// opaque context value passed as first argument to virtio_system_ops callbacks
void *DeviceContext;
// the ISR status field, reading causes the device to de-assert an interrupt
volatile u8 *isr;
// modern virtio device capabilities and related state
volatile struct virtio_pci_common_cfg *common;
volatile unsigned char *config;
volatile unsigned char *notify_base;
int notify_map_cap;
u32 notify_offset_multiplier;
size_t config_len;
size_t notify_len;
// maximum number of virtqueues that fit in the memory block pointed to by info
ULONG maxQueues;
// points to inline_info if not more than MAX_QUEUES_PER_DEVICE_DEFAULT queues
// are used, or to an external allocation otherwise
VirtIOQueueInfo *info;
VirtIOQueueInfo inline_info[MAX_QUEUES_PER_DEVICE_DEFAULT];
};
/* Driver API: device init and shutdown
* DeviceContext is a driver defined opaque value which will be passed to driver
* supplied callbacks described in pSystemOps. pSystemOps must be non-NULL and all
* its fields must be non-NULL. msix_used is true if and only if the device is
* configured with MSI support.
*/
NTSTATUS virtio_device_initialize(VirtIODevice *vdev,
const VirtIOSystemOps *pSystemOps,
void *DeviceContext,
bool msix_used);
void virtio_device_shutdown(VirtIODevice *vdev);
/* Driver API: device status manipulation
* virtio_set_status should not be called by new drivers. Device status should only
* be getting its bits set with virtio_add_status and reset all back to 0 with
* virtio_device_reset. virtio_device_ready is a special version of virtio_add_status
* which adds the VIRTIO_CONFIG_S_DRIVER_OK status bit.
*/
u8 virtio_get_status(VirtIODevice *vdev);
void virtio_set_status(VirtIODevice *vdev, u8 status);
void virtio_add_status(VirtIODevice *vdev, u8 status);
void virtio_device_reset(VirtIODevice *vdev);
void virtio_device_ready(VirtIODevice *vdev);
/* Driver API: device feature bitmap manipulation
* Features passed to virtio_set_features should be a subset of features offered by
* the device as returned from virtio_get_features. virtio_set_features sets the
* VIRTIO_CONFIG_S_FEATURES_OK status bit if it is supported by the device.
*/
#define virtio_is_feature_enabled(FeaturesList, Feature) (!!((FeaturesList) & (1ULL << (Feature))))
#define virtio_feature_enable(FeaturesList, Feature) ((FeaturesList) |= (1ULL << (Feature)))
#define virtio_feature_disable(FeaturesList, Feature) ((FeaturesList) &= ~(1ULL << (Feature)))
u64 virtio_get_features(VirtIODevice *dev);
NTSTATUS virtio_set_features(VirtIODevice *vdev, u64 features);
/* Driver API: device configuration access
* Both virtio_get_config and virtio_set_config support arbitrary values of the len
* parameter. Config items of length 1, 2, and 4 are read/written using one access,
* length 8 is broken down to two 4 bytes accesses, and any other length is read or
* written byte by byte.
*/
void virtio_get_config(VirtIODevice *vdev, unsigned offset,
void *buf, unsigned len);
void virtio_set_config(VirtIODevice *vdev, unsigned offset,
void *buf, unsigned len);
/* Driver API: virtqueue setup
* virtio_reserve_queue_memory makes VirtioLib reserve memory for its virtqueue
* bookkeeping. Drivers should call this function if they intend to set up queues
* one by one with virtio_find_queue. virtio_find_queues (plural) internally takes
* care of the reservation and virtio_reserve_queue_memory need not be called.
* Note that in addition to queue interrupt vectors, virtio_find_queues also sets
* up the device config vector as a convenience.
* Drivers should treat the returned struct virtqueue pointers as opaque handles.
*/
NTSTATUS virtio_query_queue_allocation(VirtIODevice *vdev, unsigned index,
unsigned short *pNumEntries,
unsigned long *pRingSize,
unsigned long *pHeapSize);
NTSTATUS virtio_reserve_queue_memory(VirtIODevice *vdev, unsigned nvqs);
NTSTATUS virtio_find_queue(VirtIODevice *vdev, unsigned index,
struct virtqueue **vq);
NTSTATUS virtio_find_queues(VirtIODevice *vdev, unsigned nvqs,
struct virtqueue *vqs[]);
/* Driver API: virtqueue shutdown
* The device must be reset and re-initialized to re-setup queues after they have
* been deleted.
*/
void virtio_delete_queue(struct virtqueue *vq);
void virtio_delete_queues(VirtIODevice *vdev);
/* Driver API: virtqueue query and manipulation
* virtio_get_queue_descriptor_size
* is useful in situations where the driver has to prepare for the memory allocation
* performed by virtio_reserve_queue_memory beforehand.
*/
u32 virtio_get_queue_size(struct virtqueue *vq);
unsigned long virtio_get_indirect_page_capacity();
/* Returns the number of bytes VirtioLib needs per queue for bookkeeping,
 * letting drivers budget the allocation performed by
 * virtio_reserve_queue_memory in advance.
 * Fix: declare the parameter list as (void) -- in C, empty parentheses
 * declare a function with an unspecified argument list, not a prototype. */
static ULONG FORCEINLINE virtio_get_queue_descriptor_size(void)
{
    return sizeof(VirtIOQueueInfo);
}
/* Driver API: interrupt handling
* virtio_set_config_vector and virtio_set_queue_vector set the MSI vector used for
* device configuration interrupt and queue interrupt, respectively. The driver may
* choose to either return the vector from the vdev_get_msix_vector callback (called
* as part of queue setup) or call these functions later. Note that setting the vector
* may fail which is indicated by the return value of VIRTIO_MSI_NO_VECTOR.
* virtio_read_isr_status returns the value of the ISR status register, note that it
* is not idempotent, calling the function makes the device de-assert the interrupt.
*/
u16 virtio_set_config_vector(VirtIODevice *vdev, u16 vector);
u16 virtio_set_queue_vector(struct virtqueue *vq, u16 vector);
u8 virtio_read_isr_status(VirtIODevice *vdev);
/* Driver API: miscellaneous helpers
* virtio_get_bar_index returns the corresponding BAR index given its physical address.
* This tends to be useful to all drivers since Windows doesn't provide reliable BAR
* indices as part of resource enumeration. The function returns -1 on failure.
*/
int virtio_get_bar_index(PPCI_COMMON_HEADER pPCIHeader, PHYSICAL_ADDRESS BasePA);
#endif

View file

@ -0,0 +1,88 @@
#ifndef _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
#define _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
/*
* Virtio PCI driver - APIs for common functionality for all device versions
*
* Copyright IBM Corp. 2007
* Copyright Red Hat, Inc. 2014
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
* Rusty Russell <rusty@rustcorp.com.au>
* Michael S. Tsirkin <mst@redhat.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#define ioread8(vdev, addr) \
vdev->system->vdev_read_byte((ULONG_PTR)(addr))
#define ioread16(vdev, addr) \
vdev->system->vdev_read_word((ULONG_PTR)(addr))
#define ioread32(vdev, addr) \
vdev->system->vdev_read_dword((ULONG_PTR)(addr))
#define iowrite8(vdev, val, addr) \
vdev->system->vdev_write_byte((ULONG_PTR)(addr), val)
#define iowrite16(vdev, val, addr) \
vdev->system->vdev_write_word((ULONG_PTR)(addr), val)
#define iowrite32(vdev, val, addr) \
vdev->system->vdev_write_dword((ULONG_PTR)(addr), val)
/* Writes a 64-bit value as two 32-bit register writes (low dword first).
 * Fix: the original expanded to two separate statements, so inside an
 * un-braced if/else only the first write would be conditional; wrap in
 * do { } while (0) so the macro behaves as a single statement. */
#define iowrite64_twopart(vdev, val, lo_addr, hi_addr) \
    do { \
        vdev->system->vdev_write_dword((ULONG_PTR)(lo_addr), (u32)(val)); \
        vdev->system->vdev_write_dword((ULONG_PTR)(hi_addr), (u32)((val) >> 32)); \
    } while (0)
#define mem_alloc_contiguous_pages(vdev, size) \
vdev->system->mem_alloc_contiguous_pages(vdev->DeviceContext, size)
#define mem_free_contiguous_pages(vdev, virt) \
vdev->system->mem_free_contiguous_pages(vdev->DeviceContext, virt)
#define mem_get_physical_address(vdev, virt) \
vdev->system->mem_get_physical_address(vdev->DeviceContext, virt)
#define mem_alloc_nonpaged_block(vdev, size) \
vdev->system->mem_alloc_nonpaged_block(vdev->DeviceContext, size)
#define mem_free_nonpaged_block(vdev, addr) \
vdev->system->mem_free_nonpaged_block(vdev->DeviceContext, addr)
#define pci_read_config_byte(vdev, where, bVal) \
vdev->system->pci_read_config_byte(vdev->DeviceContext, where, bVal)
#define pci_read_config_word(vdev, where, wVal) \
vdev->system->pci_read_config_word(vdev->DeviceContext, where, wVal)
#define pci_read_config_dword(vdev, where, dwVal) \
vdev->system->pci_read_config_dword(vdev->DeviceContext, where, dwVal)
#define pci_get_resource_len(vdev, bar) \
vdev->system->pci_get_resource_len(vdev->DeviceContext, bar)
#define pci_map_address_range(vdev, bar, offset, maxlen) \
vdev->system->pci_map_address_range(vdev->DeviceContext, bar, offset, maxlen)
#define vdev_get_msix_vector(vdev, queue) \
vdev->system->vdev_get_msix_vector(vdev->DeviceContext, queue)
#define vdev_sleep(vdev, msecs) \
vdev->system->vdev_sleep(vdev->DeviceContext, msecs)
/* the notify function used when creating a virt queue */
void vp_notify(struct virtqueue *vq);
NTSTATUS vio_legacy_initialize(VirtIODevice *vdev);
NTSTATUS vio_modern_initialize(VirtIODevice *vdev);
#endif

View file

@ -0,0 +1,50 @@
#ifndef _UAPI_LINUX_VIRTIO_RING_H
#define _UAPI_LINUX_VIRTIO_RING_H
/* An interface for efficient virtio implementation, currently for use by KVM
* and lguest, but hopefully others soon. Do NOT change this since it will
* break existing servers and clients.
*
* This header is BSD licensed so anyone can use the definitions to implement
* compatible drivers/servers.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of IBM nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Copyright Rusty Russell IBM Corporation 2007. */
#include "linux/types.h"
#include "linux/virtio_types.h"
/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC 28
/* The Guest publishes the used index for which it expects an interrupt
* at the end of the avail ring. Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
* at the end of the used ring. Guest should ignore the used->flags field. */
#define VIRTIO_RING_F_EVENT_IDX 29
void vring_transport_features(VirtIODevice *vdev, u64 *features);
unsigned long vring_size(unsigned int num, unsigned long align, bool packed);
#endif /* _UAPI_LINUX_VIRTIO_RING_H */

View file

@ -0,0 +1,24 @@
#ifndef _VIRTIO_RING_ALLOCATION_H
#define _VIRTIO_RING_ALLOCATION_H

/* Creates a virtqueue in the classic "split" ring layout inside the
   caller-provided 'pages' area; 'notify' is invoked to kick the host,
   'control' is the driver's bookkeeping area for this queue. */
struct virtqueue *vring_new_virtqueue_split(unsigned int index,
    unsigned int num,
    unsigned int vring_align,
    VirtIODevice *vdev,
    void *pages,
    void (*notify)(struct virtqueue *),
    void *control);

/* Same contract as vring_new_virtqueue_split, but builds a virtio 1.1
   "packed" ring in the provided memory. */
struct virtqueue *vring_new_virtqueue_packed(unsigned int index,
    unsigned int num,
    unsigned int vring_align,
    VirtIODevice *vdev,
    void *pages,
    void (*notify)(struct virtqueue *),
    void *control);

/* Size in bytes of the driver control block for a queue of qsize entries;
   'packed' selects between the split and packed variants. */
unsigned int vring_control_block_size(u16 qsize, bool packed);
unsigned int vring_control_block_size_packed(u16 qsize);
/* Byte size of the shared packed-ring memory for 'num' entries. */
unsigned long vring_size_packed(unsigned int num, unsigned long align);
#endif /* _VIRTIO_RING_ALLOCATION_H */

View file

@ -0,0 +1,452 @@
/*
* This file contains driver-related part of NDIS5.X adapter driver.
*
* Copyright (c) 2008-2017 Red Hat, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "ParaNdis5.h"
//#define NO_XP_POWER_MANAGEMENT
#ifdef WPP_EVENT_TRACING
#include "ParaNdis5-Driver.tmh"
#endif
static NDIS_HANDLE DriverHandle;
static ULONG gID = 0;
/******************************************************
Unload handler, only responsibility is cleanup WPP
*******************************************************/
static VOID NTAPI ParaVirtualNICUnload(IN PDRIVER_OBJECT pDriverObject)
{
    /* Driver unload callback registered via NdisMRegisterUnloadHandler in
       DriverEntry; only tears down the debug/WPP machinery. */
    DEBUG_ENTRY(0);
    ParaNdis_DebugCleanup(pDriverObject);
}
/*************************************************************
Required NDIS function
Responsible to put the adapter to known (initial) hardware state
Do not call any NDIS functions
*************************************************************/
/* Delegates the shutdown notification straight to the common layer. */
static VOID NTAPI ParaNdis5_Shutdown(IN NDIS_HANDLE MiniportAdapterContext)
{
    ParaNdis_OnShutdown((PARANDIS_ADAPTER *)MiniportAdapterContext);
}
/******************************************************
Required NDIS procedure
Allocates and initializes adapter context
Finally sets send and receive to Enabled state and reports connect
Returns:
NDIS_STATUS SUCCESS or some error code
*******************************************************/
static NDIS_STATUS NTAPI ParaNdis5_Initialize(OUT PNDIS_STATUS OpenErrorStatus,
                                              OUT PUINT SelectedMediumIndex,
                                              IN PNDIS_MEDIUM MediumArray,
                                              IN UINT MediumArraySize,
                                              IN NDIS_HANDLE MiniportAdapterHandle,
                                              IN NDIS_HANDLE WrapperConfigurationContext)
{
    NDIS_STATUS status = NDIS_STATUS_UNSUPPORTED_MEDIA;
    PARANDIS_ADAPTER *pContext = NULL;
    UINT i;
    /* Pick 802.3 out of the medium array offered by NDIS; anything else
       leaves status at NDIS_STATUS_UNSUPPORTED_MEDIA and aborts the init. */
    for(i = 0; i < MediumArraySize; ++i)
    {
        if(MediumArray[i] == NdisMedium802_3)
        {
            *SelectedMediumIndex = i;
            status = NDIS_STATUS_SUCCESS;
            break;
        }
    }
    /* Allocate the per-adapter context (miniport handle not known to the
       allocator yet, hence NULL). */
    if (status == NDIS_STATUS_SUCCESS)
    {
        pContext =
            (PARANDIS_ADAPTER *)ParaNdis_AllocateMemory(NULL, sizeof(PARANDIS_ADAPTER));
        if (!pContext)
        {
            status = NDIS_STATUS_RESOURCES;
        }
    }
    if (status == NDIS_STATUS_SUCCESS)
    {
        /* pResourceList initially points at a dummy location; the first
           NdisMQueryAdapterResources call below is a sizing probe that
           updates uSize with the required buffer length. */
        PVOID pResourceList = &status;
        UINT uSize = 0;
        NdisZeroMemory(pContext, sizeof(PARANDIS_ADAPTER));
        /* Unique instance id for debug tracing */
        pContext->ulUniqueID = NdisInterlockedIncrement(&gID);
        pContext->DriverHandle = DriverHandle;
        pContext->MiniportHandle = MiniportAdapterHandle;
        pContext->WrapperConfigurationHandle = WrapperConfigurationContext;
        NdisMQueryAdapterResources(&status, WrapperConfigurationContext, pResourceList, &uSize);
        if (uSize > 0)
            pResourceList = ParaNdis_AllocateMemory(MiniportAdapterHandle, uSize);
        else
            pResourceList = NULL;
        if (!pResourceList)
            /* uSize == 0 means NDIS reported no resources at all */
            status = uSize > 0 ? NDIS_STATUS_RESOURCES : NDIS_STATUS_FAILURE;
        else
        {
            ULONG attributes;
            attributes = NDIS_ATTRIBUTE_DESERIALIZE | NDIS_ATTRIBUTE_BUS_MASTER;
            // in XP SP2, if this flag is NOT set, the NDIS halts miniport
            // upon transition to S1..S4.
            // it seems that XP SP3 ignores it and always sends SET_POWER to D3
#ifndef NO_XP_POWER_MANAGEMENT
            attributes |= NDIS_ATTRIBUTE_NO_HALT_ON_SUSPEND;
#endif
            /* Attributes must be set before touching hardware resources */
            NdisMSetAttributesEx(
                MiniportAdapterHandle,
                pContext,
                0,
                attributes,
                NdisInterfacePci);
            /* Second call now fills the properly-sized resource list */
            NdisMQueryAdapterResources(&status, WrapperConfigurationContext, pResourceList, &uSize);
            status = ParaNdis_InitializeContext(pContext, (PNDIS_RESOURCE_LIST)pResourceList);
            NdisFreeMemory(pResourceList, 0, 0);
        }
    }
    if (status == NDIS_STATUS_SUCCESS)
    {
        status = ParaNdis_FinishInitialization(pContext);
        if (status == NDIS_STATUS_SUCCESS)
        {
            ParaNdis_DebugRegisterMiniport(pContext, TRUE);
            /* Report disconnected first, then enable both paths */
            ParaNdis_IndicateConnect(pContext, FALSE, TRUE);
            ParaNdis5_StopSend(pContext, FALSE, NULL);
            ParaNdis5_StopReceive(pContext, FALSE, NULL);
            if (!pContext->ulMilliesToConnect)
            {
                ParaNdis_ReportLinkStatus(pContext, FALSE);
            }
            else
            {
                /* Configured delay before the first link indication */
                NdisSetTimer(&pContext->ConnectTimer, pContext->ulMilliesToConnect);
            }
        }
        else
        {
            ParaNdis_CleanupContext(pContext);
        }
    }
    /* On any failure after allocation, release the context */
    if (status != NDIS_STATUS_SUCCESS && pContext)
    {
        NdisFreeMemory(pContext, 0, 0);
    }
    DEBUG_EXIT_STATUS(0, status);
    return status;
}
/*************************************************************
Callback of delayed receive pause procedure upon reset request
*************************************************************/
/* Receive path fully paused during reset: release the waiter in
   ParaNdis_Suspend. */
static void OnReceiveStoppedOnReset(VOID *p)
{
    DEBUG_ENTRY(0);
    NdisSetEvent(&((PARANDIS_ADAPTER *)p)->ResetEvent);
}
/*************************************************************
Callback of delayed send pause procedure upon reset request
*************************************************************/
/* Send path fully paused during reset: release the waiter in
   ParaNdis_Suspend. */
static void OnSendStoppedOnReset(VOID *p)
{
    DEBUG_ENTRY(0);
    NdisSetEvent(&((PARANDIS_ADAPTER *)p)->ResetEvent);
}
VOID ParaNdis_Suspend(PARANDIS_ADAPTER *pContext)
{
    DEBUG_ENTRY(0);
    /* Pause the send path. If the stop completes synchronously
       (anything but PENDING) the callback never fires, so signal the
       event ourselves before waiting on it. */
    NdisResetEvent(&pContext->ResetEvent);
    if (NDIS_STATUS_PENDING != ParaNdis5_StopSend(pContext, TRUE, OnSendStoppedOnReset))
    {
        NdisSetEvent(&pContext->ResetEvent);
    }
    NdisWaitEvent(&pContext->ResetEvent, 0);
    /* Same synchronous/asynchronous handling for the receive path,
       reusing the same event. */
    NdisResetEvent(&pContext->ResetEvent);
    if (NDIS_STATUS_PENDING != ParaNdis5_StopReceive(pContext, TRUE, OnReceiveStoppedOnReset))
    {
        NdisSetEvent(&pContext->ResetEvent);
    }
    NdisWaitEvent(&pContext->ResetEvent, 0);
    /* Leave the event non-signaled for the next suspend cycle */
    NdisResetEvent(&pContext->ResetEvent);
    DEBUG_EXIT_STATUS(0, 0);
}
VOID ParaNdis_Resume(PARANDIS_ADAPTER *pContext)
{
    /* Re-enable both paths (bStop = FALSE, no completion callback needed) */
    ParaNdis5_StopSend(pContext, FALSE, NULL);
    ParaNdis5_StopReceive(pContext, FALSE, NULL);
    DEBUG_EXIT_STATUS(0, 0);
}
/* Work item scheduled by ParaNdis5_Reset: performs the actual reset at
   PASSIVE_LEVEL (suspend/resume both paths) and completes the pending
   NDIS reset request. */
static void NTAPI OnResetWorkItem(NDIS_WORK_ITEM * pWorkItem, PVOID Context)
{
    tGeneralWorkItem *pwi = (tGeneralWorkItem *)pWorkItem;
    PARANDIS_ADAPTER *pContext = pwi->pContext;
    DEBUG_ENTRY(0);
    pContext->bResetInProgress = TRUE;
    /* Indicate link down for the duration of the reset */
    ParaNdis_IndicateConnect(pContext, FALSE, FALSE);
    ParaNdis_Suspend(pContext);
    ParaNdis_Resume(pContext);
    pContext->bResetInProgress = FALSE;
    ParaNdis_ReportLinkStatus(pContext, FALSE);
    /* Work item freed before completing; pwi must not be touched after this */
    NdisFreeMemory(pwi, 0, 0);
    ParaNdis_DebugHistory(pContext, hopSysReset, NULL, 0, NDIS_STATUS_SUCCESS, 0);
    NdisMResetComplete(pContext->MiniportHandle, NDIS_STATUS_SUCCESS, TRUE);
}
/*************************************************************
Required NDIS procedure
Called when some procedure (like OID handler) returns PENDING and
does not complete or when CheckForHang return TRUE
*************************************************************/
/* Defers the reset to a work item so it runs at PASSIVE_LEVEL.
   Returns NDIS_STATUS_PENDING when the work item was scheduled
   (OnResetWorkItem completes the request later), NDIS_STATUS_FAILURE
   when allocation or scheduling failed. */
static NDIS_STATUS NTAPI ParaNdis5_Reset(
    OUT PBOOLEAN AddressingReset,
    IN NDIS_HANDLE MiniportAdapterContext)
{
    PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)MiniportAdapterContext;
    tGeneralWorkItem *pWorkItem;
    NDIS_STATUS status = NDIS_STATUS_FAILURE;
    DEBUG_ENTRY(0);
    ParaNdis_DebugHistory(pContext, hopSysReset, NULL, 1, 0, 0);
    pWorkItem = ParaNdis_AllocateMemory(pContext, sizeof(tGeneralWorkItem));
    if (pWorkItem)
    {
        pWorkItem->pContext = pContext;
        NdisInitializeWorkItem(&pWorkItem->wi, OnResetWorkItem, pWorkItem);
        if (NdisScheduleWorkItem(&pWorkItem->wi) == NDIS_STATUS_SUCCESS)
        {
            /* Completed asynchronously by OnResetWorkItem */
            return NDIS_STATUS_PENDING;
        }
        NdisFreeMemory(pWorkItem, 0, 0);
    }
    /* Synchronous failure: record it in the debug history before returning */
    ParaNdis_DebugHistory(pContext, hopSysReset, NULL, 0, status, 0);
    return status;
}
/*************************************************************
Callback of delayed receive pause procedure
*************************************************************/
/* Receive path fully paused during halt: release WaitHaltEvent. */
static VOID OnReceiveStopped(VOID *p)
{
    DEBUG_ENTRY(0);
    NdisSetEvent(&((PARANDIS_ADAPTER *)p)->HaltEvent);
}
/*************************************************************
Callback of delayed send pause procedure
*************************************************************/
/* Send path fully paused during halt: release WaitHaltEvent. */
static VOID OnSendStopped(VOID *p)
{
    DEBUG_ENTRY(0);
    NdisSetEvent(&((PARANDIS_ADAPTER *)p)->HaltEvent);
}
/* Waits (indefinitely) for HaltEvent, logging a warning every 5 seconds
   while it is stalled. 'Reason' names the path being stopped
   ("Send"/"Receive") and is included in the diagnostic so a stalled halt
   identifies which path is stuck — the original ignored this parameter,
   making the two call sites indistinguishable in the log. */
static void WaitHaltEvent(PARANDIS_ADAPTER *pContext, const char *Reason)
{
    UINT ms = 5000;
    /* Fast path: 1 ms probe avoids the long wait when already signaled */
    if (!NdisWaitEvent(&pContext->HaltEvent, 1))
    {
        while (!NdisWaitEvent(&pContext->HaltEvent, ms))
        {
            DPrintf(0, ("[%s] waiting for %s", __FUNCTION__, Reason));
        }
    }
}
/*************************************************************
Required NDIS procedure
Stops TX and RX path and finished the function of adapter
*************************************************************/
static VOID NTAPI ParaNdis5_Halt(
    IN NDIS_HANDLE MiniportAdapterContext)
{
    NDIS_STATUS status = NDIS_STATUS_SUCCESS;
    BOOLEAN bUnused;
    PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)MiniportAdapterContext;
    DEBUG_ENTRY(0);
    ParaNdis_DebugHistory(pContext, hopHalt, NULL, 1, 0, 0);
    /* Stop the delayed-connect timer before tearing down the paths */
    NdisCancelTimer(&pContext->ConnectTimer, &bUnused);
    /* Pause the send path; if the stop returned synchronously the
       callback never fires, so signal the event ourselves. */
    NdisResetEvent(&pContext->HaltEvent);
    if (NDIS_STATUS_PENDING != ParaNdis5_StopSend(pContext, TRUE, OnSendStopped))
        NdisSetEvent(&pContext->HaltEvent);
    WaitHaltEvent(pContext, "Send");
    /* Same sequence for the receive path */
    NdisResetEvent(&pContext->HaltEvent);
    if (NDIS_STATUS_PENDING != ParaNdis5_StopReceive(pContext, TRUE, OnReceiveStopped))
        NdisSetEvent(&pContext->HaltEvent);
    WaitHaltEvent(pContext, "Receive");
    ParaNdis_CleanupContext(pContext);
    NdisCancelTimer(&pContext->DPCPostProcessTimer, &bUnused);
    ParaNdis_DebugHistory(pContext, hopHalt, NULL, 0, 0, 0);
    ParaNdis_DebugRegisterMiniport(pContext, FALSE);
    /* Context is gone after this; status only feeds the trace below */
    NdisFreeMemory(pContext, 0, 0);
    DEBUG_EXIT_STATUS(0, status);
}
/*************************************************************
Called periodically (usually each 2 seconds)
*************************************************************/
/* Periodic hang check — forwards to the common layer's verdict. */
static BOOLEAN NTAPI ParaNdis5_CheckForHang(IN NDIS_HANDLE MiniportAdapterContext)
{
    DEBUG_ENTRY(8);
    return ParaNdis_CheckForHang((PARANDIS_ADAPTER *)MiniportAdapterContext);
}
/*************************************************************
Required NDIS procedure
Responsible for hardware interrupt handling
*************************************************************/
static VOID NTAPI ParaNdis5_MiniportISR(OUT PBOOLEAN InterruptRecognized,
                                        OUT PBOOLEAN QueueMiniportHandleInterrupt,
                                        IN NDIS_HANDLE MiniportAdapterContext)
{
    PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)MiniportAdapterContext;
    BOOLEAN b;
    /* Default to no DPC; ParaNdis_OnLegacyInterrupt may set it to TRUE
       through the out pointer when a DPC is required. */
    *QueueMiniportHandleInterrupt = FALSE;
    b = ParaNdis_OnLegacyInterrupt(pContext, QueueMiniportHandleInterrupt);
    /* TRUE when the (shared) interrupt belongs to this device */
    *InterruptRecognized = b;
    DEBUG_EXIT_STATUS(7, (ULONG)b);
}
/*************************************************************
Parameters:
Return value:
*************************************************************/
/* Forwards PnP notifications (e.g. surprise removal) to the common layer. */
VOID NTAPI ParaNdis5_PnPEventNotify(IN NDIS_HANDLE MiniportAdapterContext,
                                    IN NDIS_DEVICE_PNP_EVENT PnPEvent,
                                    IN PVOID InformationBuffer,
                                    IN ULONG InformationBufferLength)
{
    ParaNdis_OnPnPEvent((PARANDIS_ADAPTER *)MiniportAdapterContext,
                        PnPEvent,
                        InformationBuffer,
                        InformationBufferLength);
}
/*************************************************************
Driver's entry point
Parameters:
as usual
Return value:
SUCCESS or error code
*************************************************************/
NDIS_STATUS NTAPI DriverEntry(PVOID DriverObject,PVOID RegistryPath)
{
    NDIS_STATUS status;
    NDIS_MINIPORT_CHARACTERISTICS chars;
    /* Debug facilities first, so init failures are traceable */
    ParaNdis_DebugInitialize(DriverObject, RegistryPath);
    status = NDIS_STATUS_FAILURE;
    DEBUG_ENTRY(0);
    _LogOutString(0, __DATE__ " " __TIME__);
    NdisMInitializeWrapper(&DriverHandle,
                           DriverObject,
                           RegistryPath,
                           NULL
                           );
    if (DriverHandle)
    {
        NdisZeroMemory(&chars, sizeof(chars));
        //NDIS version of the miniport
        chars.MajorNdisVersion = NDIS_MINIPORT_MAJOR_VERSION;
        chars.MinorNdisVersion = NDIS_MINIPORT_MINOR_VERSION;
        //Init and destruction
        chars.InitializeHandler = ParaNdis5_Initialize;
        chars.HaltHandler = ParaNdis5_Halt;
        //Interrupt and DPC handling
        chars.HandleInterruptHandler = ParaNdis5_HandleDPC;
        chars.ISRHandler = ParaNdis5_MiniportISR;
        //Packet transfer - send path and notification on the send packet
        chars.SendPacketsHandler = ParaNdis5_SendPackets;
        chars.ReturnPacketHandler = ParaNdis5_ReturnPacket;
        //OID set\get
        chars.SetInformationHandler = ParaNdis5_SetOID;
        chars.QueryInformationHandler = ParaNdis5_QueryOID;
        //Reset
        chars.ResetHandler = ParaNdis5_Reset;
        chars.CheckForHangHandler = ParaNdis5_CheckForHang; //optional
        chars.CancelSendPacketsHandler = ParaNdis5_CancelSendPackets;
        chars.PnPEventNotifyHandler = ParaNdis5_PnPEventNotify;
        chars.AdapterShutdownHandler = ParaNdis5_Shutdown;
        status = NdisMRegisterMiniport(
            DriverHandle,
            &chars,
            sizeof(chars));
    }
    if (status == NDIS_STATUS_SUCCESS)
    {
        NdisMRegisterUnloadHandler(DriverHandle, ParaVirtualNICUnload);
    }
    else if (DriverHandle)
    {
        /* Registration failed: undo the wrapper initialization */
        DPrintf(0, ("NdisMRegisterMiniport failed"));
        NdisTerminateWrapper(DriverHandle, NULL);
    }
    else
    {
        DPrintf(0, ("NdisMInitializeWrapper failed"));
    }
    DEBUG_EXIT_STATUS(status ? 0 : 4, status);
    return status;
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,785 @@
/*
* This file contains NDIS5.X implementation of
* OID-related adapter driver procedures
*
* Copyright (c) 2008-2017 Red Hat, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "ParaNdis5.h"
#include "ParaNdis-Oid.h"
#ifdef WPP_EVENT_TRACING
#include "ParaNdis5-Oid.tmh"
#endif
/* Builders for tOidWhatToDo rows: oid, entry debug level, exit-fail level,
   exit-OK level, handling flags, and (OIDENTRYPROC only) a set handler. */
#define OIDENTRY(oid, el, xfl, xokl, flags) \
{ oid, el, xfl, xokl, flags, NULL }
#define OIDENTRYPROC(oid, el, xfl, xokl, flags, setproc) \
{ oid, el, xfl, xokl, flags, setproc }

/* Prototype NDIS_TASK_OFFLOAD_HEADER (802.3 encapsulation, version 1.0)
   used when building offload task information. */
static NDIS_TASK_OFFLOAD_HEADER ReservedHeader =
{
    NDIS_TASK_OFFLOAD_VERSION,
    sizeof(NDIS_TASK_OFFLOAD_HEADER),
    0,
    0,
    { IEEE_802_3_Encapsulation, { 1, 0 }, 0 }
};
/* OIDs reported to NDIS via OID_GEN_SUPPORTED_LIST
   (exposed through ParaNdis_GetSupportedOid). */
static NDIS_OID SupportedOids[] = {
    OID_GEN_SUPPORTED_LIST,
    OID_GEN_HARDWARE_STATUS,
    OID_GEN_MEDIA_SUPPORTED,
    OID_GEN_MEDIA_IN_USE,
    OID_GEN_MAXIMUM_LOOKAHEAD,
    OID_GEN_MAXIMUM_FRAME_SIZE,
    OID_GEN_LINK_SPEED,
    OID_GEN_TRANSMIT_BUFFER_SPACE,
    OID_GEN_RECEIVE_BUFFER_SPACE,
    OID_GEN_TRANSMIT_BLOCK_SIZE,
    OID_GEN_RECEIVE_BLOCK_SIZE,
    OID_GEN_VENDOR_ID,
    OID_GEN_VENDOR_DESCRIPTION,
    OID_GEN_VENDOR_DRIVER_VERSION,
    OID_GEN_CURRENT_PACKET_FILTER,
    OID_GEN_CURRENT_LOOKAHEAD,
    OID_GEN_DRIVER_VERSION,
    OID_GEN_MAXIMUM_TOTAL_SIZE,
    OID_GEN_PROTOCOL_OPTIONS,
    OID_GEN_MAC_OPTIONS,
    OID_GEN_MEDIA_CONNECT_STATUS,
    OID_GEN_MAXIMUM_SEND_PACKETS,
    OID_GEN_XMIT_OK,
    OID_GEN_RCV_OK,
    OID_GEN_VLAN_ID,
    OID_GEN_XMIT_ERROR,
    OID_GEN_RCV_ERROR,
    OID_GEN_RCV_NO_BUFFER,
    OID_GEN_RCV_CRC_ERROR,
    OID_GEN_TRANSMIT_QUEUE_LENGTH,
    OID_802_3_PERMANENT_ADDRESS,
    OID_802_3_CURRENT_ADDRESS,
    OID_802_3_MULTICAST_LIST,
    OID_802_3_MAC_OPTIONS,
    OID_802_3_MAXIMUM_LIST_SIZE,
    OID_802_3_RCV_ERROR_ALIGNMENT,
    OID_802_3_XMIT_ONE_COLLISION,
    OID_802_3_XMIT_MORE_COLLISIONS,
    OID_802_3_XMIT_DEFERRED,
    OID_802_3_XMIT_MAX_COLLISIONS,
    OID_802_3_RCV_OVERRUN,
    OID_802_3_XMIT_UNDERRUN,
    OID_802_3_XMIT_HEARTBEAT_FAILURE,
    OID_802_3_XMIT_TIMES_CRS_LOST,
    OID_802_3_XMIT_LATE_COLLISIONS,
    OID_PNP_CAPABILITIES,
    OID_PNP_SET_POWER,
    OID_PNP_QUERY_POWER,
    OID_PNP_ADD_WAKE_UP_PATTERN,
    OID_PNP_REMOVE_WAKE_UP_PATTERN,
    OID_PNP_ENABLE_WAKE_UP,
    OID_TCP_TASK_OFFLOAD
};

/* NDIS5-specific handlers defined later in this file */
static NDIS_STATUS OnOidSetNdis5Offload(PARANDIS_ADAPTER *pContext, tOidDesc *pOid);
static NDIS_STATUS CreateOffloadInfo5ForQuery(PARANDIS_ADAPTER *pContext, tOidDesc *pOid, PVOID *ppInfo, PULONG pulSize);
static NDIS_STATUS CreateOffloadInfo5Internal(PARANDIS_ADAPTER *pContext, PVOID *ppInfo, PULONG pulSize, PCCHAR reason, NDIS_TASK_OFFLOAD_HEADER *pHeader);
/**********************************************************
Structure defining how to process all the oids
***********************************************************/
// oid e f ok flags set procedure
/* Per-OID handling table consulted by ParaNdis5_QueryOID/ParaNdis5_SetOID
   through ParaNdis_GetOidSupportRules. Columns: oid, entry debug level,
   exit-fail level, exit-OK level, flags, optional set handler.
   The zero-oid row at the end is the terminator/fallback.
   NOTE(review): OID_GEN_CURRENT_LOOKAHEAD and OID_GEN_CURRENT_PACKET_FILTER
   each appear twice (near the top and near the bottom) with identical
   settings — presumably only the first match is used; confirm against
   ParaNdis_GetOidSupportRules before removing the duplicates. */
static const tOidWhatToDo OidsDB[] =
{
    OIDENTRY(OID_GEN_SUPPORTED_LIST,              2,2,4, ohfQueryStat),
    OIDENTRY(OID_GEN_HARDWARE_STATUS,             2,0,4, ohfQuery),
    OIDENTRY(OID_GEN_MEDIA_SUPPORTED,             2,0,4, ohfQuery),
    OIDENTRY(OID_GEN_MEDIA_IN_USE,                2,0,4, ohfQuery),
    OIDENTRY(OID_GEN_MAXIMUM_LOOKAHEAD,           2,0,4, ohfQuery),
    OIDENTRY(OID_GEN_MAXIMUM_FRAME_SIZE,          2,0,4, ohfQuery),
    OIDENTRY(OID_GEN_LINK_SPEED,                  6,0,6, ohfQuery),
    OIDENTRY(OID_GEN_TRANSMIT_BUFFER_SPACE,       2,0,4, ohfQuery),
    OIDENTRY(OID_GEN_RECEIVE_BUFFER_SPACE,        2,0,4, ohfQuery),
    OIDENTRY(OID_GEN_TRANSMIT_BLOCK_SIZE,         2,0,4, ohfQuery),
    OIDENTRY(OID_GEN_RECEIVE_BLOCK_SIZE,          2,0,4, ohfQuery),
    OIDENTRY(OID_GEN_VENDOR_ID,                   2,0,4, ohfQueryStat),
    OIDENTRY(OID_GEN_VENDOR_DESCRIPTION,          2,2,4, ohfQuery),
    OIDENTRYPROC(OID_GEN_CURRENT_PACKET_FILTER,   2,0,4, ohfQuerySet, ParaNdis_OnSetPacketFilter),
    OIDENTRYPROC(OID_GEN_CURRENT_LOOKAHEAD,       2,0,4, ohfQuerySet, ParaNdis_OnSetLookahead),
    OIDENTRY(OID_GEN_DRIVER_VERSION,              2,0,4, ohfQuery),
    OIDENTRY(OID_GEN_MAXIMUM_TOTAL_SIZE,          2,0,4, ohfQuery),
    OIDENTRY(OID_GEN_PROTOCOL_OPTIONS,            2,0,4, 0),
    OIDENTRY(OID_GEN_MAC_OPTIONS,                 2,0,4, ohfQuery),
    OIDENTRY(OID_GEN_MEDIA_CONNECT_STATUS,        6,0,6, ohfQuery),
    OIDENTRY(OID_GEN_MAXIMUM_SEND_PACKETS,        2,0,4, ohfQuery),
    OIDENTRY(OID_GEN_VENDOR_DRIVER_VERSION,       2,0,4, ohfQuery),
    OIDENTRY(OID_GEN_SUPPORTED_GUIDS,             2,2,4, 0),
    OIDENTRY(OID_GEN_TRANSPORT_HEADER_OFFSET,     2,4,4, 0),
    OIDENTRY(OID_GEN_MEDIA_CAPABILITIES,          2,4,4, 0),
    OIDENTRY(OID_GEN_PHYSICAL_MEDIUM,             2,4,4, 0),
    OIDENTRY(OID_GEN_XMIT_OK,                     6,0,6, ohfQuery3264),
    OIDENTRY(OID_GEN_RCV_OK,                      6,0,4, ohfQuery3264),
    OIDENTRY(OID_GEN_XMIT_ERROR,                  6,0,6, ohfQuery3264),
    OIDENTRY(OID_GEN_RCV_ERROR,                   6,0,6, ohfQuery3264),
    OIDENTRY(OID_GEN_RCV_NO_BUFFER,               2,0,4, ohfQuery3264),
    OIDENTRY(OID_GEN_DIRECTED_BYTES_XMIT,         2,4,4, 0),
    OIDENTRY(OID_GEN_DIRECTED_FRAMES_XMIT,        2,4,4, 0),
    OIDENTRY(OID_GEN_MULTICAST_BYTES_XMIT,        2,4,4, 0),
    OIDENTRY(OID_GEN_MULTICAST_FRAMES_XMIT,       2,4,4, 0),
    OIDENTRY(OID_GEN_BROADCAST_BYTES_XMIT,        2,4,4, 0),
    OIDENTRY(OID_GEN_BROADCAST_FRAMES_XMIT,       2,4,4, 0),
    OIDENTRY(OID_GEN_DIRECTED_BYTES_RCV,          2,4,4, 0),
    OIDENTRY(OID_GEN_DIRECTED_FRAMES_RCV,         2,4,4, 0),
    OIDENTRY(OID_GEN_MULTICAST_BYTES_RCV,         2,4,4, 0),
    OIDENTRY(OID_GEN_MULTICAST_FRAMES_RCV,        2,4,4, 0),
    OIDENTRY(OID_GEN_BROADCAST_BYTES_RCV,         2,4,4, 0),
    OIDENTRY(OID_GEN_BROADCAST_FRAMES_RCV,        2,4,4, 0),
    OIDENTRY(OID_GEN_RCV_CRC_ERROR,               2,0,4, ohfQuery3264),
    OIDENTRY(OID_GEN_TRANSMIT_QUEUE_LENGTH,       2,0,4, ohfQuery),
    OIDENTRY(OID_GEN_GET_TIME_CAPS,               2,4,4, 0),
    OIDENTRY(OID_GEN_GET_NETCARD_TIME,            2,4,4, 0),
    OIDENTRY(OID_GEN_NETCARD_LOAD,                2,4,4, 0),
    OIDENTRY(OID_GEN_DEVICE_PROFILE,              2,4,4, 0),
    OIDENTRY(OID_GEN_INIT_TIME_MS,                2,4,4, 0),
    OIDENTRY(OID_GEN_RESET_COUNTS,                2,4,4, 0),
    OIDENTRY(OID_GEN_MEDIA_SENSE_COUNTS,          2,4,4, 0),
    OIDENTRY(OID_PNP_CAPABILITIES,                2,0,4, ohfQuery),
    OIDENTRY(OID_PNP_QUERY_POWER,                 2,0,4, ohfQuery),
    OIDENTRY(OID_802_3_PERMANENT_ADDRESS,         2,0,4, ohfQueryStat),
    OIDENTRY(OID_802_3_CURRENT_ADDRESS,           2,0,4, ohfQueryStat),
    OIDENTRY(OID_802_3_MAXIMUM_LIST_SIZE,         2,0,4, ohfQuery),
    OIDENTRY(OID_802_3_MAC_OPTIONS,               2,4,4, ohfQuery),
    OIDENTRY(OID_802_3_RCV_ERROR_ALIGNMENT,       2,0,4, ohfQuery3264),
    OIDENTRY(OID_802_3_XMIT_ONE_COLLISION,        2,4,4, ohfQuery3264),
    OIDENTRY(OID_802_3_XMIT_MORE_COLLISIONS,      2,4,4, ohfQuery3264),
    OIDENTRY(OID_802_3_XMIT_DEFERRED,             2,0,4, ohfQuery3264),
    OIDENTRY(OID_802_3_XMIT_MAX_COLLISIONS,       2,0,4, ohfQuery3264),
    OIDENTRY(OID_802_3_RCV_OVERRUN,               2,0,4, ohfQuery3264),
    OIDENTRY(OID_802_3_XMIT_UNDERRUN,             2,0,4, ohfQuery3264),
    OIDENTRY(OID_802_3_XMIT_HEARTBEAT_FAILURE,    2,0,4, ohfQuery3264),
    OIDENTRY(OID_802_3_XMIT_TIMES_CRS_LOST,       2,0,4, ohfQuery3264),
    OIDENTRY(OID_802_3_XMIT_LATE_COLLISIONS,      2,0,4, ohfQuery3264),
    OIDENTRY(OID_GEN_MACHINE_NAME,                2,4,4, 0),
    OIDENTRY(OID_IP4_OFFLOAD_STATS,               4,4,4, 0),
    OIDENTRY(OID_IP6_OFFLOAD_STATS,               4,4,4, 0),
    OIDENTRY(OID_802_11_CAPABILITY,               4,4,4, 0),
    OIDENTRYPROC(OID_PNP_ADD_WAKE_UP_PATTERN,     2,0,4, ohfSet, ParaNdis_OnAddWakeupPattern),
    OIDENTRYPROC(OID_PNP_REMOVE_WAKE_UP_PATTERN,  2,0,4, ohfSet, ParaNdis_OnRemoveWakeupPattern),
    OIDENTRYPROC(OID_PNP_ENABLE_WAKE_UP,          2,0,4, ohfQuerySet, ParaNdis_OnEnableWakeup),
    OIDENTRYPROC(OID_PNP_SET_POWER,               2,0,4, ohfSet | ohfSetMoreOK, ParaNdis_OnSetPower),
    OIDENTRYPROC(OID_GEN_CURRENT_LOOKAHEAD,       2,0,4, ohfQuerySet, ParaNdis_OnSetLookahead),
    OIDENTRYPROC(OID_GEN_CURRENT_PACKET_FILTER,   2,0,4, ohfQuerySet, ParaNdis_OnSetPacketFilter),
    OIDENTRYPROC(OID_802_3_MULTICAST_LIST,        2,0,4, ohfQuerySet, ParaNdis_OnOidSetMulticastList),
    OIDENTRY(OID_FFP_SUPPORT,                     2,4,4, 0),
    OIDENTRYPROC(OID_TCP_TASK_OFFLOAD,            0,0,0, ohfQuerySet, OnOidSetNdis5Offload),
    OIDENTRYPROC(OID_GEN_VLAN_ID,                 0,4,4, ohfQuerySet, ParaNdis_OnSetVlanId),
    OIDENTRY(0x00010203 /*(OID_GEN_RECEIVE_SCALE_CAPABILITIES)*/, 2,4,4, 0),
    OIDENTRY(0x0001021F /*(OID_GEN_RECEIVE_HASH)*/, 2,4,4, 0),
    OIDENTRY(0, 4,4,4, 0),
};
/**********************************************************
Returns to common query processor the array of supported oids
***********************************************************/
void ParaNdis_GetSupportedOid(PVOID *pOidsArray, PULONG pLength)
{
    /* Hands out the static SupportedOids table and its size in bytes */
    *pOidsArray = SupportedOids;
    *pLength = sizeof(SupportedOids);
}
/*****************************************************************
Handles NDIS5 specific OID, all the rest handled by common handler
*****************************************************************/
static NDIS_STATUS ParaNdis_OidQuery(PARANDIS_ADAPTER *pContext, tOidDesc *pOid)
{
    NDIS_STATUS status;
    /* bFreeInfo: TRUE when pInfo was heap-allocated and ownership passes
       to ParaNdis_OidQueryCopy (or must be freed here on failure) */
    BOOLEAN bFreeInfo = FALSE;
    PVOID pInfo = NULL;
    ULONG ulSize = 0;
    ULONG ulLinkSpeed = 0;
    switch(pOid->Oid)
    {
        case OID_TCP_TASK_OFFLOAD:
            status = CreateOffloadInfo5ForQuery(pContext, pOid, &pInfo, &ulSize);
            bFreeInfo = pInfo != NULL;
            break;
        case OID_GEN_LINK_SPEED:
            {
                /* units are 100 bps */
                ulLinkSpeed = (ULONG)(PARANDIS_FORMAL_LINK_SPEED / 100);
                /* pInfo points at the local; safe because it is consumed
                   by ParaNdis_OidQueryCopy before this function returns */
                pInfo  = &ulLinkSpeed;
                ulSize = sizeof(ulLinkSpeed);
                status = NDIS_STATUS_SUCCESS;
            }
            break;
        default:
            /* Everything else is handled by the common OID query code */
            return ParaNdis_OidQueryCommon(pContext, pOid);
    }
    if (status == NDIS_STATUS_SUCCESS)
    {
        status = ParaNdis_OidQueryCopy(pOid, pInfo, ulSize, bFreeInfo);
    }
    else if (bFreeInfo)
    {
        NdisFreeMemory(pInfo, 0, 0);
    }
    return status;
}
/**********************************************************
NDIS required procedure of OID QUERY
Just passes all the supported oids to common query procedure
Return value:
NDIS_STATUS as returned from common code
NDIS_STATUS_NOT_SUPPORTED if suppressed in the table
***********************************************************/
NDIS_STATUS NTAPI ParaNdis5_QueryOID(IN NDIS_HANDLE MiniportAdapterContext,
                                     IN NDIS_OID Oid,
                                     IN PVOID InformationBuffer,
                                     IN ULONG InformationBufferLength,
                                     OUT PULONG BytesWritten,
                                     OUT PULONG BytesNeeded)
{
    NDIS_STATUS status = NDIS_STATUS_NOT_SUPPORTED;
    tOidWhatToDo Rules;
    PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)MiniportAdapterContext;
    tOidDesc _oid;
    /* Look up how (and whether) this OID is handled in OidsDB */
    ParaNdis_GetOidSupportRules(Oid, &Rules, OidsDB);
    _oid.ulToDoFlags = Rules.Flags;
    *BytesWritten = 0;
    *BytesNeeded = 0;
    ParaNdis_DebugHistory(pContext, hopOidRequest, NULL, Oid, 0, 1);
    DPrintf(Rules.nEntryLevel, ("[%s], id 0x%X(%s) of %d", __FUNCTION__,
            Oid,
            Rules.name,
            InformationBufferLength));
    _oid.Oid = Oid;
    _oid.InformationBuffer = InformationBuffer;
    _oid.InformationBufferLength = InformationBufferLength;
    _oid.pBytesNeeded = (PUINT)BytesNeeded;
    /* For a query, both 'read' and 'written' counters alias BytesWritten */
    _oid.pBytesRead = (PUINT)BytesWritten;
    _oid.pBytesWritten = (PUINT)BytesWritten;
    if (pContext->bSurprizeRemoved) status = NDIS_STATUS_NOT_ACCEPTED;
    else if (Rules.Flags & ohfQuery) status = ParaNdis_OidQuery(pContext, &_oid);
    ParaNdis_DebugHistory(pContext, hopOidRequest, NULL, Oid, status, 0);
    /* Failures and successes log at different verbosity levels */
    DPrintf((status != NDIS_STATUS_SUCCESS) ? Rules.nExitFailLevel : Rules.nExitOKLevel,
            ("[%s] , id 0x%X(%s) (%X), written %d, needed %d",
            __FUNCTION__,
            Rules.oid,
            Rules.name,
            status,
            *BytesWritten,
            *BytesNeeded));
    return status;
}
/**********************************************************
NDIS required procedure of OID SET
Just passes all the supported oids to common set procedure
Return value:
NDIS_STATUS as returned from set procedure
NDIS_STATUS_NOT_SUPPORTED if support not defined in the table
***********************************************************/
NDIS_STATUS NTAPI ParaNdis5_SetOID(IN NDIS_HANDLE MiniportAdapterContext,
                                   IN NDIS_OID Oid,
                                   IN PVOID InformationBuffer,
                                   IN ULONG InformationBufferLength,
                                   OUT PULONG BytesRead,
                                   OUT PULONG BytesNeeded)
{
    NDIS_STATUS status = NDIS_STATUS_NOT_SUPPORTED;
    tOidWhatToDo Rules;
    PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)MiniportAdapterContext;
    tOidDesc _oid;
    /* Look up how (and whether) this OID is handled in OidsDB */
    ParaNdis_GetOidSupportRules(Oid, &Rules, OidsDB);
    _oid.ulToDoFlags = Rules.Flags;
    *BytesRead = 0;
    *BytesNeeded = 0;
    ParaNdis_DebugHistory(pContext, hopOidRequest, NULL, Oid, 1, 1);
    DPrintf(Rules.nEntryLevel, ("[%s], id 0x%X(%s) of %d", __FUNCTION__,
            Oid,
            Rules.name,
            InformationBufferLength));
    _oid.Oid = Oid;
    _oid.InformationBuffer = InformationBuffer;
    _oid.InformationBufferLength = InformationBufferLength;
    _oid.pBytesNeeded = (PUINT)BytesNeeded;
    /* For a set, both counters alias BytesRead */
    _oid.pBytesRead = (PUINT)BytesRead;
    _oid.pBytesWritten = (PUINT)BytesRead;
    if (pContext->bSurprizeRemoved) status = NDIS_STATUS_NOT_ACCEPTED;
    else if (Rules.Flags & ohfSet)
    {
        if (Rules.OidSetProc) status = Rules.OidSetProc(pContext, &_oid);
        else
        {
            /* A settable OID must have a handler; missing one is a table bug */
            DPrintf(0, ("[%s] ERROR in OID redirection table", __FUNCTION__));
            status = NDIS_STATUS_INVALID_OID;
        }
    }
    ParaNdis_DebugHistory(pContext, hopOidRequest, NULL, Oid, status, 0);
    /* Pending requests are logged when they complete, not here */
    if (status != NDIS_STATUS_PENDING)
    {
        DPrintf((status != NDIS_STATUS_SUCCESS) ? Rules.nExitFailLevel : Rules.nExitOKLevel,
                ("[%s] , id 0x%X(%s) (%X), read %d, needed %d", __FUNCTION__,
                Rules.oid, Rules.name, status, *BytesRead, *BytesNeeded));
    }
    return status;
}
/* Work item scheduled by ParaNdis_OnSetPower: applies the power
   transition at PASSIVE_LEVEL and completes the pending
   OID_PNP_SET_POWER request. */
static void NTAPI OnSetPowerWorkItem(NDIS_WORK_ITEM * pWorkItem, PVOID Context)
{
    NDIS_STATUS status = NDIS_STATUS_SUCCESS;
    tPowerWorkItem *pwi = (tPowerWorkItem *)pWorkItem;
    PARANDIS_ADAPTER *pContext = pwi->pContext;
    if (pwi->state == (NDIS_DEVICE_POWER_STATE)NetDeviceStateD0)
    {
        /* Transition to full power may fail; D-low transition cannot */
        status = ParaNdis_PowerOn(pContext);
    }
    else
    {
        ParaNdis_PowerOff(pContext);
    }
    /* Work item freed before completing; pwi must not be touched after this */
    NdisFreeMemory(pwi, 0, 0);
    ParaNdis_DebugHistory(pContext, hopOidRequest, NULL, OID_PNP_SET_POWER, 0, 2);
    NdisMSetInformationComplete(pContext->MiniportHandle, status);
}
/**********************************************************
NDIS5.X handler of power management
***********************************************************/
NDIS_STATUS ParaNdis_OnSetPower(PARANDIS_ADAPTER *pContext, tOidDesc *pOid)
{
    NDIS_STATUS status;
    NDIS_DEVICE_POWER_STATE newState;
    DEBUG_ENTRY(0);
    /* Copy and length-validate the requested power state from the OID buffer */
    status = ParaNdis_OidSetCopy(pOid, &newState, sizeof(newState));
    if (status == NDIS_STATUS_SUCCESS)
    {
        /* Defer the transition to a work item (PASSIVE_LEVEL);
           return PENDING and let OnSetPowerWorkItem complete the request */
        tPowerWorkItem *pwi = ParaNdis_AllocateMemory(pContext, sizeof(tPowerWorkItem));
        status = NDIS_STATUS_FAILURE;
        if (pwi)
        {
            pwi->pContext = pContext;
            pwi->state    = newState;
            NdisInitializeWorkItem(&pwi->wi, OnSetPowerWorkItem, pwi);
            if (NdisScheduleWorkItem(&pwi->wi) == NDIS_STATUS_SUCCESS)
            {
                status = NDIS_STATUS_PENDING;
            }
            else
                NdisFreeMemory(pwi, 0, 0);
        }
    }
    return status;
}
/***************************************************
check that the incoming NDIS_TASK_TCP_IP_CHECKSUM
does not enable options which we do not support
***************************************************/
/* Returns TRUE when the requested checksum-offload settings enable only
   capabilities the device supports (per ParaNdis_ResetOffloadSettings). */
static BOOLEAN IsValidPcs( PARANDIS_ADAPTER *pContext, NDIS_TASK_TCP_IP_CHECKSUM *pcs)
{
    tOffloadSettingsFlags supported;
    ParaNdis_ResetOffloadSettings(pContext, &supported, NULL);
    /* Reject any requested option that we cannot actually offload */
    if (pcs->V4Receive.IpChecksum && !supported.fRxIPChecksum)
        return FALSE;
    if (pcs->V4Receive.IpOptionsSupported && !supported.fRxIPOptions)
        return FALSE;
    if (pcs->V4Receive.TcpChecksum && !supported.fRxTCPChecksum)
        return FALSE;
    if (pcs->V4Receive.TcpOptionsSupported && !supported.fRxTCPOptions)
        return FALSE;
    if (pcs->V4Receive.UdpChecksum && !supported.fRxUDPChecksum)
        return FALSE;
    if (pcs->V4Transmit.IpChecksum && !supported.fTxIPChecksum)
        return FALSE;
    if (pcs->V4Transmit.IpOptionsSupported && !supported.fTxIPOptions)
        return FALSE;
    if (pcs->V4Transmit.TcpChecksum && !supported.fTxTCPChecksum)
        return FALSE;
    if (pcs->V4Transmit.TcpOptionsSupported && !supported.fTxTCPOptions)
        return FALSE;
    if (pcs->V4Transmit.UdpChecksum && !supported.fTxUDPChecksum)
        return FALSE;
    return TRUE;
}
/***************************************************
check that the incoming NDIS_TASK_TCP_LARGE_SEND
does not enable options which we do not support
***************************************************/
/* Returns TRUE when the requested large-send (LSO) settings enable only
   capabilities the device supports (per ParaNdis_ResetOffloadSettings). */
static BOOLEAN IsValidPls( PARANDIS_ADAPTER *pContext, NDIS_TASK_TCP_LARGE_SEND *pls)
{
    tOffloadSettingsFlags supported;
    ParaNdis_ResetOffloadSettings(pContext, &supported, NULL);
    /* Only structure version 0 is understood */
    if (pls->Version != NDIS_TASK_TCP_LARGE_SEND_V0)
        return FALSE;
    if (pls->IpOptions && !supported.fTxLsoIP)
        return FALSE;
    if (pls->TcpOptions && !supported.fTxLsoTCP)
        return FALSE;
    /* Any LSO-related request requires LSO itself to be supported */
    if ((pls->IpOptions || pls->TcpOptions || pls->MaxOffLoadSize) && !supported.fTxLso)
        return FALSE;
    if (pls->MinSegmentCount < PARANDIS_MIN_LSO_SEGMENTS)
        return FALSE;
    return TRUE;
}
/***************************************************
Parses one NDIS_TASK_OFFLOAD entry (checksum, large send
or IPSec) from an OID_TCP_TASK_OFFLOAD buffer; when bApply
is TRUE and the entry is valid, copies the accepted settings
into pContext->Offload.flags.
Parameters:
    pContext - adapter context
    bApply   - TRUE when processing a 'set' request
    pto      - task entry to parse
    offset   - offset of pto from the start of the OID buffer
    maxSize  - total length of the OID buffer, for bounds checks
Returns NDIS_STATUS_SUCCESS, NDIS_STATUS_BUFFER_TOO_SHORT or
NDIS_STATUS_NOT_SUPPORTED.
***************************************************/
static NDIS_STATUS ParseOffloadTask(
    PARANDIS_ADAPTER *pContext,
    BOOLEAN bApply, /* for 'set'*/
    NDIS_TASK_OFFLOAD *pto,
    ULONG offset,
    ULONG maxSize)
{
    NDIS_STATUS status = NDIS_STATUS_SUCCESS;
    NDIS_TASK_TCP_IP_CHECKSUM *pcs = NULL;
    NDIS_TASK_TCP_LARGE_SEND *pls = NULL;
    NDIS_TASK_IPSEC *pips = NULL;
    LPCSTR sName = NULL;
    ULONG TaskBufferSize = 0, tailOffset = 0;
    /* classify the task; TaskBufferSize stays 0 for unknown task types */
    switch(pto->Task)
    {
        case TcpIpChecksumNdisTask:
            pcs = (NDIS_TASK_TCP_IP_CHECKSUM *)pto->TaskBuffer;
            TaskBufferSize = sizeof(*pcs);
            sName = "TcpIpChecksumNdisTask";
            break;
        case TcpLargeSendNdisTask:
            pls = (NDIS_TASK_TCP_LARGE_SEND *)pto->TaskBuffer;
            TaskBufferSize = sizeof(*pls);
            sName = "TcpLargeSendNdisTask";
            break;
        case IpSecNdisTask:
            pips = (NDIS_TASK_IPSEC *)pto->TaskBuffer;
            TaskBufferSize = sizeof(*pips);
            sName = "IpSecNdisTask";
            break;
        default:
            break;
    }
    /* offset of the first byte past this task's payload, relative to buffer start */
    tailOffset = offset + RtlPointerToOffset(pto, &pto->TaskBuffer) + TaskBufferSize;
    if (!TaskBufferSize)
    {
        DPrintf(0, ("[%s], unknown offload task %d", __FUNCTION__, pto->Task));
    }
    else if (tailOffset > maxSize)
    {
        DPrintf(0, ("[%s], can not parse %s at offset %d, tail at %d", __FUNCTION__, sName, offset, tailOffset));
        status = NDIS_STATUS_BUFFER_TOO_SHORT;
    }
    else if (TaskBufferSize > pto->TaskBufferLength)
    {
        DPrintf(0, ("[%s], invalid size of %s", __FUNCTION__, sName));
        status = NDIS_STATUS_BUFFER_TOO_SHORT;
    }
    else if (pcs)
    {
        DPrintf(0, ("[%s], parsing %s", __FUNCTION__, sName));
        DPrintf(0, ("Rx4: checksum IP(%d),TCP(%d),UDP(%d), options IP(%d),TCP(%d)",
            pcs->V4Receive.IpChecksum, pcs->V4Receive.TcpChecksum, pcs->V4Receive.UdpChecksum,
            pcs->V4Receive.IpOptionsSupported, pcs->V4Receive.TcpOptionsSupported
            ));
        DPrintf(0, ("Tx4: checksum IP(%d),TCP(%d),UDP(%d), options IP(%d),TCP(%d)",
            pcs->V4Transmit.IpChecksum, pcs->V4Transmit.TcpChecksum, pcs->V4Transmit.UdpChecksum,
            pcs->V4Transmit.IpOptionsSupported, pcs->V4Transmit.TcpOptionsSupported
            ));
        if (bApply)
        {
            if (IsValidPcs(pContext, pcs))
            {
                /* accepted: latch the requested checksum settings */
                tOffloadSettingsFlags *pf = &pContext->Offload.flags;
                pf->fTxIPChecksum = !!pcs->V4Transmit.IpChecksum;
                pf->fTxTCPChecksum = !!pcs->V4Transmit.TcpChecksum;
                pf->fTxUDPChecksum = !!pcs->V4Transmit.UdpChecksum;
                pf->fTxTCPOptions = !!pcs->V4Transmit.TcpOptionsSupported;
                pf->fTxIPOptions = !!pcs->V4Transmit.IpOptionsSupported;
                pf->fRxIPChecksum = !!pcs->V4Receive.IpChecksum;
                pf->fRxIPOptions = !!pcs->V4Receive.IpOptionsSupported;
                pf->fRxTCPChecksum = !!pcs->V4Receive.TcpChecksum;
                pf->fRxTCPOptions = !!pcs->V4Receive.TcpOptionsSupported;
                pf->fRxUDPChecksum = !!pcs->V4Receive.UdpChecksum;
            }
            else
            {
                /* FIX: was STATUS_NOT_SUPPORTED; same numeric value, but use the
                   NDIS_STATUS_* name to match this function's NDIS_STATUS contract */
                status = NDIS_STATUS_NOT_SUPPORTED;
            }
        }
    }
    else if (pls)
    {
        DPrintf(0, ("[%s], parsing %s version %d", __FUNCTION__, sName, pls->Version));
        DPrintf(0, ("options IP(%d),TCP(%d),MaxOffload %d, MinSegments %d",
            pls->IpOptions, pls->TcpOptions, pls->MaxOffLoadSize, pls->MinSegmentCount));
        if (bApply)
        {
            if (IsValidPls(pContext, pls))
            {
                /* accepted: latch the requested LSO settings */
                tOffloadSettingsFlags *pf = &pContext->Offload.flags;
                pf->fTxLsoIP = !!pls->IpOptions;
                pf->fTxLsoTCP = !!pls->TcpOptions;
                pf->fTxLso = 1;
            }
            else
            {
                /* FIX: was STATUS_NOT_SUPPORTED (see note above) */
                status = NDIS_STATUS_NOT_SUPPORTED;
            }
        }
    }
    else if (pips)
    {
        /* IPSec offload task is recognized for logging but not configured here */
        DPrintf(0, ("[%s], parsing %s", __FUNCTION__, sName));
    }
    return status;
}
/* Sanity-checks an NDIS_TASK_OFFLOAD_HEADER: 802.3 encapsulation,
   expected structure version and exact structure size. */
static FORCEINLINE BOOLEAN ValidateOffloadHeader(NDIS_TASK_OFFLOAD_HEADER *pth)
{
    if (pth->EncapsulationFormat.Encapsulation != IEEE_802_3_Encapsulation)
        return FALSE;
    if (pth->Version != NDIS_TASK_OFFLOAD_VERSION)
        return FALSE;
    return pth->Size == sizeof(*pth);
}
/********************************************************
Validates an OID_TCP_TASK_OFFLOAD buffer and optionally
applies its task settings to the adapter.
Parameters:
    pContext   - adapter context
    pth        - task-offload header at the start of the buffer
    size       - total buffer length in bytes
    bApply     - TRUE to apply accepted settings ('set' path)
    reason     - tag for debug output only
    headerOnly - TRUE to validate the header without walking tasks
Returns NDIS_STATUS_SUCCESS, NDIS_STATUS_NOT_SUPPORTED (bad header),
NDIS_STATUS_INVALID_DATA or a status from ParseOffloadTask.
********************************************************/
static NDIS_STATUS ParseOffload(
    PARANDIS_ADAPTER *pContext,
    NDIS_TASK_OFFLOAD_HEADER *pth,
    ULONG size,
    BOOLEAN bApply,
    PCCHAR reason,
    BOOLEAN headerOnly)
{
    NDIS_STATUS status = NDIS_STATUS_NOT_SUPPORTED;
    BOOLEAN bReset = FALSE;
    ULONG ulNoCapabilities = 0;
    DPrintf(0, ("[%s](%s), format %d", __FUNCTION__, reason,
        pth->EncapsulationFormat.Encapsulation));
    if (ValidateOffloadHeader(pth))
    {
        PUCHAR p = (PUCHAR)pth;
        LONG offset = (LONG)pth->OffsetFirstTask;
        status = NDIS_STATUS_SUCCESS;
        DPrintf(0, ("[%s], header version %d, ip header at %d, fixed %d, first at %d", __FUNCTION__,
            pth->Version,
            pth->EncapsulationFormat.EncapsulationHeaderSize,
            pth->EncapsulationFormat.Flags.FixedHeaderSize,
            offset));
        if (!offset && bApply)
        {
            /* disable all the capabilities */
            // according to DDK, 0 at first task offset means disabling all the capabilities
            DPrintf(0, ("[%s] RESETTING offload capabilities", __FUNCTION__));
            ParaNdis_ResetOffloadSettings(pContext, NULL, &ulNoCapabilities);
            bReset = TRUE;
        }
        /* walk the linked task chain; each entry header must fit inside the buffer */
        while (!headerOnly && offset > 0 && (offset + sizeof(NDIS_TASK_OFFLOAD)) < size)
        {
            NDIS_TASK_OFFLOAD *pto = (NDIS_TASK_OFFLOAD *)(p + offset);
            if (pto->Version != NDIS_TASK_OFFLOAD_VERSION)
            {
                DPrintf(0, ("[%s], unexpected TO version %d at %d",
                    __FUNCTION__, pto->Version, offset));
                status = NDIS_STATUS_INVALID_DATA;
                break;
            }
            status = ParseOffloadTask(pContext, bApply, pto, offset, size);
            /* OffsetNextTask of 0 terminates the chain */
            if (!pto->OffsetNextTask || status != NDIS_STATUS_SUCCESS)
                break;
            offset += pto->OffsetNextTask;
        }
    }
    /* FIX: was STATUS_SUCCESS; both are 0 so behavior is unchanged, but use the
       NDIS_STATUS_* name for consistency with the rest of this function */
    if (status == NDIS_STATUS_SUCCESS && bApply)
        pContext->Offload.ipHeaderOffset = bReset ? 0 : pth->EncapsulationFormat.EncapsulationHeaderSize;
    return status;
}
/********************************************************
Fill offload query structure according to our capabilities
********************************************************/
/* Fills *pcs from the adapter's supported offload flags.
   Returns TRUE when at least one checksum capability is available. */
static BOOLEAN GetTcpIpCheckSumCapabilities(
    PARANDIS_ADAPTER *pContext,
    NDIS_TASK_TCP_IP_CHECKSUM *pcs)
{
    tOffloadSettingsFlags caps;
    BOOLEAN bAnyChecksum;
    NdisZeroMemory(pcs, sizeof(*pcs));
    ParaNdis_ResetOffloadSettings(pContext, &caps, NULL);
    /* receive side */
    pcs->V4Receive.IpChecksum = !!caps.fRxIPChecksum;
    pcs->V4Receive.IpOptionsSupported = !!caps.fRxIPOptions;
    pcs->V4Receive.TcpChecksum = !!caps.fRxTCPChecksum;
    pcs->V4Receive.TcpOptionsSupported = !!caps.fRxTCPOptions;
    pcs->V4Receive.UdpChecksum = !!caps.fRxUDPChecksum;
    /* transmit side */
    pcs->V4Transmit.IpChecksum = !!caps.fTxIPChecksum;
    pcs->V4Transmit.IpOptionsSupported = !!caps.fTxIPOptions;
    pcs->V4Transmit.TcpChecksum = !!caps.fTxTCPChecksum;
    pcs->V4Transmit.TcpOptionsSupported = !!caps.fTxTCPOptions;
    pcs->V4Transmit.UdpChecksum = !!caps.fTxUDPChecksum;
    /* option-support bits alone do not count as a capability */
    bAnyChecksum =
        pcs->V4Transmit.IpChecksum ||
        pcs->V4Transmit.TcpChecksum ||
        pcs->V4Transmit.UdpChecksum ||
        pcs->V4Receive.IpChecksum ||
        pcs->V4Receive.TcpChecksum ||
        pcs->V4Receive.UdpChecksum;
    return bAnyChecksum;
}
/********************************************************
Fill offload query structure according to our capabilities
********************************************************/
/* Fills *pls from the adapter's supported offload flags.
   Returns TRUE when LSO is available at all. */
static BOOLEAN GetLargeSendCapabilities(
    PARANDIS_ADAPTER *pContext,
    NDIS_TASK_TCP_LARGE_SEND *pls)
{
    tOffloadSettingsFlags caps;
    ParaNdis_ResetOffloadSettings(pContext, &caps, NULL);
    NdisZeroMemory(pls, sizeof(*pls));
    pls->Version = NDIS_TASK_TCP_LARGE_SEND_V0;
    pls->IpOptions = !!caps.fTxLsoIP;
    pls->TcpOptions = !!caps.fTxLsoTCP;
    pls->MinSegmentCount = PARANDIS_MIN_LSO_SEGMENTS;
    pls->MaxOffLoadSize = pContext->Offload.maxPacketSize;
    return caps.fTxLso ? TRUE : FALSE;
}
/********************************************************
Allocate and fill our capabilities, dependent on registry settings.
Note that the NDIS tests of WLK 1.2 and 1.3 fail (offloadmisc)
if the CS capability is indicated, and pass if only LSO is indicated.
********************************************************/
/* Allocates and serializes the adapter's offload capabilities into a single
   buffer: an NDIS_TASK_OFFLOAD_HEADER followed by a linked chain of
   NDIS_TASK_OFFLOAD entries (LSO first, then checksum, per the flag loop order).
   On success *ppInfo/*pulSize receive the buffer and its size; the caller owns
   the buffer and must free it with NdisFreeMemory.
   Returns NDIS_STATUS_SUCCESS (via the self-check ParseOffload),
   NDIS_STATUS_NOT_SUPPORTED when no capability is available, or
   NDIS_STATUS_RESOURCES on allocation failure. */
NDIS_STATUS CreateOffloadInfo5Internal(
PARANDIS_ADAPTER *pContext,
PVOID *ppInfo,
PULONG pulSize,
PCCHAR reason,
NDIS_TASK_OFFLOAD_HEADER *pHeader)
{
NDIS_STATUS status = NDIS_STATUS_RESOURCES;
/* worst case: header plus both tasks with their payloads */
ULONG size =
sizeof(NDIS_TASK_OFFLOAD_HEADER) +
sizeof(NDIS_TASK_OFFLOAD) + sizeof(NDIS_TASK_TCP_IP_CHECKSUM) +
sizeof(NDIS_TASK_OFFLOAD) + sizeof(NDIS_TASK_TCP_LARGE_SEND);
*ppInfo = ParaNdis_AllocateMemory(pContext, size);
if (*ppInfo)
{
/* bit 0 = LSO available, bit 1 = checksum available */
ULONG flags = 0;
NDIS_TASK_TCP_IP_CHECKSUM cs;
NDIS_TASK_TCP_LARGE_SEND lso;
flags |= GetTcpIpCheckSumCapabilities(pContext, &cs) ? 2 : 0;
flags |= GetLargeSendCapabilities(pContext, &lso) ? 1 : 0;
if (flags)
{
NDIS_TASK_OFFLOAD_HEADER *ph;
NDIS_TASK_OFFLOAD *pto;
UINT i = 0;
/* pOffset points at the offset field of the previous element
   (header's OffsetFirstTask, then each task's OffsetNextTask);
   base is the element that offset is measured from */
ULONG *pOffset;
PVOID base;
*pulSize = size;
NdisZeroMemory(*ppInfo, size);
ph = (NDIS_TASK_OFFLOAD_HEADER *)*ppInfo;
*ph = *pHeader;
pto = (NDIS_TASK_OFFLOAD *)(ph + 1);
base = ph;
pOffset = &ph->OffsetFirstTask;
ph->OffsetFirstTask = 0;
do
{
if (flags & (1 << i))
{
flags &= ~(1 << i);
pto->Version = NDIS_TASK_OFFLOAD_VERSION;
pto->Size = sizeof(*pto);
/* link the previous element to this task */
*pOffset = RtlPointerToOffset(base, pto);
base = pto;
pOffset = &pto->OffsetNextTask;
switch(i)
{
case 1:
{
/* checksum task: payload immediately follows the task header */
NDIS_TASK_TCP_IP_CHECKSUM *pcs = (NDIS_TASK_TCP_IP_CHECKSUM *)pto->TaskBuffer;
pto->Task = TcpIpChecksumNdisTask;
pto->TaskBufferLength = sizeof(*pcs);
NdisMoveMemory(pcs, &cs, sizeof(cs));
/* next task starts right after this payload */
pto = (NDIS_TASK_OFFLOAD *)(pcs + 1);
break;
}
case 0:
{
/* large-send task: payload immediately follows the task header */
NDIS_TASK_TCP_LARGE_SEND *pls = (NDIS_TASK_TCP_LARGE_SEND *)pto->TaskBuffer;
pto->Task = TcpLargeSendNdisTask;
pto->TaskBufferLength = sizeof(*pls);
NdisMoveMemory(pls, &lso, sizeof(lso));
pto = (NDIS_TASK_OFFLOAD *)(pls + 1);
break;
}
default:
break;
}
}
++i;
} while (flags);
/* self-check: parse what we just built (also logs it); no apply */
status = ParseOffload(pContext, ph, size, FALSE, reason, FALSE);
}
else
{
/* nothing to report — release the buffer and tell the caller */
NdisFreeMemory(*ppInfo, 0, 0);
*ppInfo = NULL;
status = NDIS_STATUS_NOT_SUPPORTED;
}
}
return status;
}
/* Builds the response buffer for a 'query' of OID_TCP_TASK_OFFLOAD.
   Validates the caller-provided header first (falling back to the
   driver's ReservedHeader template when the buffer is too short),
   then serializes the capabilities via CreateOffloadInfo5Internal.
   Returns the header-validation status; on failure *ppInfo stays NULL. */
NDIS_STATUS CreateOffloadInfo5ForQuery(
PARANDIS_ADAPTER *pContext,
tOidDesc *pOid,
PVOID *ppInfo,
PULONG pulSize)
{
NDIS_TASK_OFFLOAD_HEADER *pth = (NDIS_TASK_OFFLOAD_HEADER *)pOid->InformationBuffer;
NDIS_STATUS status;
*ppInfo = NULL;
*pulSize = 0;
/* too-short input buffer: use the default header template instead */
if (pOid->InformationBufferLength < sizeof(*pth)) pth = &ReservedHeader;
status = ParseOffload(pContext, pth, pOid->InformationBufferLength, FALSE, "query enter", TRUE);
if (status == NDIS_STATUS_SUCCESS)
{
/* NOTE(review): the internal status is deliberately discarded — on its
   failure *ppInfo remains NULL and the caller is expected to handle that */
CreateOffloadInfo5Internal(pContext, ppInfo, pulSize, "QUERY", pth);
}
return status;
}
/* Handler for 'set' of OID_TCP_TASK_OFFLOAD (NDIS5).
   Parses and applies the caller-supplied task-offload buffer; on success
   reports the number of bytes consumed via pOid->pBytesRead. */
NDIS_STATUS OnOidSetNdis5Offload(PARANDIS_ADAPTER *pContext, tOidDesc *pOid)
{
NDIS_STATUS status;
status = ParseOffload(pContext, (NDIS_TASK_OFFLOAD_HEADER *)pOid->InformationBuffer,
pOid->InformationBufferLength, TRUE, "SET", FALSE);
/* NOTE(review): STATUS_SUCCESS and NDIS_STATUS_SUCCESS share the value 0,
   so this comparison works, but the NDIS_STATUS_* name would be consistent
   with the rest of the offload code. */
if (status == STATUS_SUCCESS)
{
#if 0 // only for logging after SET
PVOID pInfo = NULL;
ULONG dummy = 0;
CreateOffloadInfo5Internal(pContext, &pInfo, &dummy, "UPDATED", &ReservedHeader);
if (pInfo) NdisFreeMemory(pInfo, 0, 0);
#endif
*pOid->pBytesRead = pOid->InformationBufferLength;
}
else
{
DPrintf(0, ("[%s], restoring after unsuccessful set", __FUNCTION__));
/* NOTE(review): self-assignment is a no-op — nothing is actually restored
   here, although ParseOffload(bApply=TRUE) may already have modified
   pContext->Offload.flags before failing. Presumably a backup copy taken
   before ParseOffload was intended; confirm against upstream NetKVM and
   restore from that copy instead. */
pContext->Offload = pContext->Offload;
}
return status;
}

View file

@ -0,0 +1,88 @@
/*
* This file contains NDIS5.X specific procedure definitions in NDIS driver.
*
* Copyright (c) 2008-2017 Red Hat, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _PARA_NDIS5_H
#define _PARA_NDIS5_H
#include "ndis56common.h"
/* NDIS5 miniport SetInformation handler (OID 'set' entry point) */
NDIS_STATUS NTAPI ParaNdis5_SetOID(IN NDIS_HANDLE MiniportAdapterContext,
IN NDIS_OID Oid,
IN PVOID InformationBuffer,
IN ULONG InformationBufferLength,
OUT PULONG BytesRead,
OUT PULONG BytesNeeded);
/* NDIS5 miniport QueryInformation handler (OID 'query' entry point) */
NDIS_STATUS NTAPI ParaNdis5_QueryOID(IN NDIS_HANDLE MiniportAdapterContext,
IN NDIS_OID Oid,
IN PVOID InformationBuffer,
IN ULONG InformationBufferLength,
OUT PULONG BytesWritten,
OUT PULONG BytesNeeded);
/* NDIS5 multi-packet send handler */
VOID NTAPI ParaNdis5_SendPackets(IN NDIS_HANDLE MiniportAdapterContext,
IN PPNDIS_PACKET PacketArray,
IN UINT NumberOfPackets);
/* Called by NDIS when a previously indicated receive packet is returned to the miniport */
VOID NTAPI ParaNdis5_ReturnPacket(IN NDIS_HANDLE MiniportAdapterContext,IN PNDIS_PACKET Packet);
/* Indicates media connect/disconnect state to NDIS */
VOID ParaNdis5_IndicateConnect(PARANDIS_ADAPTER *pContext, BOOLEAN bConnected);
//NDIS 5.1 related functions
/* NDIS 5.1 send-cancellation handler (by CancelId) */
VOID NTAPI ParaNdis5_CancelSendPackets(IN NDIS_HANDLE MiniportAdapterContext,IN PVOID CancelId);
/* Pauses/resumes the transmit path; Callback fires when the operation completes */
NDIS_STATUS ParaNdis5_StopSend(
PARANDIS_ADAPTER *pContext,
BOOLEAN bStop,
ONPAUSECOMPLETEPROC Callback);
/* Pauses/resumes the receive path; Callback fires when the operation completes */
NDIS_STATUS ParaNdis5_StopReceive(
PARANDIS_ADAPTER *pContext,
BOOLEAN bStop,
ONPAUSECOMPLETEPROC Callback
);
/* Deferred procedure call handler for interrupt post-processing */
VOID NTAPI ParaNdis5_HandleDPC(
IN NDIS_HANDLE MiniportAdapterContext);
/* Work item carrying a pending device power-state transition */
typedef struct _tagPowerWorkItem
{
NDIS_WORK_ITEM wi;
PPARANDIS_ADAPTER pContext;
NDIS_DEVICE_POWER_STATE state;
}tPowerWorkItem;
/* Generic work item bound to an adapter context */
typedef struct _tagGeneralWorkItem
{
NDIS_WORK_ITEM wi;
PPARANDIS_ADAPTER pContext;
}tGeneralWorkItem;
#endif // _PARA_NDIS5_H

View file

@ -0,0 +1,331 @@
;/*++
;INX_COPYRIGHT_1
;INX_COPYRIGHT_2
;
;Module Name:
; netkvm.inf
;
;Abstract:
; VirtIO Ethernet Adapter
;
;Installation Notes:
; Using Devcon: Type "devcon install netkvm.inf PCI\VEN_1AF4&DEV_1000&SUBSYS_0001_INX_SUBSYS_VENDOR_ID&REV_00" or
; "devcon install netkvm.inf PCI\VEN_1AF4&DEV_1041&SUBSYS_1100_INX_SUBSYS_VENDOR_ID&REV_01" to install
;
;--*/
[version]
Signature = "$Windows NT$"
Class = Net
CatalogFile = netkvm.cat
ClassGUID = {4d36e972-e325-11ce-bfc1-08002be10318}
Provider = %VENDOR%
DriverVer=01/01/2008,0.0.0.1 ; this line will be replaced with stampinf
DriverPackageType = PlugAndPlay
DriverPackageDisplayName = %kvmnet5.DeviceDesc%
[Manufacturer]
%VENDOR% = NetKVM, NT$ARCH$
[NetKVM.NT$ARCH$]
%kvmnet5.DeviceDesc% = kvmnet5.ndi, PCI\VEN_1AF4&DEV_1000&SUBSYS_0001_INX_SUBSYS_VENDOR_ID&REV_00, PCI\VEN_1AF4&DEV_1000
%kvmnet5.DeviceDesc% = kvmnet5.ndi, PCI\VEN_1AF4&DEV_1041&SUBSYS_1100_INX_SUBSYS_VENDOR_ID&REV_01, PCI\VEN_1AF4&DEV_1041
[kvmnet5.ndi]
Characteristics = 0x84 ; NCF_PHYSICAL | NCF_HAS_UI
BusType = 5 ; PCI
AddReg = kvmnet5.Reg, Parameters
CopyFiles = kvmnet5.CopyFiles
*IfType = 6
*MediaType = 0 ; NdisMedium802_3
*PhysicalMediaType = 0 ; NdisPhysicalMediumUnspecified
[kvmnet5.ndi.Services]
AddService = netkvm, 2, kvmnet5.Service, kvmnet5.EventLog
[kvmnet5.Reg]
HKR, , BusNumber, 0, "0"
HKR, Ndi, Service, 0, "netkvm"
HKR, Ndi\Interfaces, UpperRange, 0, "ndis5"
HKR, Ndi\Interfaces, LowerRange, 0, "ethernet"
[Parameters]
HKR, Ndi\Params\ConnectRate, ParamDesc, 0, %ConnectRate%
HKR, Ndi\Params\ConnectRate, Default, 0, "1001"
HKR, Ndi\Params\ConnectRate, type, 0, "enum"
HKR, Ndi\Params\ConnectRate\enum, "10", 0, %10M%
HKR, Ndi\Params\ConnectRate\enum, "100", 0, %100M%
HKR, Ndi\Params\ConnectRate\enum, "1001", 0, %1G%
HKR, Ndi\Params\ConnectRate\enum, "10000", 0, %10G%
HKR, Ndi\Params\Priority, ParamDesc, 0, %Priority%
HKR, Ndi\Params\Priority, Default, 0, "1"
HKR, Ndi\Params\Priority, type, 0, "enum"
HKR, Ndi\Params\Priority\enum, "1", 0, %Enable%
HKR, Ndi\Params\Priority\enum, "0", 0, %Disable%
HKR, Ndi\Params\*PriorityVLANTag, ParamDesc, 0, %PriorityVlanTag%
HKR, Ndi\Params\*PriorityVLANTag, Default, 0, "3"
HKR, Ndi\Params\*PriorityVLANTag, type, 0, "enum"
HKR, Ndi\Params\*PriorityVLANTag\enum, "3", 0, %Priority_Vlan%
HKR, Ndi\Params\*PriorityVLANTag\enum, "2", 0, %VLan%
HKR, Ndi\Params\*PriorityVLANTag\enum, "1", 0, %PriorityOnly%
HKR, Ndi\Params\*PriorityVLANTag\enum, "0", 0, %Disable%
HKR, Ndi\Params\DoLog, ParamDesc, 0, %EnableLogging%
HKR, Ndi\Params\DoLog, Default, 0, "1"
HKR, Ndi\Params\DoLog, type, 0, "enum"
HKR, Ndi\Params\DoLog\enum, "1", 0, %Enable%
HKR, Ndi\Params\DoLog\enum, "0", 0, %Disable%
HKR, Ndi\params\DebugLevel, ParamDesc, 0, %DebugLevel%
HKR, Ndi\params\DebugLevel, type, 0, "int"
HKR, Ndi\params\DebugLevel, default, 0, "0"
HKR, Ndi\params\DebugLevel, min, 0, "0"
HKR, Ndi\params\DebugLevel, max, 0, "8"
HKR, Ndi\params\DebugLevel, step, 0, "1"
HKR, Ndi\params\LogStatistics, ParamDesc, 0, %LogStatistics%
HKR, Ndi\params\LogStatistics, type, 0, "int"
HKR, Ndi\params\LogStatistics, default, 0, "0"
HKR, Ndi\params\LogStatistics, min, 0, "0"
HKR, Ndi\params\LogStatistics, max, 0, "10000"
HKR, Ndi\params\LogStatistics, step, 0, "1"
HKR, Ndi\params\MTU, ParamDesc, 0, %MTU%
HKR, Ndi\params\MTU, type, 0, "long"
HKR, Ndi\params\MTU, default, 0, "1500"
HKR, Ndi\params\MTU, min, 0, "500"
HKR, Ndi\params\MTU, max, 0, "65500"
HKR, Ndi\params\MTU, step, 0, "1"
HKR, Ndi\params\TxCapacity, ParamDesc, 0, %TxCapacity%
HKR, Ndi\params\TxCapacity, type, 0, "enum"
HKR, Ndi\params\TxCapacity, default, 0, "1024"
HKR, Ndi\Params\TxCapacity\enum, "16", 0, %String_16%
HKR, Ndi\Params\TxCapacity\enum, "32", 0, %String_32%
HKR, Ndi\Params\TxCapacity\enum, "64", 0, %String_64%
HKR, Ndi\Params\TxCapacity\enum, "128", 0, %String_128%
HKR, Ndi\Params\TxCapacity\enum, "256", 0, %String_256%
HKR, Ndi\Params\TxCapacity\enum, "512", 0, %String_512%
HKR, Ndi\Params\TxCapacity\enum, "1024", 0, %String_1024%
HKR, Ndi\params\RxCapacity, ParamDesc, 0, %RxCapacity%
HKR, Ndi\params\RxCapacity, type, 0, "enum"
HKR, Ndi\params\RxCapacity, default, 0, "256"
HKR, Ndi\Params\RxCapacity\enum, "16", 0, %String_16%
HKR, Ndi\Params\RxCapacity\enum, "32", 0, %String_32%
HKR, Ndi\Params\RxCapacity\enum, "64", 0, %String_64%
HKR, Ndi\Params\RxCapacity\enum, "128", 0, %String_128%
HKR, Ndi\Params\RxCapacity\enum, "256", 0, %String_256%
HKR, Ndi\Params\RxCapacity\enum, "512", 0, %String_512%
HKR, Ndi\Params\RxCapacity\enum, "1024", 0, %String_1024%
HKR, Ndi\Params\MergeableBuf, ParamDesc, 0, %MergeableBuf%
HKR, Ndi\Params\MergeableBuf, Default, 0, "1"
HKR, Ndi\Params\MergeableBuf, type, 0, "enum"
HKR, Ndi\Params\MergeableBuf\enum, "1", 0, %Enable%
HKR, Ndi\Params\MergeableBuf\enum, "0", 0, %Disable%
HKR, Ndi\params\NetworkAddress, ParamDesc, 0, %NetworkAddress%
HKR, Ndi\params\NetworkAddress, type, 0, "edit"
HKR, Ndi\params\NetworkAddress, Optional, 0, "1"
HKR, Ndi\Params\Indirect, ParamDesc, 0, %Indirect%
HKR, Ndi\Params\Indirect, Default, 0, "0"
HKR, Ndi\Params\Indirect, type, 0, "enum"
HKR, Ndi\Params\Indirect\enum, "0", 0, %Disable%
HKR, Ndi\Params\Indirect\enum, "1", 0, %Enable%
HKR, Ndi\Params\Indirect\enum, "2", 0, %Enable*%
HKR, Ndi\Params\OffLoad.TxChecksum, ParamDesc, 0, %OffLoad.TxChecksum%
HKR, Ndi\Params\OffLoad.TxChecksum, Default, 0, "0"
HKR, Ndi\Params\OffLoad.TxChecksum, type, 0, "enum"
HKR, Ndi\Params\OffLoad.TxChecksum\enum, "31", 0, %All%
HKR, Ndi\Params\OffLoad.TxChecksum\enum, "27", 0, %TCPUDPAll%
HKR, Ndi\Params\OffLoad.TxChecksum\enum, "3", 0, %TCPUDPv4%
HKR, Ndi\Params\OffLoad.TxChecksum\enum, "1", 0, %TCPv4%
HKR, Ndi\Params\OffLoad.TxChecksum\enum, "0", 0, %Disable%
HKR, Ndi\Params\OffLoad.TxLSO, ParamDesc, 0, %OffLoad.TxLSO%
HKR, Ndi\Params\OffLoad.TxLSO, Default, 0, "2"
HKR, Ndi\Params\OffLoad.TxLSO, type, 0, "enum"
HKR, Ndi\Params\OffLoad.TxLSO\enum, "2", 0, %Maximal%
HKR, Ndi\Params\OffLoad.TxLSO\enum, "1", 0, %IPv4%
HKR, Ndi\Params\OffLoad.TxLSO\enum, "0", 0, %Disable%
HKR, Ndi\Params\OffLoad.RxCS, ParamDesc, 0, %OffLoad.RxCS%
HKR, Ndi\Params\OffLoad.RxCS, Default, 0, "0"
HKR, Ndi\Params\OffLoad.RxCS, type, 0, "enum"
HKR, Ndi\Params\OffLoad.RxCS\enum, "31", 0, %All%
HKR, Ndi\Params\OffLoad.RxCS\enum, "27", 0, %TCPUDPAll%
HKR, Ndi\Params\OffLoad.RxCS\enum, "3", 0, %TCPUDPv4%
HKR, Ndi\Params\OffLoad.RxCS\enum, "1", 0, %TCPv4%
HKR, Ndi\Params\OffLoad.RxCS\enum, "0", 0, %Disable%
HKR, Ndi\params\ConnectTimer, ParamDesc, 0, %ConnectAfter%
HKR, Ndi\params\ConnectTimer, type, 0, "long"
HKR, Ndi\params\ConnectTimer, default, 0, "0"
HKR, Ndi\params\ConnectTimer, min, 0, "0"
HKR, Ndi\params\ConnectTimer, max, 0, "300000"
HKR, Ndi\params\ConnectTimer, step, 0, "50"
HKR, Ndi\Params\DpcCheck, ParamDesc, 0, %DPCCheck%
HKR, Ndi\Params\DpcCheck, Default, 0, "0"
HKR, Ndi\Params\DpcCheck, type, 0, "enum"
HKR, Ndi\Params\DpcCheck\enum, "1", 0, %Enable%
HKR, Ndi\Params\DpcCheck\enum, "0", 0, %Disable%
HKR, Ndi\Params\DpcCheck\enum, "2", 0, %Enable*%
HKR, Ndi\Params\Gather, ParamDesc, 0, %Gather%
HKR, Ndi\Params\Gather, Default, 0, "1"
HKR, Ndi\Params\Gather, type, 0, "enum"
HKR, Ndi\Params\Gather\enum, "1", 0, %Enable%
HKR, Ndi\Params\Gather\enum, "0", 0, %Disable%
HKR, Ndi\Params\Offload.GuestCS, ParamDesc, 0, %GuestCS%
HKR, Ndi\Params\Offload.GuestCS, Default, 0, "0"
HKR, Ndi\Params\Offload.GuestCS, type, 0, "enum"
HKR, Ndi\Params\Offload.GuestCS\enum,"1", 0, %Enable%
HKR, Ndi\Params\Offload.GuestCS\enum,"0", 0, %Disable%
HKR, Ndi\Params\InterruptRecovery, ParamDesc, 0, %InterruptRecovery%
HKR, Ndi\Params\InterruptRecovery, Default, 0, "1"
HKR, Ndi\Params\InterruptRecovery, type, 0, "enum"
HKR, Ndi\Params\InterruptRecovery\enum, "1", 0, %Enable%
HKR, Ndi\Params\InterruptRecovery\enum, "0", 0, %Disable%
HKR, Ndi\params\NumberOfHandledRXPackersInDPC, ParamDesc, 0, %NumberOfHandledRXPackersInDPC%
HKR, Ndi\params\NumberOfHandledRXPackersInDPC, type, 0, "long"
HKR, Ndi\params\NumberOfHandledRXPackersInDPC, default, 0, "1000"
HKR, Ndi\params\NumberOfHandledRXPackersInDPC, min, 0, "1"
HKR, Ndi\params\NumberOfHandledRXPackersInDPC, max, 0, "10000"
HKR, Ndi\params\NumberOfHandledRXPackersInDPC, step, 0, "1"
HKR, Ndi\Params\PacketFilter, ParamDesc, 0, %PacketFilter%
HKR, Ndi\Params\PacketFilter, Default, 0, "1"
HKR, Ndi\Params\PacketFilter, type, 0, "enum"
HKR, Ndi\Params\PacketFilter\enum, "1", 0, %Enable%
HKR, Ndi\Params\PacketFilter\enum, "0", 0, %Disable%
HKR, Ndi\Params\BatchReceive, ParamDesc, 0, %BatchReceive%
HKR, Ndi\Params\BatchReceive, Default, 0, "1"
HKR, Ndi\Params\BatchReceive, type, 0, "enum"
HKR, Ndi\Params\BatchReceive\enum, "1", 0, %Enable%
HKR, Ndi\Params\BatchReceive\enum, "0", 0, %Disable%
HKR, Ndi\Params\Promiscuous, ParamDesc, 0, %Promiscuous%
HKR, Ndi\Params\Promiscuous, Default, 0, "0"
HKR, Ndi\Params\Promiscuous, type, 0, "enum"
HKR, Ndi\Params\Promiscuous\enum, "1", 0, %Enable%
HKR, Ndi\Params\Promiscuous\enum, "0", 0, %Disable%
HKR, Ndi\Params\IPPacketsCheck, ParamDesc, 0, %IPPacketsCheck%
HKR, Ndi\Params\IPPacketsCheck, Default, 0, "0"
HKR, Ndi\Params\IPPacketsCheck, type, 0, "enum"
HKR, Ndi\Params\IPPacketsCheck\enum,"0", 0, %Disable%
HKR, Ndi\Params\IPPacketsCheck\enum,"1", 0, %Tx%
HKR, Ndi\Params\IPPacketsCheck\enum,"2", 0, %Rx%
HKR, Ndi\Params\IPPacketsCheck\enum,"3", 0, %TxRx%
HKR, Ndi\Params\UseSwTxChecksum, ParamDesc, 0, %UseSwTxChecksum%
HKR, Ndi\Params\UseSwTxChecksum, Default, 0, "0"
HKR, Ndi\Params\UseSwTxChecksum, type, 0, "enum"
HKR, Ndi\Params\UseSwTxChecksum\enum, "1", 0, %Enable%
HKR, Ndi\Params\UseSwTxChecksum\enum, "0", 0, %Disable%
[kvmnet5.CopyFiles]
netkvm.sys,,,2
[kvmnet5.Service]
DisplayName = %kvmnet5.Service.DispName%
ServiceType = 1 ;%SERVICE_KERNEL_DRIVER%
StartType = 3 ;%SERVICE_DEMAND_START%
ErrorControl = 1 ;%SERVICE_ERROR_NORMAL%
ServiceBinary = %12%\netkvm.sys
LoadOrderGroup = NDIS
AddReg = TextModeFlags.Reg
[kvmnet5.EventLog]
AddReg = kvmnet5.AddEventLog.Reg
[kvmnet5.AddEventLog.Reg]
HKR, , EventMessageFile, 0x00020000, "%%SystemRoot%%\System32\netevent.dll"
HKR, , TypesSupported, 0x00010001, 7
[TextModeFlags.Reg]
HKR, , TextModeFlags, 0x00010001, 0x0001
[SourceDisksNames]
1 = %DiskId1%,,,""
[SourceDisksFiles]
netkvm.sys = 1,,
[DestinationDirs]
kvmnet5.CopyFiles = 12
[Strings]
VENDOR = "INX_COMPANY"
kvmnet5.DeviceDesc = "INX_PREFIX_VENDORVirtIO Ethernet Adapter"
kvmnet5.Service.DispName = "INX_PREFIX_VENDORVirtIO Ethernet Adapter Service"
DiskId1 = "INX_PREFIX_VENDORVirtIO Ethernet Adapter Driver Disk #1"
NetworkAddress = "Assign MAC"
ConnectRate = "Init.ConnectionRate(Mb)"
Priority = "Init.Do802.1PQ"
MergeableBuf = "Init.UseMergedBuffers"
MTU = "Init.MTUSize"
Indirect = "Init.IndirectTx"
TxCapacity = "Init.MaxTxBuffers"
RxCapacity = "Init.MaxRxBuffers"
Offload.TxChecksum = "Offload.Tx.Checksum"
Offload.TxLSO = "Offload.Tx.LSO"
Offload.RxCS = "Offload.Rx.Checksum"
EnableLogging = "Logging.Enable"
DebugLevel = "Logging.Level"
LogStatistics = "Logging.Statistics(sec)"
ConnectAfter = "TestOnly.DelayConnect(ms)"
DPCCheck = "TestOnly.DPCChecking"
Gather = "TestOnly.Scatter-Gather"
GuestCS = "TestOnly.GuestChecksum"
InterruptRecovery = "TestOnly.InterruptRecovery"
PacketFilter = "TestOnly.HwPacketFilter"
BatchReceive = "TestOnly.BatchReceive"
Promiscuous = "TestOnly.Promiscuous"
IPPacketsCheck = "TestOnly.AnalyzeIPPackets"
NumberOfHandledRXPackersInDPC = "TestOnly.RXThrottle"
UseSwTxChecksum = "TestOnly.UseSwTxChecksum"
Tx = "Tx Enabled";
Rx = "Rx Enabled";
TxRx = "Rx & Tx Enabled";
Disable = "Disabled"
Enable = "Enabled"
Enable* = "Enabled*"
String_16 = "16"
String_32 = "32"
String_64 = "64"
String_128 = "128"
String_256 = "256"
String_512 = "512"
String_1024 = "1024"
PriorityVlanTag = "Priority and VLAN tagging"
PriorityOnly = "Priority"
VLan = "VLan"
Priority_Vlan = "All"
10M = "10M"
100M = "100M"
1G = "1G"
10G = "10G"
TCPv4 = "TCP(v4)"
TCPUDPv4 = "TCP/UDP(v4)"
TCPUDPAll = "TCP/UDP(v4,v6)"
All = "All"
IPv4 = "IPv4"
Maximal = "Maximal"

View file

@ -0,0 +1,31 @@
/*
* This file contains resource (version) definitions for NDIS 5 driver.
*
* Copyright (c) 2008-2017 Red Hat, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of their contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <quverp.h>

View file

@ -28,6 +28,7 @@ list(APPEND INF_FILES
netamd.inf
nete1000.inf
netisa.inf
netkvm.inf
netrtl.inf
netrtpnt.inf
nettcpip.inf

331
media/inf/netkvm.inf Normal file
View file

@ -0,0 +1,331 @@
;/*++
;Copyright (c) 2008-2019 Red Hat Inc.
;
;
;Module Name:
; netkvm.inf
;
;Abstract:
; VirtIO Ethernet Adapter
;
;Installation Notes:
; Using Devcon: Type "devcon install netkvm.inf PCI\VEN_1AF4&DEV_1000&SUBSYS_00011AF4&REV_00" or
; "devcon install netkvm.inf PCI\VEN_1AF4&DEV_1041&SUBSYS_11001AF4&REV_01" to install
;
;--*/
[version]
Signature = "$Windows NT$"
Class = Net
CatalogFile = netkvm.cat
ClassGUID = {4d36e972-e325-11ce-bfc1-08002be10318}
Provider = %VENDOR%
DriverVer = 04/12/2019,51.77.104.17100
DriverPackageType = PlugAndPlay
DriverPackageDisplayName = %kvmnet5.DeviceDesc%
[Manufacturer]
%VENDOR% = NetKVM, NTx86.5.1
[NetKVM.NTx86.5.1]
%kvmnet5.DeviceDesc% = kvmnet5.ndi, PCI\VEN_1AF4&DEV_1000&SUBSYS_00011AF4&REV_00, PCI\VEN_1AF4&DEV_1000
%kvmnet5.DeviceDesc% = kvmnet5.ndi, PCI\VEN_1AF4&DEV_1041&SUBSYS_11001AF4&REV_01, PCI\VEN_1AF4&DEV_1041
[kvmnet5.ndi]
Characteristics = 0x84 ; NCF_PHYSICAL | NCF_HAS_UI
BusType = 5 ; PCI
AddReg = kvmnet5.Reg, Parameters
CopyFiles = kvmnet5.CopyFiles
*IfType = 6
*MediaType = 0 ; NdisMedium802_3
*PhysicalMediaType = 0 ; NdisPhysicalMediumUnspecified
[kvmnet5.ndi.Services]
AddService = netkvm, 2, kvmnet5.Service, kvmnet5.EventLog
[kvmnet5.Reg]
HKR, , BusNumber, 0, "0"
HKR, Ndi, Service, 0, "netkvm"
HKR, Ndi\Interfaces, UpperRange, 0, "ndis5"
HKR, Ndi\Interfaces, LowerRange, 0, "ethernet"
[Parameters]
HKR, Ndi\Params\ConnectRate, ParamDesc, 0, %ConnectRate%
HKR, Ndi\Params\ConnectRate, Default, 0, "1001"
HKR, Ndi\Params\ConnectRate, type, 0, "enum"
HKR, Ndi\Params\ConnectRate\enum, "10", 0, %10M%
HKR, Ndi\Params\ConnectRate\enum, "100", 0, %100M%
HKR, Ndi\Params\ConnectRate\enum, "1001", 0, %1G%
HKR, Ndi\Params\ConnectRate\enum, "10000", 0, %10G%
HKR, Ndi\Params\Priority, ParamDesc, 0, %Priority%
HKR, Ndi\Params\Priority, Default, 0, "1"
HKR, Ndi\Params\Priority, type, 0, "enum"
HKR, Ndi\Params\Priority\enum, "1", 0, %Enable%
HKR, Ndi\Params\Priority\enum, "0", 0, %Disable%
HKR, Ndi\Params\*PriorityVLANTag, ParamDesc, 0, %PriorityVlanTag%
HKR, Ndi\Params\*PriorityVLANTag, Default, 0, "3"
HKR, Ndi\Params\*PriorityVLANTag, type, 0, "enum"
HKR, Ndi\Params\*PriorityVLANTag\enum, "3", 0, %Priority_Vlan%
HKR, Ndi\Params\*PriorityVLANTag\enum, "2", 0, %VLan%
HKR, Ndi\Params\*PriorityVLANTag\enum, "1", 0, %PriorityOnly%
HKR, Ndi\Params\*PriorityVLANTag\enum, "0", 0, %Disable%
HKR, Ndi\Params\DoLog, ParamDesc, 0, %EnableLogging%
HKR, Ndi\Params\DoLog, Default, 0, "1"
HKR, Ndi\Params\DoLog, type, 0, "enum"
HKR, Ndi\Params\DoLog\enum, "1", 0, %Enable%
HKR, Ndi\Params\DoLog\enum, "0", 0, %Disable%
HKR, Ndi\params\DebugLevel, ParamDesc, 0, %DebugLevel%
HKR, Ndi\params\DebugLevel, type, 0, "int"
HKR, Ndi\params\DebugLevel, default, 0, "0"
HKR, Ndi\params\DebugLevel, min, 0, "0"
HKR, Ndi\params\DebugLevel, max, 0, "8"
HKR, Ndi\params\DebugLevel, step, 0, "1"
HKR, Ndi\params\LogStatistics, ParamDesc, 0, %LogStatistics%
HKR, Ndi\params\LogStatistics, type, 0, "int"
HKR, Ndi\params\LogStatistics, default, 0, "0"
HKR, Ndi\params\LogStatistics, min, 0, "0"
HKR, Ndi\params\LogStatistics, max, 0, "10000"
HKR, Ndi\params\LogStatistics, step, 0, "1"
HKR, Ndi\params\MTU, ParamDesc, 0, %MTU%
HKR, Ndi\params\MTU, type, 0, "long"
HKR, Ndi\params\MTU, default, 0, "1500"
HKR, Ndi\params\MTU, min, 0, "500"
HKR, Ndi\params\MTU, max, 0, "65500"
HKR, Ndi\params\MTU, step, 0, "1"
HKR, Ndi\params\TxCapacity, ParamDesc, 0, %TxCapacity%
HKR, Ndi\params\TxCapacity, type, 0, "enum"
HKR, Ndi\params\TxCapacity, default, 0, "1024"
HKR, Ndi\Params\TxCapacity\enum, "16", 0, %String_16%
HKR, Ndi\Params\TxCapacity\enum, "32", 0, %String_32%
HKR, Ndi\Params\TxCapacity\enum, "64", 0, %String_64%
HKR, Ndi\Params\TxCapacity\enum, "128", 0, %String_128%
HKR, Ndi\Params\TxCapacity\enum, "256", 0, %String_256%
HKR, Ndi\Params\TxCapacity\enum, "512", 0, %String_512%
HKR, Ndi\Params\TxCapacity\enum, "1024", 0, %String_1024%
HKR, Ndi\params\RxCapacity, ParamDesc, 0, %RxCapacity%
HKR, Ndi\params\RxCapacity, type, 0, "enum"
HKR, Ndi\params\RxCapacity, default, 0, "256"
HKR, Ndi\Params\RxCapacity\enum, "16", 0, %String_16%
HKR, Ndi\Params\RxCapacity\enum, "32", 0, %String_32%
HKR, Ndi\Params\RxCapacity\enum, "64", 0, %String_64%
HKR, Ndi\Params\RxCapacity\enum, "128", 0, %String_128%
HKR, Ndi\Params\RxCapacity\enum, "256", 0, %String_256%
HKR, Ndi\Params\RxCapacity\enum, "512", 0, %String_512%
HKR, Ndi\Params\RxCapacity\enum, "1024", 0, %String_1024%
HKR, Ndi\Params\MergeableBuf, ParamDesc, 0, %MergeableBuf%
HKR, Ndi\Params\MergeableBuf, Default, 0, "1"
HKR, Ndi\Params\MergeableBuf, type, 0, "enum"
HKR, Ndi\Params\MergeableBuf\enum, "1", 0, %Enable%
HKR, Ndi\Params\MergeableBuf\enum, "0", 0, %Disable%
HKR, Ndi\params\NetworkAddress, ParamDesc, 0, %NetworkAddress%
HKR, Ndi\params\NetworkAddress, type, 0, "edit"
HKR, Ndi\params\NetworkAddress, Optional, 0, "1"
HKR, Ndi\Params\Indirect, ParamDesc, 0, %Indirect%
HKR, Ndi\Params\Indirect, Default, 0, "0"
HKR, Ndi\Params\Indirect, type, 0, "enum"
HKR, Ndi\Params\Indirect\enum, "0", 0, %Disable%
HKR, Ndi\Params\Indirect\enum, "1", 0, %Enable%
HKR, Ndi\Params\Indirect\enum, "2", 0, %Enable*%
HKR, Ndi\Params\OffLoad.TxChecksum, ParamDesc, 0, %OffLoad.TxChecksum%
HKR, Ndi\Params\OffLoad.TxChecksum, Default, 0, "0"
HKR, Ndi\Params\OffLoad.TxChecksum, type, 0, "enum"
HKR, Ndi\Params\OffLoad.TxChecksum\enum, "31", 0, %All%
HKR, Ndi\Params\OffLoad.TxChecksum\enum, "27", 0, %TCPUDPAll%
HKR, Ndi\Params\OffLoad.TxChecksum\enum, "3", 0, %TCPUDPv4%
HKR, Ndi\Params\OffLoad.TxChecksum\enum, "1", 0, %TCPv4%
HKR, Ndi\Params\OffLoad.TxChecksum\enum, "0", 0, %Disable%
HKR, Ndi\Params\OffLoad.TxLSO, ParamDesc, 0, %OffLoad.TxLSO%
HKR, Ndi\Params\OffLoad.TxLSO, Default, 0, "2"
HKR, Ndi\Params\OffLoad.TxLSO, type, 0, "enum"
HKR, Ndi\Params\OffLoad.TxLSO\enum, "2", 0, %Maximal%
HKR, Ndi\Params\OffLoad.TxLSO\enum, "1", 0, %IPv4%
HKR, Ndi\Params\OffLoad.TxLSO\enum, "0", 0, %Disable%
HKR, Ndi\Params\OffLoad.RxCS, ParamDesc, 0, %OffLoad.RxCS%
HKR, Ndi\Params\OffLoad.RxCS, Default, 0, "0"
HKR, Ndi\Params\OffLoad.RxCS, type, 0, "enum"
HKR, Ndi\Params\OffLoad.RxCS\enum, "31", 0, %All%
HKR, Ndi\Params\OffLoad.RxCS\enum, "27", 0, %TCPUDPAll%
HKR, Ndi\Params\OffLoad.RxCS\enum, "3", 0, %TCPUDPv4%
HKR, Ndi\Params\OffLoad.RxCS\enum, "1", 0, %TCPv4%
HKR, Ndi\Params\OffLoad.RxCS\enum, "0", 0, %Disable%
HKR, Ndi\params\ConnectTimer, ParamDesc, 0, %ConnectAfter%
HKR, Ndi\params\ConnectTimer, type, 0, "long"
HKR, Ndi\params\ConnectTimer, default, 0, "0"
HKR, Ndi\params\ConnectTimer, min, 0, "0"
HKR, Ndi\params\ConnectTimer, max, 0, "300000"
HKR, Ndi\params\ConnectTimer, step, 0, "50"
HKR, Ndi\Params\DpcCheck, ParamDesc, 0, %DPCCheck%
HKR, Ndi\Params\DpcCheck, Default, 0, "0"
HKR, Ndi\Params\DpcCheck, type, 0, "enum"
HKR, Ndi\Params\DpcCheck\enum, "1", 0, %Enable%
HKR, Ndi\Params\DpcCheck\enum, "0", 0, %Disable%
HKR, Ndi\Params\DpcCheck\enum, "2", 0, %Enable*%
HKR, Ndi\Params\Gather, ParamDesc, 0, %Gather%
HKR, Ndi\Params\Gather, Default, 0, "1"
HKR, Ndi\Params\Gather, type, 0, "enum"
HKR, Ndi\Params\Gather\enum, "1", 0, %Enable%
HKR, Ndi\Params\Gather\enum, "0", 0, %Disable%
HKR, Ndi\Params\Offload.GuestCS, ParamDesc, 0, %GuestCS%
HKR, Ndi\Params\Offload.GuestCS, Default, 0, "0"
HKR, Ndi\Params\Offload.GuestCS, type, 0, "enum"
HKR, Ndi\Params\Offload.GuestCS\enum,"1", 0, %Enable%
HKR, Ndi\Params\Offload.GuestCS\enum,"0", 0, %Disable%
HKR, Ndi\Params\InterruptRecovery, ParamDesc, 0, %InterruptRecovery%
HKR, Ndi\Params\InterruptRecovery, Default, 0, "1"
HKR, Ndi\Params\InterruptRecovery, type, 0, "enum"
HKR, Ndi\Params\InterruptRecovery\enum, "1", 0, %Enable%
HKR, Ndi\Params\InterruptRecovery\enum, "0", 0, %Disable%
HKR, Ndi\params\NumberOfHandledRXPackersInDPC, ParamDesc, 0, %NumberOfHandledRXPackersInDPC%
HKR, Ndi\params\NumberOfHandledRXPackersInDPC, type, 0, "long"
HKR, Ndi\params\NumberOfHandledRXPackersInDPC, default, 0, "1000"
HKR, Ndi\params\NumberOfHandledRXPackersInDPC, min, 0, "1"
HKR, Ndi\params\NumberOfHandledRXPackersInDPC, max, 0, "10000"
HKR, Ndi\params\NumberOfHandledRXPackersInDPC, step, 0, "1"
HKR, Ndi\Params\PacketFilter, ParamDesc, 0, %PacketFilter%
HKR, Ndi\Params\PacketFilter, Default, 0, "1"
HKR, Ndi\Params\PacketFilter, type, 0, "enum"
HKR, Ndi\Params\PacketFilter\enum, "1", 0, %Enable%
HKR, Ndi\Params\PacketFilter\enum, "0", 0, %Disable%
HKR, Ndi\Params\BatchReceive, ParamDesc, 0, %BatchReceive%
HKR, Ndi\Params\BatchReceive, Default, 0, "1"
HKR, Ndi\Params\BatchReceive, type, 0, "enum"
HKR, Ndi\Params\BatchReceive\enum, "1", 0, %Enable%
HKR, Ndi\Params\BatchReceive\enum, "0", 0, %Disable%
HKR, Ndi\Params\Promiscuous, ParamDesc, 0, %Promiscuous%
HKR, Ndi\Params\Promiscuous, Default, 0, "0"
HKR, Ndi\Params\Promiscuous, type, 0, "enum"
HKR, Ndi\Params\Promiscuous\enum, "1", 0, %Enable%
HKR, Ndi\Params\Promiscuous\enum, "0", 0, %Disable%
HKR, Ndi\Params\IPPacketsCheck, ParamDesc, 0, %IPPacketsCheck%
HKR, Ndi\Params\IPPacketsCheck, Default, 0, "0"
HKR, Ndi\Params\IPPacketsCheck, type, 0, "enum"
HKR, Ndi\Params\IPPacketsCheck\enum,"0", 0, %Disable%
HKR, Ndi\Params\IPPacketsCheck\enum,"1", 0, %Tx%
HKR, Ndi\Params\IPPacketsCheck\enum,"2", 0, %Rx%
HKR, Ndi\Params\IPPacketsCheck\enum,"3", 0, %TxRx%
HKR, Ndi\Params\UseSwTxChecksum, ParamDesc, 0, %UseSwTxChecksum%
HKR, Ndi\Params\UseSwTxChecksum, Default, 0, "0"
HKR, Ndi\Params\UseSwTxChecksum, type, 0, "enum"
HKR, Ndi\Params\UseSwTxChecksum\enum, "1", 0, %Enable%
HKR, Ndi\Params\UseSwTxChecksum\enum, "0", 0, %Disable%
[kvmnet5.CopyFiles]
netkvm.sys,,,2
[kvmnet5.Service]
DisplayName = %kvmnet5.Service.DispName%
ServiceType = 1 ;%SERVICE_KERNEL_DRIVER%
StartType = 3 ;%SERVICE_DEMAND_START%
ErrorControl = 1 ;%SERVICE_ERROR_NORMAL%
ServiceBinary = %12%\netkvm.sys
LoadOrderGroup = NDIS
AddReg = TextModeFlags.Reg
[kvmnet5.EventLog]
AddReg = kvmnet5.AddEventLog.Reg
[kvmnet5.AddEventLog.Reg]
HKR, , EventMessageFile, 0x00020000, "%%SystemRoot%%\System32\netevent.dll"
HKR, , TypesSupported, 0x00010001, 7
[TextModeFlags.Reg]
HKR, , TextModeFlags, 0x00010001, 0x0001
[SourceDisksNames]
1 = %DiskId1%,,,""
[SourceDisksFiles]
netkvm.sys = 1,,
[DestinationDirs]
kvmnet5.CopyFiles = 12
[Strings]
VENDOR = "Red Hat, Inc."
kvmnet5.DeviceDesc = "Red Hat VirtIO Ethernet Adapter"
kvmnet5.Service.DispName = "Red Hat VirtIO Ethernet Adapter Service"
DiskId1 = "Red Hat VirtIO Ethernet Adapter Driver Disk #1"
NetworkAddress = "Assign MAC"
ConnectRate = "Init.ConnectionRate(Mb)"
Priority = "Init.Do802.1PQ"
MergeableBuf = "Init.UseMergedBuffers"
MTU = "Init.MTUSize"
Indirect = "Init.IndirectTx"
TxCapacity = "Init.MaxTxBuffers"
RxCapacity = "Init.MaxRxBuffers"
Offload.TxChecksum = "Offload.Tx.Checksum"
Offload.TxLSO = "Offload.Tx.LSO"
Offload.RxCS = "Offload.Rx.Checksum"
EnableLogging = "Logging.Enable"
DebugLevel = "Logging.Level"
LogStatistics = "Logging.Statistics(sec)"
ConnectAfter = "TestOnly.DelayConnect(ms)"
DPCCheck = "TestOnly.DPCChecking"
Gather = "TestOnly.Scatter-Gather"
GuestCS = "TestOnly.GuestChecksum"
InterruptRecovery = "TestOnly.InterruptRecovery"
PacketFilter = "TestOnly.HwPacketFilter"
BatchReceive = "TestOnly.BatchReceive"
Promiscuous = "TestOnly.Promiscuous"
IPPacketsCheck = "TestOnly.AnalyzeIPPackets"
NumberOfHandledRXPackersInDPC = "TestOnly.RXThrottle"
UseSwTxChecksum = "TestOnly.UseSwTxChecksum"
Tx = "Tx Enabled"
Rx = "Rx Enabled"
TxRx = "Rx & Tx Enabled"
Disable = "Disabled"
Enable = "Enabled"
Enable* = "Enabled*"
String_16 = "16"
String_32 = "32"
String_64 = "64"
String_128 = "128"
String_256 = "256"
String_512 = "512"
String_1024 = "1024"
PriorityVlanTag = "Priority and VLAN tagging"
PriorityOnly = "Priority"
VLan = "VLan"
Priority_Vlan = "All"
10M = "10M"
100M = "100M"
1G = "1G"
10G = "10G"
TCPv4 = "TCP(v4)"
TCPUDPv4 = "TCP/UDP(v4)"
TCPUDPAll = "TCP/UDP(v4,v6)"
All = "All"
IPv4 = "IPv4"
Maximal = "Maximal"

View file

@ -4256,7 +4256,7 @@ NdisDprReleaseSpinLock(
* OUT PUCHAR Data);
*/
#define NdisReadRegisterUchar(Register, Data) \
*(Data) = *(Register)
*(Data) = *((volatile UCHAR * const) (Register))
/*
* VOID
@ -4265,7 +4265,7 @@ NdisDprReleaseSpinLock(
* OUT PULONG Data);
*/
#define NdisReadRegisterUlong(Register, Data) \
*(Data) = *(Register)
*(Data) = *((volatile ULONG * const) (Register))
/*
* VOID
@ -4274,7 +4274,7 @@ NdisDprReleaseSpinLock(
* OUT PUSHORT Data);
*/
#define NdisReadRegisterUshort(Register, Data) \
*(Data) = *(Register)
*(Data) = *((volatile USHORT * const) (Register))
/*
* VOID

View file

@ -1464,7 +1464,7 @@ KeFlushWriteBuffer(VOID);
* IN PKBUGCHECK_CALLBACK_RECORD CallbackRecord)
*/
#define KeInitializeCallbackRecord(CallbackRecord) \
CallbackRecord->State = BufferEmpty;
(CallbackRecord)->State = BufferEmpty;
#if defined(_PREFAST_)