diff --git a/drivers/network/dd/CMakeLists.txt b/drivers/network/dd/CMakeLists.txt index 89b96df4897..39a20fce8e3 100644 --- a/drivers/network/dd/CMakeLists.txt +++ b/drivers/network/dd/CMakeLists.txt @@ -1,5 +1,6 @@ add_subdirectory(e1000) add_subdirectory(ne2000) +add_subdirectory(netkvm) add_subdirectory(pcnet) add_subdirectory(rtl8139) diff --git a/drivers/network/dd/netkvm/CMakeLists.txt b/drivers/network/dd/netkvm/CMakeLists.txt new file mode 100644 index 00000000000..6906334f40e --- /dev/null +++ b/drivers/network/dd/netkvm/CMakeLists.txt @@ -0,0 +1,37 @@ + +include_directories(BEFORE common virtio) + +add_definitions( + -DNDIS_MINIPORT_DRIVER + -DNDIS51_MINIPORT=1) + +list(APPEND SOURCE + common/ParaNdis-Common.c + common/ParaNdis-Oid.c + common/ParaNdis-VirtIO.c + common/ParaNdis-Debug.c + common/sw-offload.c + virtio/VirtIOPCICommon.c + virtio/VirtIOPCILegacy.c + virtio/VirtIOPCIModern.c + virtio/VirtIORing.c + virtio/VirtIORing-Packed.c + wxp/ParaNdis5-Driver.c + wxp/ParaNdis5-Impl.c + wxp/ParaNdis5-Oid.c) + +add_library(netkvm MODULE ${SOURCE} wxp/parandis.rc) +set_module_type(netkvm kernelmodedriver) +add_importlibs(netkvm ndis ntoskrnl hal) +add_cd_file(TARGET netkvm DESTINATION reactos/system32/drivers FOR all) + +if (NOT MSVC) + add_compile_flags("-Wno-unused-function") + add_compile_flags("-Wno-old-style-declaration") + add_compile_flags("-Wno-unknown-pragmas") + add_compile_flags("-Wno-unused-but-set-variable") + add_compile_flags("-Wno-pointer-sign") + add_compile_flags("-Wno-pointer-to-int-cast") + add_compile_flags("-Wno-int-to-pointer-cast") + add_compile_flags("-Wno-attributes") +endif() diff --git a/drivers/network/dd/netkvm/Common/DebugData.h b/drivers/network/dd/netkvm/Common/DebugData.h new file mode 100644 index 00000000000..cf0bf849f6a --- /dev/null +++ b/drivers/network/dd/netkvm/Common/DebugData.h @@ -0,0 +1,193 @@ +/* + * This file contains definitions and data structures, common between + * NDIS driver and debugger helper unit, processing crash dump with built-in + * data provided by the driver. + * + * Included in NetKVM NDIS kernel driver for Windows. + * Included in NetKVMDumpParser application. + * + * Copyright (c) 2008-2017 Red Hat, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef PARANDIS_DEBUG_DATA_H +#define PARANDIS_DEBUG_DATA_H + +typedef enum _etagHistoryLogOperation +{ + hopPowerOff, // common::PowerOff, 1/0 - entry/exit (none, entry, none, none) + hopPowerOn, // common::PowerOn, 1/0 - entry/exit (none, entry, none, none) + hopSysPause, // ndis6::Pause, 1/0 - entry/completion + hopSysResume, // ndis6::Restart, 1/0 - entry/completion + hopInternalSendPause, // implementation, 1/0 - entry/completion + hopInternalReceivePause, // implementation, 1/0 - entry/completion + hopInternalSendResume, // implementation + hopInternalReceiveResume, // implementation + hopSysReset, // implementation driver, 1/0 - entry/completion + hopHalt, // implementation driver, 1/0 - entry/completion + hopConnectIndication, // implementation + hopDPC, // common::DpcWorkBody (1, none, none, none) (0, left, free buffers, free desc) + hopSend, // implementation, when Send requested (nbl, nof lists, nof bufs, nof bytes) (packet, 1, nof packets, none) + hopSendNBLRequest, // ndis6 implementation (nbl, nof packets, none, none) + hopSendPacketRequest, // not used + hopSendPacketMapped, // implementation, before the packet inserted into queue (nbl, which packet, nof frags, none) + hopSubmittedPacket, // implementation, when the packet submitted (nbl, which packet, result, flags) + hopBufferSent, // implementation, when the packet returned from VirtIO queue (nbl, packet no., free buf, free desc) + hopReceiveStat, // common: RX (none, retrieved, reported, ready rx buffers) + hopBufferReturned, // not used + hopSendComplete, // implementation, when the packet completed + hopTxProcess, + hopPacketReceived, // implementation, when the packet prepared for indication (nbl, length, prio tag, type) + hopOidRequest, // implementation, none, OID, on entry(type, 1), on exit (status, 0), on complete (status, 2) + hopPnpEvent // common, none, event, 0, 0 +}eHistoryLogOperation; + +// {E51FCE18-B3E7-441e-B18C-D9E9B71616F3} +static const GUID ParaNdis_CrashGuid = +{ 0xe51fce18, 0xb3e7, 0x441e, { 0xb1, 0x8c, 0xd9, 0xe9, 0xb7, 0x16, 0x16, 0xf3 } }; + +/* This structure is NOT changeable */ +typedef struct _tagBugCheckStaticDataHeader +{ + USHORT SizeOfPointer; + USHORT StaticDataVersion; + USHORT PerNicDataVersion; + USHORT ulMaxContexts; + LARGE_INTEGER qCrashTime; + UINT64 PerNicData; + UINT64 DataArea; + UINT64 DataAreaSize; +}tBugCheckStaticDataHeader; + +/* This structure is NOT changeable */ +typedef struct _tagBugCheckDataLocation +{ + UINT64 Address; + UINT64 Size; +}tBugCheckDataLocation; + +#define PARANDIS_DEBUG_STATIC_DATA_VERSION 0 +#define PARANDIS_DEBUG_PER_NIC_DATA_VERSION 0 +#define PARANDIS_DEBUG_HISTORY_DATA_VERSION 1 +/* This structure is NOT changeable */ +typedef struct 
_tagBugCheckStaticDataContent_V0 +{ + ULONG SizeOfHistory; + ULONG SizeOfHistoryEntry; + LONG CurrentHistoryIndex; + ULONG HistoryDataVersion; + ULONG64 HistoryData; +}tBugCheckStaticDataContent_V0; + +#define PARANDIS_DEBUG_INTERRUPTS + +#ifdef PARANDIS_DEBUG_INTERRUPTS +# define PARANDIS_STORE_LAST_INTERRUPT_TIMESTAMP(p) \ + NdisGetCurrentSystemTime(&(p)->LastInterruptTimeStamp) +# define PARANDIS_GET_LAST_INTERRUPT_TIMESTAMP(p) \ + (p)->LastInterruptTimeStamp.QuadPart +#else +# define PARANDIS_STORE_LAST_INTERRUPT_TIMESTAMP(p) +# define PARANDIS_GET_LAST_INTERRUPT_TIMESTAMP(p) (0) +#endif + +typedef struct _tagBugCheckPerNicDataContent_V0 +{ + UINT64 Context; + LARGE_INTEGER LastInterruptTimeStamp; + LARGE_INTEGER LastTxCompletionTimeStamp; + ULONG nofPacketsToComplete; + ULONG nofReadyTxBuffers; +}tBugCheckPerNicDataContent_V0; + +typedef struct _tagBugCheckHistoryDataEntry_V0 +{ + LARGE_INTEGER TimeStamp; + UINT64 Context; + UINT64 pParam1; + ULONG operation; + ULONG lParam2; + ULONG lParam3; + ULONG lParam4; +}tBugCheckHistoryDataEntry_V0; + +typedef struct _tagBugCheckHistoryDataEntry_V1 +{ + LARGE_INTEGER TimeStamp; + UINT64 Context; + ULONG uIRQL; + ULONG uProcessor; + UINT64 pParam1; + ULONG operation; + ULONG lParam2; + ULONG lParam3; + ULONG lParam4; +}tBugCheckHistoryDataEntry_V1; + + +#if (PARANDIS_DEBUG_STATIC_DATA_VERSION == 0) +typedef tBugCheckStaticDataContent_V0 tBugCheckStaticDataContent; +#endif + +#if (PARANDIS_DEBUG_PER_NIC_DATA_VERSION == 0) +typedef tBugCheckPerNicDataContent_V0 tBugCheckPerNicDataContent; +#endif + +#if (PARANDIS_DEBUG_HISTORY_DATA_VERSION == 0) +typedef tBugCheckHistoryDataEntry_V0 tBugCheckHistoryDataEntry; +#elif (PARANDIS_DEBUG_HISTORY_DATA_VERSION == 1) +typedef tBugCheckHistoryDataEntry_V1 tBugCheckHistoryDataEntry; +#endif + +typedef struct _tagBugCheckStaticDataContent_V1 +{ + UINT64 res1; + UINT64 res2; + UINT64 History; +}tBugCheckStaticDataContent_V1; + +typedef struct _tagBugCheckPerNicDataContent_V1 +{ + UINT64 Context; + LARGE_INTEGER LastInterruptTimeStamp; + LARGE_INTEGER LastTxCompletionTimeStamp; + ULONG nofPacketsToComplete; + ULONG nofReadyTxBuffers; +}tBugCheckPerNicDataContent_V1; + + +#if (PARANDIS_DEBUG_HEADER_VERSION == 1) +typedef tBugCheckStaticDataContent_V1 tBugCheckStaticDataContent; +#endif + +#if (PARANDIS_DEBUG_PER_NIC_DATA_VERSION == 1) +typedef tBugCheckPerNicDataContent_V1 tBugCheckPerNicDataContent; +#endif + +// etc + + + +#endif diff --git a/drivers/network/dd/netkvm/Common/IONetDescriptor.h b/drivers/network/dd/netkvm/Common/IONetDescriptor.h new file mode 100644 index 00000000000..8e9a05b2110 --- /dev/null +++ b/drivers/network/dd/netkvm/Common/IONetDescriptor.h @@ -0,0 +1,133 @@ +/* + * This file contains common guest/host definition, related + * to VirtIO network adapter + * + * Copyright (c) 2008-2017 Red Hat, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. 
Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +#ifndef IONETDESCRIPTOR_H +#define IONETDESCRIPTOR_H + +#pragma pack (push) +#pragma pack (1) +/* This is the first element of the scatter-gather list. If you don't + * specify GSO or CSUM features, you can simply ignore the header. */ +typedef struct _tagvirtio_net_hdr +{ +#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset +#define VIRTIO_NET_HDR_F_DATA_VALID 2 // Host checked checksum, no need to recheck + u8 flags; +#define VIRTIO_NET_HDR_GSO_NONE 0 // Not a GSO frame +#define VIRTIO_NET_HDR_GSO_TCPV4 1 // GSO frame, IPv4 TCP (TSO) +#define VIRTIO_NET_HDR_GSO_UDP 3 // GSO frame, IPv4 UDP (UFO) +#define VIRTIO_NET_HDR_GSO_TCPV6 4 // GSO frame, IPv6 TCP +#define VIRTIO_NET_HDR_GSO_ECN 0x80 // TCP has ECN set + u8 gso_type; + u16 hdr_len; // Ethernet + IP + tcp/udp hdrs + u16 gso_size; // Bytes to append to gso_hdr_len per frame + u16 csum_start; // Position to start checksumming from + u16 csum_offset; // Offset after that to place checksum +}virtio_net_hdr_basic; + +typedef struct _tagvirtio_net_hdr_ext +{ + virtio_net_hdr_basic BasicHeader; + u16 nBuffers; +}virtio_net_hdr_ext; + +/* + * Control virtqueue data structures + * + * The control virtqueue expects a header in the first sg entry + * and an ack/status response in the last entry. Data for the + * command goes in between. + */ +typedef struct tag_virtio_net_ctrl_hdr { + u8 class_of_command; + u8 cmd; +}virtio_net_ctrl_hdr; + +typedef u8 virtio_net_ctrl_ack; + +#define VIRTIO_NET_OK 0 +#define VIRTIO_NET_ERR 1 + +/* + * Control the RX mode, ie. promiscuous, allmulti, etc... + * All commands require an "out" sg entry containing a 1 byte + * state value, zero = disable, non-zero = enable. Commands + * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature. + * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA. + */ +#define VIRTIO_NET_CTRL_RX_MODE 0 + #define VIRTIO_NET_CTRL_RX_MODE_PROMISC 0 + #define VIRTIO_NET_CTRL_RX_MODE_ALLMULTI 1 + #define VIRTIO_NET_CTRL_RX_MODE_ALLUNI 2 + #define VIRTIO_NET_CTRL_RX_MODE_NOMULTI 3 + #define VIRTIO_NET_CTRL_RX_MODE_NOUNI 4 + #define VIRTIO_NET_CTRL_RX_MODE_NOBCAST 5 + +/* + * Control the MAC filter table. + * + * The MAC filter table is managed by the hypervisor, the guest should + * assume the size is infinite. Filtering should be considered + * non-perfect, ie. based on hypervisor resources, the guest may + * received packets from sources not specified in the filter list. + * + * In addition to the class/cmd header, the TABLE_SET command requires + * two out scatterlists. 
Each contains a 4 byte count of entries followed + * by a concatenated byte stream of the ETH_ALEN MAC addresses. The + * first sg list contains unicast addresses, the second is for multicast. + * This functionality is present if the VIRTIO_NET_F_CTRL_RX feature + * is available. + */ +#define ETH_ALEN 6 + +struct virtio_net_ctrl_mac { + u32 entries; + // follows + //u8 macs[][ETH_ALEN]; +}; +#define VIRTIO_NET_CTRL_MAC 1 + #define VIRTIO_NET_CTRL_MAC_TABLE_SET 0 + +/* + * Control VLAN filtering + * + * The VLAN filter table is controlled via a simple ADD/DEL interface. + * VLAN IDs not added may be filterd by the hypervisor. Del is the + * opposite of add. Both commands expect an out entry containing a 2 + * byte VLAN ID. VLAN filtering is available with the + * VIRTIO_NET_F_CTRL_VLAN feature bit. + */ +#define VIRTIO_NET_CTRL_VLAN 2 + #define VIRTIO_NET_CTRL_VLAN_ADD 0 + #define VIRTIO_NET_CTRL_VLAN_DEL 1 + + +#pragma pack (pop) + +#endif diff --git a/drivers/network/dd/netkvm/Common/ParaNdis-Common.c b/drivers/network/dd/netkvm/Common/ParaNdis-Common.c new file mode 100644 index 00000000000..ebb0c04b981 --- /dev/null +++ b/drivers/network/dd/netkvm/Common/ParaNdis-Common.c @@ -0,0 +1,3017 @@ +/* + * This file contains NDIS driver procedures, common for NDIS5 and NDIS6 + * + * Copyright (c) 2008-2017 Red Hat, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ +#include "ndis56common.h" + +#ifdef WPP_EVENT_TRACING +#include "ParaNdis-Common.tmh" +#endif + +static void ReuseReceiveBufferRegular(PARANDIS_ADAPTER *pContext, pIONetDescriptor pBuffersDescriptor); +static void ReuseReceiveBufferPowerOff(PARANDIS_ADAPTER *pContext, pIONetDescriptor pBuffersDescriptor); + +//#define ROUNDSIZE(sz) ((sz + 15) & ~15) +#define MAX_VLAN_ID 4095 + +#if 0 +void FORCEINLINE DebugDumpPacket(LPCSTR prefix, PVOID header, int level) +{ + PUCHAR peth = (PUCHAR)header; + DPrintf(level, ("[%s] %02X%02X%02X%02X%02X%02X => %02X%02X%02X%02X%02X%02X", prefix, + peth[6], peth[7], peth[8], peth[9], peth[10], peth[11], + peth[0], peth[1], peth[2], peth[3], peth[4], peth[5])); +} +#else +void FORCEINLINE DebugDumpPacket(LPCSTR prefix, PVOID header, int level) +{ +} +#endif + + + +/********************************************************** +Validates MAC address +Valid MAC address is not broadcast, not multicast, not empty +if bLocal is set, it must be LOCAL +if not, is must be non-local or local +Parameters: + PUCHAR pcMacAddress - MAC address to validate + BOOLEAN bLocal - TRUE, if we validate locally administered address +Return value: + TRUE if valid +***********************************************************/ +BOOLEAN ParaNdis_ValidateMacAddress(PUCHAR pcMacAddress, BOOLEAN bLocal) +{ + BOOLEAN bLA = FALSE, bEmpty, bBroadcast, bMulticast = FALSE; + bBroadcast = ETH_IS_BROADCAST(pcMacAddress); + bLA = !bBroadcast && ETH_IS_LOCALLY_ADMINISTERED(pcMacAddress); + bMulticast = !bBroadcast && ETH_IS_MULTICAST(pcMacAddress); + bEmpty = ETH_IS_EMPTY(pcMacAddress); + return !bBroadcast && !bEmpty && !bMulticast && (!bLocal || bLA); +} + +static eInspectedPacketType QueryPacketType(PVOID data) +{ + if (ETH_IS_BROADCAST(data)) + return iptBroadcast; + if (ETH_IS_MULTICAST(data)) + return iptMulticast; + return iptUnicast; +} + +typedef struct _tagConfigurationEntry +{ + const char *Name; + ULONG ulValue; + ULONG ulMinimal; + ULONG ulMaximal; +}tConfigurationEntry; + +typedef struct _tagConfigurationEntries +{ + tConfigurationEntry isPromiscuous; + tConfigurationEntry PrioritySupport; + tConfigurationEntry ConnectRate; + tConfigurationEntry isLogEnabled; + tConfigurationEntry debugLevel; + tConfigurationEntry connectTimer; + tConfigurationEntry dpcChecker; + tConfigurationEntry TxCapacity; + tConfigurationEntry RxCapacity; + tConfigurationEntry InterruptRecovery; + tConfigurationEntry LogStatistics; + tConfigurationEntry PacketFiltering; + tConfigurationEntry ScatterGather; + tConfigurationEntry BatchReceive; + tConfigurationEntry OffloadTxChecksum; + tConfigurationEntry OffloadTxLSO; + tConfigurationEntry OffloadRxCS; + tConfigurationEntry OffloadGuestCS; + tConfigurationEntry UseSwTxChecksum; + tConfigurationEntry IPPacketsCheck; + tConfigurationEntry stdIpcsV4; + tConfigurationEntry stdTcpcsV4; + tConfigurationEntry stdTcpcsV6; + tConfigurationEntry stdUdpcsV4; + tConfigurationEntry stdUdpcsV6; + tConfigurationEntry stdLsoV1; + tConfigurationEntry stdLsoV2ip4; + tConfigurationEntry stdLsoV2ip6; + tConfigurationEntry PriorityVlanTagging; + tConfigurationEntry VlanId; + tConfigurationEntry UseMergeableBuffers; + tConfigurationEntry MTU; + tConfigurationEntry NumberOfHandledRXPackersInDPC; + tConfigurationEntry Indirect; +}tConfigurationEntries; + +static const tConfigurationEntries defaultConfiguration = +{ + { "Promiscuous", 0, 0, 1 }, + { "Priority", 0, 0, 1 }, + { "ConnectRate", 100,10,10000 }, + { "DoLog", 1, 0, 1 }, + { "DebugLevel", 2, 0, 8 }, + { "ConnectTimer", 0, 0, 
300000 }, + { "DpcCheck", 0, 0, 2 }, + { "TxCapacity", 1024, 16, 1024 }, + { "RxCapacity", 256, 32, 1024 }, + { "InterruptRecovery", 0, 0, 1}, + { "LogStatistics", 0, 0, 10000}, + { "PacketFilter", 1, 0, 1}, + { "Gather", 1, 0, 1}, + { "BatchReceive", 1, 0, 1}, + { "Offload.TxChecksum", 0, 0, 31}, + { "Offload.TxLSO", 0, 0, 2}, + { "Offload.RxCS", 0, 0, 31}, + { "Offload.GuestCS", 0, 0, 1}, + { "UseSwTxChecksum", 0, 0, 1 }, + { "IPPacketsCheck", 0, 0, 3 }, + { "*IPChecksumOffloadIPv4", 3, 0, 3 }, + { "*TCPChecksumOffloadIPv4",3, 0, 3 }, + { "*TCPChecksumOffloadIPv6",3, 0, 3 }, + { "*UDPChecksumOffloadIPv4",3, 0, 3 }, + { "*UDPChecksumOffloadIPv6",3, 0, 3 }, + { "*LsoV1IPv4", 1, 0, 1 }, + { "*LsoV2IPv4", 1, 0, 1 }, + { "*LsoV2IPv6", 1, 0, 1 }, + { "*PriorityVLANTag", 3, 0, 3}, + { "VlanId", 0, 0, MAX_VLAN_ID}, + { "MergeableBuf", 1, 0, 1}, + { "MTU", 1500, 500, 65500}, + { "NumberOfHandledRXPackersInDPC", MAX_RX_LOOPS, 1, 10000}, + { "Indirect", 0, 0, 2}, +}; + +static void ParaNdis_ResetVirtIONetDevice(PARANDIS_ADAPTER *pContext) +{ + virtio_device_reset(&pContext->IODevice); + DPrintf(0, ("[%s] Done", __FUNCTION__)); + /* reset all the features in the device */ + pContext->ulCurrentVlansFilterSet = 0; + pContext->ullGuestFeatures = 0; +#ifdef VIRTIO_RESET_VERIFY + if (1) + { + u8 devStatus; + devStatus = virtio_get_status(&pContext->IODevice); + if (devStatus) + { + DPrintf(0, ("[%s] Device status is still %02X", __FUNCTION__, (ULONG)devStatus)); + virtio_device_reset(&pContext->IODevice); + devStatus = virtio_get_status(&pContext->IODevice); + DPrintf(0, ("[%s] Device status on retry %02X", __FUNCTION__, (ULONG)devStatus)); + } + } +#endif +} + +/********************************************************** +Gets integer value for specifies in pEntry->Name name +Parameters: + NDIS_HANDLE cfg previously open configuration + tConfigurationEntry *pEntry - Entry to fill value in +***********************************************************/ +static void GetConfigurationEntry(NDIS_HANDLE cfg, tConfigurationEntry *pEntry) +{ + NDIS_STATUS status; + const char *statusName; + NDIS_STRING name = {0}; + PNDIS_CONFIGURATION_PARAMETER pParam = NULL; + NDIS_PARAMETER_TYPE ParameterType = NdisParameterInteger; + NdisInitializeString(&name, (PUCHAR)pEntry->Name); + NdisReadConfiguration( + &status, + &pParam, + cfg, + &name, + ParameterType); + if (status == NDIS_STATUS_SUCCESS) + { + ULONG ulValue = pParam->ParameterData.IntegerData; + if (ulValue >= pEntry->ulMinimal && ulValue <= pEntry->ulMaximal) + { + pEntry->ulValue = ulValue; + statusName = "value"; + } + else + { + statusName = "out of range"; + } + } + else + { + statusName = "nothing"; + } + DPrintf(2, ("[%s] %s read for %s - 0x%x", + __FUNCTION__, + statusName, + pEntry->Name, + pEntry->ulValue)); + if (name.Buffer) NdisFreeString(name); +} + +static void DisableLSOv4Permanently(PARANDIS_ADAPTER *pContext, LPCSTR procname, LPCSTR reason) +{ + if (pContext->Offload.flagsValue & osbT4Lso) + { + DPrintf(0, ("[%s] Warning: %s", procname, reason)); + pContext->Offload.flagsValue &= ~osbT4Lso; + ParaNdis_ResetOffloadSettings(pContext, NULL, NULL); + } +} + +static void DisableLSOv6Permanently(PARANDIS_ADAPTER *pContext, LPCSTR procname, LPCSTR reason) +{ + if (pContext->Offload.flagsValue & osbT6Lso) + { + DPrintf(0, ("[%s] Warning: %s", procname, reason)); + pContext->Offload.flagsValue &= ~osbT6Lso; + ParaNdis_ResetOffloadSettings(pContext, NULL, NULL); + } +} + +static void DisableBothLSOPermanently(PARANDIS_ADAPTER *pContext, LPCSTR procname, 
LPCSTR reason) +{ + if (pContext->Offload.flagsValue & (osbT4Lso | osbT6Lso)) + { + DPrintf(0, ("[%s] Warning: %s", procname, reason)); + pContext->Offload.flagsValue &= ~(osbT6Lso | osbT4Lso); + ParaNdis_ResetOffloadSettings(pContext, NULL, NULL); + } +} + +/********************************************************** +Loads NIC parameters from adapter registry key +Parameters: + context + PUCHAR *ppNewMACAddress - pointer to hold MAC address if configured from host +***********************************************************/ +static void ReadNicConfiguration(PARANDIS_ADAPTER *pContext, PUCHAR *ppNewMACAddress) +{ + NDIS_HANDLE cfg; + tConfigurationEntries *pConfiguration = ParaNdis_AllocateMemory(pContext, sizeof(tConfigurationEntries)); + if (pConfiguration) + { + *pConfiguration = defaultConfiguration; + cfg = ParaNdis_OpenNICConfiguration(pContext); + if (cfg) + { + GetConfigurationEntry(cfg, &pConfiguration->isLogEnabled); + GetConfigurationEntry(cfg, &pConfiguration->debugLevel); + GetConfigurationEntry(cfg, &pConfiguration->ConnectRate); + GetConfigurationEntry(cfg, &pConfiguration->PrioritySupport); + GetConfigurationEntry(cfg, &pConfiguration->isPromiscuous); + GetConfigurationEntry(cfg, &pConfiguration->TxCapacity); + GetConfigurationEntry(cfg, &pConfiguration->RxCapacity); + GetConfigurationEntry(cfg, &pConfiguration->connectTimer); + GetConfigurationEntry(cfg, &pConfiguration->dpcChecker); + GetConfigurationEntry(cfg, &pConfiguration->InterruptRecovery); + GetConfigurationEntry(cfg, &pConfiguration->LogStatistics); + GetConfigurationEntry(cfg, &pConfiguration->PacketFiltering); + GetConfigurationEntry(cfg, &pConfiguration->ScatterGather); + GetConfigurationEntry(cfg, &pConfiguration->BatchReceive); + GetConfigurationEntry(cfg, &pConfiguration->OffloadTxChecksum); + GetConfigurationEntry(cfg, &pConfiguration->OffloadTxLSO); + GetConfigurationEntry(cfg, &pConfiguration->OffloadRxCS); + GetConfigurationEntry(cfg, &pConfiguration->OffloadGuestCS); + GetConfigurationEntry(cfg, &pConfiguration->UseSwTxChecksum); + GetConfigurationEntry(cfg, &pConfiguration->IPPacketsCheck); + GetConfigurationEntry(cfg, &pConfiguration->stdIpcsV4); + GetConfigurationEntry(cfg, &pConfiguration->stdTcpcsV4); + GetConfigurationEntry(cfg, &pConfiguration->stdTcpcsV6); + GetConfigurationEntry(cfg, &pConfiguration->stdUdpcsV4); + GetConfigurationEntry(cfg, &pConfiguration->stdUdpcsV6); + GetConfigurationEntry(cfg, &pConfiguration->stdLsoV1); + GetConfigurationEntry(cfg, &pConfiguration->stdLsoV2ip4); + GetConfigurationEntry(cfg, &pConfiguration->stdLsoV2ip6); + GetConfigurationEntry(cfg, &pConfiguration->PriorityVlanTagging); + GetConfigurationEntry(cfg, &pConfiguration->VlanId); + GetConfigurationEntry(cfg, &pConfiguration->UseMergeableBuffers); + GetConfigurationEntry(cfg, &pConfiguration->MTU); + GetConfigurationEntry(cfg, &pConfiguration->NumberOfHandledRXPackersInDPC); + GetConfigurationEntry(cfg, &pConfiguration->Indirect); + + #if !defined(WPP_EVENT_TRACING) + bDebugPrint = pConfiguration->isLogEnabled.ulValue; + nDebugLevel = pConfiguration->debugLevel.ulValue; + #endif + // ignoring promiscuous setting, nothing to do with it + pContext->maxFreeTxDescriptors = pConfiguration->TxCapacity.ulValue; + pContext->NetMaxReceiveBuffers = pConfiguration->RxCapacity.ulValue; + pContext->ulMilliesToConnect = pConfiguration->connectTimer.ulValue; + pContext->nEnableDPCChecker = pConfiguration->dpcChecker.ulValue; + pContext->bDoInterruptRecovery = pConfiguration->InterruptRecovery.ulValue != 0; + 
pContext->Limits.nPrintDiagnostic = pConfiguration->LogStatistics.ulValue; + pContext->uNumberOfHandledRXPacketsInDPC = pConfiguration->NumberOfHandledRXPackersInDPC.ulValue; + pContext->bDoSupportPriority = pConfiguration->PrioritySupport.ulValue != 0; + pContext->ulFormalLinkSpeed = pConfiguration->ConnectRate.ulValue; + pContext->ulFormalLinkSpeed *= 1000000; + pContext->bDoHwPacketFiltering = pConfiguration->PacketFiltering.ulValue != 0; + pContext->bUseScatterGather = pConfiguration->ScatterGather.ulValue != 0; + pContext->bBatchReceive = pConfiguration->BatchReceive.ulValue != 0; + pContext->bDoHardwareChecksum = pConfiguration->UseSwTxChecksum.ulValue == 0; + pContext->bDoGuestChecksumOnReceive = pConfiguration->OffloadGuestCS.ulValue != 0; + pContext->bDoIPCheckTx = pConfiguration->IPPacketsCheck.ulValue & 1; + pContext->bDoIPCheckRx = pConfiguration->IPPacketsCheck.ulValue & 2; + pContext->Offload.flagsValue = 0; + // TX caps: 1 - TCP, 2 - UDP, 4 - IP, 8 - TCPv6, 16 - UDPv6 + if (pConfiguration->OffloadTxChecksum.ulValue & 1) pContext->Offload.flagsValue |= osbT4TcpChecksum | osbT4TcpOptionsChecksum; + if (pConfiguration->OffloadTxChecksum.ulValue & 2) pContext->Offload.flagsValue |= osbT4UdpChecksum; + if (pConfiguration->OffloadTxChecksum.ulValue & 4) pContext->Offload.flagsValue |= osbT4IpChecksum | osbT4IpOptionsChecksum; + if (pConfiguration->OffloadTxChecksum.ulValue & 8) pContext->Offload.flagsValue |= osbT6TcpChecksum | osbT6TcpOptionsChecksum; + if (pConfiguration->OffloadTxChecksum.ulValue & 16) pContext->Offload.flagsValue |= osbT6UdpChecksum; + if (pConfiguration->OffloadTxLSO.ulValue) pContext->Offload.flagsValue |= osbT4Lso | osbT4LsoIp | osbT4LsoTcp; + if (pConfiguration->OffloadTxLSO.ulValue > 1) pContext->Offload.flagsValue |= osbT6Lso | osbT6LsoTcpOptions; + // RX caps: 1 - TCP, 2 - UDP, 4 - IP, 8 - TCPv6, 16 - UDPv6 + if (pConfiguration->OffloadRxCS.ulValue & 1) pContext->Offload.flagsValue |= osbT4RxTCPChecksum | osbT4RxTCPOptionsChecksum; + if (pConfiguration->OffloadRxCS.ulValue & 2) pContext->Offload.flagsValue |= osbT4RxUDPChecksum; + if (pConfiguration->OffloadRxCS.ulValue & 4) pContext->Offload.flagsValue |= osbT4RxIPChecksum | osbT4RxIPOptionsChecksum; + if (pConfiguration->OffloadRxCS.ulValue & 8) pContext->Offload.flagsValue |= osbT6RxTCPChecksum | osbT6RxTCPOptionsChecksum; + if (pConfiguration->OffloadRxCS.ulValue & 16) pContext->Offload.flagsValue |= osbT6RxUDPChecksum; + /* full packet size that can be configured as GSO for VIRTIO is short */ + /* NDIS test fails sometimes fails on segments 50-60K */ + pContext->Offload.maxPacketSize = PARANDIS_MAX_LSO_SIZE; + pContext->InitialOffloadParameters.IPv4Checksum = (UCHAR)pConfiguration->stdIpcsV4.ulValue; + pContext->InitialOffloadParameters.TCPIPv4Checksum = (UCHAR)pConfiguration->stdTcpcsV4.ulValue; + pContext->InitialOffloadParameters.TCPIPv6Checksum = (UCHAR)pConfiguration->stdTcpcsV6.ulValue; + pContext->InitialOffloadParameters.UDPIPv4Checksum = (UCHAR)pConfiguration->stdUdpcsV4.ulValue; + pContext->InitialOffloadParameters.UDPIPv6Checksum = (UCHAR)pConfiguration->stdUdpcsV6.ulValue; + pContext->InitialOffloadParameters.LsoV1 = (UCHAR)pConfiguration->stdLsoV1.ulValue; + pContext->InitialOffloadParameters.LsoV2IPv4 = (UCHAR)pConfiguration->stdLsoV2ip4.ulValue; + pContext->InitialOffloadParameters.LsoV2IPv6 = (UCHAR)pConfiguration->stdLsoV2ip6.ulValue; + pContext->ulPriorityVlanSetting = pConfiguration->PriorityVlanTagging.ulValue; + pContext->VlanId = pConfiguration->VlanId.ulValue & 0xfff; + 
pContext->bUseMergedBuffers = pConfiguration->UseMergeableBuffers.ulValue != 0; + pContext->MaxPacketSize.nMaxDataSize = pConfiguration->MTU.ulValue; + pContext->bUseIndirect = pConfiguration->Indirect.ulValue != 0; + if (!pContext->bDoSupportPriority) + pContext->ulPriorityVlanSetting = 0; + // if Vlan not supported + if (!IsVlanSupported(pContext)) + pContext->VlanId = 0; + if (1) + { + NDIS_STATUS status; + PVOID p; + UINT len = 0; + NdisReadNetworkAddress(&status, &p, &len, cfg); + if (status == NDIS_STATUS_SUCCESS && len == sizeof(pContext->CurrentMacAddress)) + { + *ppNewMACAddress = ParaNdis_AllocateMemory(pContext, sizeof(pContext->CurrentMacAddress)); + if (*ppNewMACAddress) + { + NdisMoveMemory(*ppNewMACAddress, p, len); + } + else + { + DPrintf(0, ("[%s] MAC address present, but some problem also...", __FUNCTION__)); + } + } + else if (len && len != sizeof(pContext->CurrentMacAddress)) + { + DPrintf(0, ("[%s] MAC address has wrong length of %d", __FUNCTION__, len)); + } + else + { + DPrintf(4, ("[%s] Nothing read for MAC, error %X", __FUNCTION__, status)); + } + } + NdisCloseConfiguration(cfg); + } + NdisFreeMemory(pConfiguration, 0, 0); + } +} + +void ParaNdis_ResetOffloadSettings(PARANDIS_ADAPTER *pContext, tOffloadSettingsFlags *pDest, PULONG from) +{ + if (!pDest) pDest = &pContext->Offload.flags; + if (!from) from = &pContext->Offload.flagsValue; + + pDest->fTxIPChecksum = !!(*from & osbT4IpChecksum); + pDest->fTxTCPChecksum = !!(*from & osbT4TcpChecksum); + pDest->fTxUDPChecksum = !!(*from & osbT4UdpChecksum); + pDest->fTxTCPOptions = !!(*from & osbT4TcpOptionsChecksum); + pDest->fTxIPOptions = !!(*from & osbT4IpOptionsChecksum); + + pDest->fTxLso = !!(*from & osbT4Lso); + pDest->fTxLsoIP = !!(*from & osbT4LsoIp); + pDest->fTxLsoTCP = !!(*from & osbT4LsoTcp); + + pDest->fRxIPChecksum = !!(*from & osbT4RxIPChecksum); + pDest->fRxIPOptions = !!(*from & osbT4RxIPOptionsChecksum); + pDest->fRxTCPChecksum = !!(*from & osbT4RxTCPChecksum); + pDest->fRxTCPOptions = !!(*from & osbT4RxTCPOptionsChecksum); + pDest->fRxUDPChecksum = !!(*from & osbT4RxUDPChecksum); + + pDest->fTxTCPv6Checksum = !!(*from & osbT6TcpChecksum); + pDest->fTxTCPv6Options = !!(*from & osbT6TcpOptionsChecksum); + pDest->fTxUDPv6Checksum = !!(*from & osbT6UdpChecksum); + pDest->fTxIPv6Ext = !!(*from & osbT6IpExtChecksum); + + pDest->fTxLsov6 = !!(*from & osbT6Lso); + pDest->fTxLsov6IP = !!(*from & osbT6LsoIpExt); + pDest->fTxLsov6TCP = !!(*from & osbT6LsoTcpOptions); + + pDest->fRxTCPv6Checksum = !!(*from & osbT6RxTCPChecksum); + pDest->fRxTCPv6Options = !!(*from & osbT6RxTCPOptionsChecksum); + pDest->fRxUDPv6Checksum = !!(*from & osbT6RxUDPChecksum); + pDest->fRxIPv6Ext = !!(*from & osbT6RxIpExtChecksum); +} + +/********************************************************** +Enumerates adapter resources and fills the structure holding them +Verifies that IO assigned and has correct size +Verifies that interrupt assigned +Parameters: + PNDIS_RESOURCE_LIST RList - list of resources, received from NDIS + tAdapterResources *pResources - structure to fill +Return value: + TRUE if everything is OK +***********************************************************/ +static BOOLEAN GetAdapterResources(NDIS_HANDLE MiniportHandle, PNDIS_RESOURCE_LIST RList, tAdapterResources *pResources) +{ + UINT i; + int read, bar = -1; + PCI_COMMON_HEADER pci_config; + NdisZeroMemory(pResources, sizeof(*pResources)); + + // read the PCI config space header + read = NdisReadPciSlotInformation( + MiniportHandle, + 0 /* SlotNumber, reserved */, 
+ 0 /* Offset */, + &pci_config, + sizeof(pci_config)); + if (read != sizeof(pci_config)) { + return FALSE; + } + + for (i = 0; i < RList->Count; ++i) + { + ULONG type = RList->PartialDescriptors[i].Type; + if (type == CmResourceTypePort) + { + PHYSICAL_ADDRESS Start = RList->PartialDescriptors[i].u.Port.Start; + ULONG len = RList->PartialDescriptors[i].u.Port.Length; + DPrintf(0, ("Found IO ports at %08lX(%d)", Start.LowPart, len)); + bar = virtio_get_bar_index(&pci_config, Start); + if (bar < 0) { + break; + } + pResources->PciBars[bar].BasePA = Start; + pResources->PciBars[bar].uLength = len; + pResources->PciBars[bar].bPortSpace = TRUE; + } + else if (type == CmResourceTypeMemory) + { + PHYSICAL_ADDRESS Start = RList->PartialDescriptors[i].u.Memory.Start; + ULONG len = RList->PartialDescriptors[i].u.Memory.Length; + DPrintf(0, ("Found IO memory at %08I64X(%d)", Start.QuadPart, len)); + bar = virtio_get_bar_index(&pci_config, Start); + if (bar < 0) { + break; + } + pResources->PciBars[bar].BasePA = Start; + pResources->PciBars[bar].uLength = len; + pResources->PciBars[bar].bPortSpace = FALSE; + } + else if (type == CmResourceTypeInterrupt) + { + pResources->Vector = RList->PartialDescriptors[i].u.Interrupt.Vector; + pResources->Level = RList->PartialDescriptors[i].u.Interrupt.Level; + pResources->Affinity = RList->PartialDescriptors[i].u.Interrupt.Affinity; + pResources->InterruptFlags = RList->PartialDescriptors[i].Flags; + DPrintf(0, ("Found Interrupt vector %d, level %d, affinity %X, flags %X", + pResources->Vector, pResources->Level, (ULONG)pResources->Affinity, pResources->InterruptFlags)); + } + } + return bar >= 0 && pResources->Vector; +} + +static void DumpVirtIOFeatures(PARANDIS_ADAPTER *pContext) +{ + const struct { ULONG bitmask; const PCHAR Name; } Features[] = + { + + {VIRTIO_NET_F_CSUM, "VIRTIO_NET_F_CSUM" }, + {VIRTIO_NET_F_GUEST_CSUM, "VIRTIO_NET_F_GUEST_CSUM" }, + {VIRTIO_NET_F_MAC, "VIRTIO_NET_F_MAC" }, + {VIRTIO_NET_F_GSO, "VIRTIO_NET_F_GSO" }, + {VIRTIO_NET_F_GUEST_TSO4, "VIRTIO_NET_F_GUEST_TSO4"}, + {VIRTIO_NET_F_GUEST_TSO6, "VIRTIO_NET_F_GUEST_TSO6"}, + {VIRTIO_NET_F_GUEST_ECN, "VIRTIO_NET_F_GUEST_ECN"}, + {VIRTIO_NET_F_GUEST_UFO, "VIRTIO_NET_F_GUEST_UFO"}, + {VIRTIO_NET_F_HOST_TSO4, "VIRTIO_NET_F_HOST_TSO4"}, + {VIRTIO_NET_F_HOST_TSO6, "VIRTIO_NET_F_HOST_TSO6"}, + {VIRTIO_NET_F_HOST_ECN, "VIRTIO_NET_F_HOST_ECN"}, + {VIRTIO_NET_F_HOST_UFO, "VIRTIO_NET_F_HOST_UFO"}, + {VIRTIO_NET_F_MRG_RXBUF, "VIRTIO_NET_F_MRG_RXBUF"}, + {VIRTIO_NET_F_STATUS, "VIRTIO_NET_F_STATUS"}, + {VIRTIO_NET_F_CTRL_VQ, "VIRTIO_NET_F_CTRL_VQ"}, + {VIRTIO_NET_F_CTRL_RX, "VIRTIO_NET_F_CTRL_RX"}, + {VIRTIO_NET_F_CTRL_VLAN, "VIRTIO_NET_F_CTRL_VLAN"}, + {VIRTIO_NET_F_CTRL_RX_EXTRA, "VIRTIO_NET_F_CTRL_RX_EXTRA"}, + {VIRTIO_RING_F_INDIRECT_DESC, "VIRTIO_RING_F_INDIRECT_DESC"}, + {VIRTIO_F_VERSION_1, "VIRTIO_F_VERSION_1" }, + {VIRTIO_F_ANY_LAYOUT, "VIRTIO_F_ANY_LAYOUT" }, + }; + UINT i; + for (i = 0; i < sizeof(Features)/sizeof(Features[0]); ++i) + { + if (VirtIODeviceGetHostFeature(pContext, Features[i].bitmask)) + { + DPrintf(0, ("VirtIO Host Feature %s", Features[i].Name)); + } + } +} + +/********************************************************** + Only for test. 
Prints out if the interrupt line is ON +Parameters: +Return value: +***********************************************************/ +static void JustForCheckClearInterrupt(PARANDIS_ADAPTER *pContext, const char *Label) +{ + if (pContext->bEnableInterruptChecking) + { + ULONG ulActive; + ulActive = virtio_read_isr_status(&pContext->IODevice); + if (ulActive) + { + DPrintf(0,("WARNING: Interrupt Line %d(%s)!", ulActive, Label)); + } + } +} + +/********************************************************** +Prints out statistics +***********************************************************/ +static void PrintStatistics(PARANDIS_ADAPTER *pContext) +{ + ULONG64 totalTxFrames = + pContext->Statistics.ifHCOutBroadcastPkts + + pContext->Statistics.ifHCOutMulticastPkts + + pContext->Statistics.ifHCOutUcastPkts; + ULONG64 totalRxFrames = + pContext->Statistics.ifHCInBroadcastPkts + + pContext->Statistics.ifHCInMulticastPkts + + pContext->Statistics.ifHCInUcastPkts; + + DPrintf(0, ("[Diag!%X] RX buffers at VIRTIO %d of %d", + pContext->CurrentMacAddress[5], + pContext->NetNofReceiveBuffers, + pContext->NetMaxReceiveBuffers)); + DPrintf(0, ("[Diag!] TX desc available %d/%d, buf %d/min. %d", + pContext->nofFreeTxDescriptors, + pContext->maxFreeTxDescriptors, + pContext->nofFreeHardwareBuffers, + pContext->minFreeHardwareBuffers)); + pContext->minFreeHardwareBuffers = pContext->nofFreeHardwareBuffers; + if (pContext->NetTxPacketsToReturn) + { + DPrintf(0, ("[Diag!] TX packets to return %d", pContext->NetTxPacketsToReturn)); + } + DPrintf(0, ("[Diag!] Bytes transmitted %I64u, received %I64u", + pContext->Statistics.ifHCOutOctets, + pContext->Statistics.ifHCInOctets)); + DPrintf(0, ("[Diag!] Tx frames %I64u, CSO %d, LSO %d, indirect %d", + totalTxFrames, + pContext->extraStatistics.framesCSOffload, + pContext->extraStatistics.framesLSO, + pContext->extraStatistics.framesIndirect)); + DPrintf(0, ("[Diag!] Rx frames %I64u, Rx.Pri %d, RxHwCS.OK %d, FiltOut %d", + totalRxFrames, pContext->extraStatistics.framesRxPriority, + pContext->extraStatistics.framesRxCSHwOK, pContext->extraStatistics.framesFilteredOut)); + if (pContext->extraStatistics.framesRxCSHwMissedBad || pContext->extraStatistics.framesRxCSHwMissedGood) + { + DPrintf(0, ("[Diag!] RxHwCS mistakes: missed bad %d, missed good %d", + pContext->extraStatistics.framesRxCSHwMissedBad, pContext->extraStatistics.framesRxCSHwMissedGood)); + } +} + +static NDIS_STATUS NTStatusToNdisStatus(NTSTATUS nt_status) { + switch (nt_status) { + case STATUS_SUCCESS: + return NDIS_STATUS_SUCCESS; + case STATUS_NOT_FOUND: + case STATUS_DEVICE_NOT_CONNECTED: + return NDIS_STATUS_ADAPTER_NOT_FOUND; + case STATUS_INSUFFICIENT_RESOURCES: + return NDIS_STATUS_RESOURCES; + case STATUS_INVALID_PARAMETER: + return NDIS_STATUS_INVALID_DEVICE_REQUEST; + default: + return NDIS_STATUS_FAILURE; + } +} + +static NDIS_STATUS FinalizeFeatures(PARANDIS_ADAPTER *pContext) +{ + NTSTATUS nt_status = virtio_set_features(&pContext->IODevice, pContext->ullGuestFeatures); + if (!NT_SUCCESS(nt_status)) { + DPrintf(0, ("[%s] virtio_set_features failed with %x\n", __FUNCTION__, nt_status)); + } + return NTStatusToNdisStatus(nt_status); +} + +/********************************************************** +Initializes the context structure +Major variables, received from NDIS on initialization, must be be set before this call +(for ex. 
pContext->MiniportHandle) + +If this procedure fails, no need to call + ParaNdis_CleanupContext + + +Parameters: +Return value: + SUCCESS, if resources are OK + NDIS_STATUS_RESOURCE_CONFLICT if not +***********************************************************/ +NDIS_STATUS ParaNdis_InitializeContext( + PARANDIS_ADAPTER *pContext, + PNDIS_RESOURCE_LIST pResourceList) +{ + NDIS_STATUS status = NDIS_STATUS_SUCCESS; + PUCHAR pNewMacAddress = NULL; + USHORT linkStatus = 0; + NTSTATUS nt_status; + + DEBUG_ENTRY(0); + /* read first PCI IO bar*/ + //ulIOAddress = ReadPCIConfiguration(miniportAdapterHandle, 0x10); + /* check this is IO and assigned */ + ReadNicConfiguration(pContext, &pNewMacAddress); + if (pNewMacAddress) + { + if (ParaNdis_ValidateMacAddress(pNewMacAddress, TRUE)) + { + DPrintf(0, ("[%s] WARNING: MAC address reloaded", __FUNCTION__)); + NdisMoveMemory(pContext->CurrentMacAddress, pNewMacAddress, sizeof(pContext->CurrentMacAddress)); + } + else + { + DPrintf(0, ("[%s] WARNING: Invalid MAC address ignored", __FUNCTION__)); + } + NdisFreeMemory(pNewMacAddress, 0, 0); + } + + pContext->MaxPacketSize.nMaxFullSizeOS = pContext->MaxPacketSize.nMaxDataSize + ETH_HEADER_SIZE; + pContext->MaxPacketSize.nMaxFullSizeHwTx = pContext->MaxPacketSize.nMaxFullSizeOS; + pContext->MaxPacketSize.nMaxFullSizeHwRx = pContext->MaxPacketSize.nMaxFullSizeOS + ETH_PRIORITY_HEADER_SIZE; + if (pContext->ulPriorityVlanSetting) + pContext->MaxPacketSize.nMaxFullSizeHwTx = pContext->MaxPacketSize.nMaxFullSizeHwRx; + + if (GetAdapterResources(pContext->MiniportHandle, pResourceList, &pContext->AdapterResources)) + { + if (pContext->AdapterResources.InterruptFlags & CM_RESOURCE_INTERRUPT_MESSAGE) + { + DPrintf(0, ("[%s] Message interrupt assigned", __FUNCTION__)); + pContext->bUsingMSIX = TRUE; + } + + nt_status = virtio_device_initialize( + &pContext->IODevice, + &ParaNdisSystemOps, + pContext, + pContext->bUsingMSIX); + if (!NT_SUCCESS(nt_status)) { + DPrintf(0, ("[%s] virtio_device_initialize failed with %x\n", __FUNCTION__, nt_status)); + status = NTStatusToNdisStatus(nt_status); + DEBUG_EXIT_STATUS(0, status); + return status; + } + + pContext->bIODeviceInitialized = TRUE; + JustForCheckClearInterrupt(pContext, "init 0"); + ParaNdis_ResetVirtIONetDevice(pContext); + JustForCheckClearInterrupt(pContext, "init 1"); + virtio_add_status(&pContext->IODevice, VIRTIO_CONFIG_S_ACKNOWLEDGE); + JustForCheckClearInterrupt(pContext, "init 2"); + virtio_add_status(&pContext->IODevice, VIRTIO_CONFIG_S_DRIVER); + pContext->ullHostFeatures = virtio_get_features(&pContext->IODevice); + DumpVirtIOFeatures(pContext); + JustForCheckClearInterrupt(pContext, "init 3"); + pContext->bLinkDetectSupported = 0 != VirtIODeviceGetHostFeature(pContext, VIRTIO_NET_F_STATUS); + + if(pContext->bLinkDetectSupported) { + virtio_get_config(&pContext->IODevice, sizeof(pContext->CurrentMacAddress), &linkStatus, sizeof(linkStatus)); + pContext->bConnected = (linkStatus & VIRTIO_NET_S_LINK_UP) != 0; + DPrintf(0, ("[%s] Link status on driver startup: %d", __FUNCTION__, pContext->bConnected)); + } + + if (VirtIODeviceGetHostFeature(pContext, VIRTIO_F_VERSION_1)) + { + // virtio 1.0 always uses the extended header + pContext->nVirtioHeaderSize = sizeof(virtio_net_hdr_ext); + VirtIODeviceEnableGuestFeature(pContext, VIRTIO_F_VERSION_1); + } + else + { + pContext->nVirtioHeaderSize = sizeof(virtio_net_hdr_basic); + } + + if (VirtIODeviceGetHostFeature(pContext, VIRTIO_F_ANY_LAYOUT)) + { + VirtIODeviceEnableGuestFeature(pContext, VIRTIO_F_ANY_LAYOUT); + 
} + if (VirtIODeviceGetHostFeature(pContext, VIRTIO_RING_F_EVENT_IDX)) + { + VirtIODeviceEnableGuestFeature(pContext, VIRTIO_RING_F_EVENT_IDX); + } + + if (!pContext->bUseMergedBuffers && VirtIODeviceGetHostFeature(pContext, VIRTIO_NET_F_MRG_RXBUF)) + { + DPrintf(0, ("[%s] Not using mergeable buffers", __FUNCTION__)); + } + else + { + pContext->bUseMergedBuffers = VirtIODeviceGetHostFeature(pContext, VIRTIO_NET_F_MRG_RXBUF) != 0; + if (pContext->bUseMergedBuffers) + { + pContext->nVirtioHeaderSize = sizeof(virtio_net_hdr_ext); + VirtIODeviceEnableGuestFeature(pContext, VIRTIO_NET_F_MRG_RXBUF); + } + } + if (VirtIODeviceGetHostFeature(pContext, VIRTIO_NET_F_MAC)) + { + virtio_get_config( + &pContext->IODevice, + 0, // + offsetof(struct virtio_net_config, mac) + &pContext->PermanentMacAddress, + ETH_LENGTH_OF_ADDRESS); + if (!ParaNdis_ValidateMacAddress(pContext->PermanentMacAddress, FALSE)) + { + DPrintf(0,("Invalid device MAC ignored(%02x-%02x-%02x-%02x-%02x-%02x)", + pContext->PermanentMacAddress[0], + pContext->PermanentMacAddress[1], + pContext->PermanentMacAddress[2], + pContext->PermanentMacAddress[3], + pContext->PermanentMacAddress[4], + pContext->PermanentMacAddress[5])); + NdisZeroMemory(pContext->PermanentMacAddress, sizeof(pContext->PermanentMacAddress)); + } + } + + if (ETH_IS_EMPTY(pContext->PermanentMacAddress)) + { + DPrintf(0, ("No device MAC present, use default")); + pContext->PermanentMacAddress[0] = 0x02; + pContext->PermanentMacAddress[1] = 0x50; + pContext->PermanentMacAddress[2] = 0xF2; + pContext->PermanentMacAddress[3] = 0x00; + pContext->PermanentMacAddress[4] = 0x01; + pContext->PermanentMacAddress[5] = 0x80 | (UCHAR)(pContext->ulUniqueID & 0xFF); + } + DPrintf(0,("Device MAC = %02x-%02x-%02x-%02x-%02x-%02x", + pContext->PermanentMacAddress[0], + pContext->PermanentMacAddress[1], + pContext->PermanentMacAddress[2], + pContext->PermanentMacAddress[3], + pContext->PermanentMacAddress[4], + pContext->PermanentMacAddress[5])); + + if (ETH_IS_EMPTY(pContext->CurrentMacAddress)) + { + NdisMoveMemory( + &pContext->CurrentMacAddress, + &pContext->PermanentMacAddress, + ETH_LENGTH_OF_ADDRESS); + } + else + { + DPrintf(0,("Current MAC = %02x-%02x-%02x-%02x-%02x-%02x", + pContext->CurrentMacAddress[0], + pContext->CurrentMacAddress[1], + pContext->CurrentMacAddress[2], + pContext->CurrentMacAddress[3], + pContext->CurrentMacAddress[4], + pContext->CurrentMacAddress[5])); + } + if (VirtIODeviceGetHostFeature(pContext, VIRTIO_NET_F_CTRL_VQ)) { + pContext->bHasControlQueue = TRUE; + VirtIODeviceEnableGuestFeature(pContext, VIRTIO_NET_F_CTRL_VQ); + } + } + else + { + DPrintf(0, ("[%s] Error: Incomplete resources", __FUNCTION__)); + status = NDIS_STATUS_RESOURCE_CONFLICT; + } + + + if (pContext->bDoHardwareChecksum) + { + ULONG dependentOptions; + dependentOptions = osbT4TcpChecksum | osbT4UdpChecksum | osbT4TcpOptionsChecksum; + if (!VirtIODeviceGetHostFeature(pContext, VIRTIO_NET_F_CSUM) && + (pContext->Offload.flagsValue & dependentOptions)) + { + DPrintf(0, ("[%s] Host does not support CSUM, disabling CS offload", __FUNCTION__) ); + pContext->Offload.flagsValue &= ~dependentOptions; + } + } + + if (pContext->bDoGuestChecksumOnReceive && VirtIODeviceGetHostFeature(pContext, VIRTIO_NET_F_GUEST_CSUM)) + { + DPrintf(0, ("[%s] Enabling guest checksum", __FUNCTION__) ); + VirtIODeviceEnableGuestFeature(pContext, VIRTIO_NET_F_GUEST_CSUM); + } + else + { + pContext->bDoGuestChecksumOnReceive = FALSE; + } + + // now, after we checked the capabilities, we can initialize current + // 
configuration of offload tasks + ParaNdis_ResetOffloadSettings(pContext, NULL, NULL); + if (pContext->Offload.flags.fTxLso && !pContext->bUseScatterGather) + { + DisableBothLSOPermanently(pContext, __FUNCTION__, "SG is not active"); + } + if (pContext->Offload.flags.fTxLso && + !VirtIODeviceGetHostFeature(pContext, VIRTIO_NET_F_HOST_TSO4)) + { + DisableLSOv4Permanently(pContext, __FUNCTION__, "Host does not support TSOv4"); + } + if (pContext->Offload.flags.fTxLsov6 && + !VirtIODeviceGetHostFeature(pContext, VIRTIO_NET_F_HOST_TSO6)) + { + DisableLSOv6Permanently(pContext, __FUNCTION__, "Host does not support TSOv6"); + } + if (pContext->bUseIndirect) + { + const char *reason = ""; + if (!VirtIODeviceGetHostFeature(pContext, VIRTIO_RING_F_INDIRECT_DESC)) + { + pContext->bUseIndirect = FALSE; + reason = "Host support"; + } + else if (!pContext->bUseScatterGather) + { + pContext->bUseIndirect = FALSE; + reason = "SG"; + } + DPrintf(0, ("[%s] %sable indirect Tx(!%s)", __FUNCTION__, pContext->bUseIndirect ? "En" : "Dis", reason) ); + } + + if (VirtIODeviceGetHostFeature(pContext, VIRTIO_NET_F_CTRL_RX_EXTRA) && + pContext->bDoHwPacketFiltering) + { + DPrintf(0, ("[%s] Using hardware packet filtering", __FUNCTION__)); + pContext->bHasHardwareFilters = TRUE; + } + + status = FinalizeFeatures(pContext); + + pContext->ReuseBufferProc = (tReuseReceiveBufferProc)ReuseReceiveBufferRegular; + + NdisInitializeEvent(&pContext->ResetEvent); + DEBUG_EXIT_STATUS(0, status); + return status; +} + +/********************************************************** +Free the resources allocated for VirtIO buffer descriptor +Parameters: + PVOID pParam pIONetDescriptor to free + BOOLEAN bRemoveFromList TRUE, if also remove it from list +***********************************************************/ +static void VirtIONetFreeBufferDescriptor(PARANDIS_ADAPTER *pContext, pIONetDescriptor pBufferDescriptor) +{ + if(pBufferDescriptor) + { + if (pBufferDescriptor->pHolder) + ParaNdis_UnbindBufferFromPacket(pContext, pBufferDescriptor); + if (pBufferDescriptor->DataInfo.Virtual) + ParaNdis_FreePhysicalMemory(pContext, &pBufferDescriptor->DataInfo); + if (pBufferDescriptor->HeaderInfo.Virtual) + ParaNdis_FreePhysicalMemory(pContext, &pBufferDescriptor->HeaderInfo); + NdisFreeMemory(pBufferDescriptor, 0, 0); + } +} + +/********************************************************** +Free all the buffer descriptors from specified list +Parameters: + PLIST_ENTRY pListRoot list containing pIONetDescriptor structures + PNDIS_SPIN_LOCK pLock lock to protest this list +Return value: +***********************************************************/ +static void FreeDescriptorsFromList(PARANDIS_ADAPTER *pContext, PLIST_ENTRY pListRoot, PNDIS_SPIN_LOCK pLock) +{ + pIONetDescriptor pBufferDescriptor; + LIST_ENTRY TempList; + InitializeListHead(&TempList); + NdisAcquireSpinLock(pLock); + while(!IsListEmpty(pListRoot)) + { + pBufferDescriptor = (pIONetDescriptor)RemoveHeadList(pListRoot); + InsertTailList(&TempList, &pBufferDescriptor->listEntry); + } + NdisReleaseSpinLock(pLock); + while(!IsListEmpty(&TempList)) + { + pBufferDescriptor = (pIONetDescriptor)RemoveHeadList(&TempList); + VirtIONetFreeBufferDescriptor(pContext, pBufferDescriptor); + } +} + +static pIONetDescriptor AllocatePairOfBuffersOnInit( + PARANDIS_ADAPTER *pContext, + ULONG size1, + ULONG size2, + BOOLEAN bForTx) +{ + pIONetDescriptor p; + p = (pIONetDescriptor)ParaNdis_AllocateMemory(pContext, sizeof(*p)); + if (p) + { + BOOLEAN b1 = FALSE, b2 = FALSE; + NdisZeroMemory(p, 
sizeof(*p)); + p->HeaderInfo.size = size1; + p->DataInfo.size = size2; + p->HeaderInfo.IsCached = p->DataInfo.IsCached = 1; + p->HeaderInfo.IsTX = p->DataInfo.IsTX = bForTx; + p->nofUsedBuffers = 0; + b1 = ParaNdis_InitialAllocatePhysicalMemory(pContext, &p->HeaderInfo); + if (b1) b2 = ParaNdis_InitialAllocatePhysicalMemory(pContext, &p->DataInfo); + if (b1 && b2) + { + BOOLEAN b = bForTx || ParaNdis_BindBufferToPacket(pContext, p); + if (!b) + { + DPrintf(0, ("[INITPHYS](%s) Failed to bind memory to net packet", bForTx ? "TX" : "RX")); + VirtIONetFreeBufferDescriptor(pContext, p); + p = NULL; + } + } + else + { + if (b1) ParaNdis_FreePhysicalMemory(pContext, &p->HeaderInfo); + if (b2) ParaNdis_FreePhysicalMemory(pContext, &p->DataInfo); + NdisFreeMemory(p, 0, 0); + p = NULL; + DPrintf(0, ("[INITPHYS](%s) Failed to allocate memory block", bForTx ? "TX" : "RX")); + } + } + if (p) + { + DPrintf(3, ("[INITPHYS](%s) Header v%p(p%08lX), Data v%p(p%08lX)", bForTx ? "TX" : "RX", + p->HeaderInfo.Virtual, p->HeaderInfo.Physical.LowPart, + p->DataInfo.Virtual, p->DataInfo.Physical.LowPart)); + } + return p; +} + +/********************************************************** +Allocates TX buffers according to startup setting (pContext->maxFreeTxDescriptors as got from registry) +Buffers are chained in NetFreeSendBuffers +Parameters: + context +***********************************************************/ +static void PrepareTransmitBuffers(PARANDIS_ADAPTER *pContext) +{ + UINT nBuffers, nMaxBuffers; + DEBUG_ENTRY(4); + nMaxBuffers = virtio_get_queue_size(pContext->NetSendQueue) / 2; + if (nMaxBuffers > pContext->maxFreeTxDescriptors) nMaxBuffers = pContext->maxFreeTxDescriptors; + + for (nBuffers = 0; nBuffers < nMaxBuffers; ++nBuffers) + { + pIONetDescriptor pBuffersDescriptor = + AllocatePairOfBuffersOnInit( + pContext, + pContext->nVirtioHeaderSize, + pContext->MaxPacketSize.nMaxFullSizeHwTx, + TRUE); + if (!pBuffersDescriptor) break; + + NdisZeroMemory(pBuffersDescriptor->HeaderInfo.Virtual, pBuffersDescriptor->HeaderInfo.size); + InsertTailList(&pContext->NetFreeSendBuffers, &pBuffersDescriptor->listEntry); + pContext->nofFreeTxDescriptors++; + } + + pContext->maxFreeTxDescriptors = pContext->nofFreeTxDescriptors; + pContext->nofFreeHardwareBuffers = pContext->nofFreeTxDescriptors * 2; + pContext->maxFreeHardwareBuffers = pContext->minFreeHardwareBuffers = pContext->nofFreeHardwareBuffers; + DPrintf(0, ("[%s] available %d Tx descriptors, %d hw buffers", + __FUNCTION__, pContext->nofFreeTxDescriptors, pContext->nofFreeHardwareBuffers)); +} + +static BOOLEAN AddRxBufferToQueue(PARANDIS_ADAPTER *pContext, pIONetDescriptor pBufferDescriptor) +{ + UINT nBuffersToSubmit = 2; + struct VirtIOBufferDescriptor sg[2]; + if (!pContext->bUseMergedBuffers) + { + sg[0].physAddr = pBufferDescriptor->HeaderInfo.Physical; + sg[0].length = pBufferDescriptor->HeaderInfo.size; + sg[1].physAddr = pBufferDescriptor->DataInfo.Physical; + sg[1].length = pBufferDescriptor->DataInfo.size; + } + else + { + sg[0].physAddr = pBufferDescriptor->DataInfo.Physical; + sg[0].length = pBufferDescriptor->DataInfo.size; + nBuffersToSubmit = 1; + } + return 0 <= virtqueue_add_buf( + pContext->NetReceiveQueue, + sg, + 0, + nBuffersToSubmit, + pBufferDescriptor, + NULL, + 0); +} + + +/********************************************************** +Allocates maximum RX buffers for incoming packets +Buffers are chained in NetReceiveBuffers +Parameters: + context +***********************************************************/ +static int 
PrepareReceiveBuffers(PARANDIS_ADAPTER *pContext) +{ + int nRet = 0; + UINT i; + DEBUG_ENTRY(4); + + for (i = 0; i < pContext->NetMaxReceiveBuffers; ++i) + { + ULONG size1 = pContext->bUseMergedBuffers ? 4 : pContext->nVirtioHeaderSize; + ULONG size2 = pContext->MaxPacketSize.nMaxFullSizeHwRx + + (pContext->bUseMergedBuffers ? pContext->nVirtioHeaderSize : 0); + pIONetDescriptor pBuffersDescriptor = + AllocatePairOfBuffersOnInit(pContext, size1, size2, FALSE); + if (!pBuffersDescriptor) break; + + if (!AddRxBufferToQueue(pContext, pBuffersDescriptor)) + { + VirtIONetFreeBufferDescriptor(pContext, pBuffersDescriptor); + break; + } + + InsertTailList(&pContext->NetReceiveBuffers, &pBuffersDescriptor->listEntry); + + pContext->NetNofReceiveBuffers++; + } + + pContext->NetMaxReceiveBuffers = pContext->NetNofReceiveBuffers; + DPrintf(0, ("[%s] MaxReceiveBuffers %d\n", __FUNCTION__, pContext->NetMaxReceiveBuffers) ); + + virtqueue_kick(pContext->NetReceiveQueue); + + return nRet; +} + +static NDIS_STATUS FindNetQueues(PARANDIS_ADAPTER *pContext) +{ + struct virtqueue *queues[3]; + unsigned nvqs = pContext->bHasControlQueue ? 3 : 2; + NTSTATUS status; + + // We work with two or three virtqueues, 0 - receive, 1 - send, 2 - control + status = virtio_find_queues( + &pContext->IODevice, + nvqs, + queues); + if (!NT_SUCCESS(status)) { + DPrintf(0, ("[%s] virtio_find_queues failed with %x\n", __FUNCTION__, status)); + return NTStatusToNdisStatus(status); + } + + pContext->NetReceiveQueue = queues[0]; + pContext->NetSendQueue = queues[1]; + if (pContext->bHasControlQueue) { + pContext->NetControlQueue = queues[2]; + } + + return NDIS_STATUS_SUCCESS; +} + +// called on PASSIVE upon unsuccessful Init or upon Halt +static void DeleteNetQueues(PARANDIS_ADAPTER *pContext) +{ + virtio_delete_queues(&pContext->IODevice); +} + +/********************************************************** +Initializes VirtIO buffering and related stuff: +Allocates RX and TX queues and buffers +Parameters: + context +Return value: + TRUE if both queues are allocated +***********************************************************/ +static NDIS_STATUS ParaNdis_VirtIONetInit(PARANDIS_ADAPTER *pContext) +{ + NDIS_STATUS status; + DEBUG_ENTRY(0); + + pContext->ControlData.IsCached = 1; + pContext->ControlData.size = 512; + + status = FindNetQueues(pContext); + if (status != NDIS_STATUS_SUCCESS) { + return status; + } + + if (pContext->NetReceiveQueue && pContext->NetSendQueue) + { + PrepareTransmitBuffers(pContext); + PrepareReceiveBuffers(pContext); + + if (pContext->NetControlQueue) + ParaNdis_InitialAllocatePhysicalMemory(pContext, &pContext->ControlData); + if (!pContext->NetControlQueue || !pContext->ControlData.Virtual) + { + DPrintf(0, ("[%s] The Control vQueue does not work!\n", __FUNCTION__) ); + pContext->bHasHardwareFilters = FALSE; + } + if (pContext->nofFreeTxDescriptors && + pContext->NetMaxReceiveBuffers && + pContext->maxFreeHardwareBuffers) + { + pContext->sgTxGatherTable = ParaNdis_AllocateMemory(pContext, + pContext->maxFreeHardwareBuffers * sizeof(pContext->sgTxGatherTable[0])); + if (!pContext->sgTxGatherTable) + { + DisableBothLSOPermanently(pContext, __FUNCTION__, "Can not allocate SG table"); + } + status = NDIS_STATUS_SUCCESS; + } + } + else + { + DeleteNetQueues(pContext); + status = NDIS_STATUS_RESOURCES; + } + return status; +} + +static void VirtIODeviceRemoveStatus(VirtIODevice *vdev, u8 status) +{ + virtio_set_status( + vdev, + virtio_get_status(vdev) & ~status); +} + 
+/********************************************************** +Finishes initialization of context structure, calling also version dependent part +If this procedure failed, ParaNdis_CleanupContext must be called +Parameters: + context +Return value: + SUCCESS or some kind of failure +***********************************************************/ +NDIS_STATUS ParaNdis_FinishInitialization(PARANDIS_ADAPTER *pContext) +{ + NDIS_STATUS status = NDIS_STATUS_SUCCESS; + DEBUG_ENTRY(0); + + NdisAllocateSpinLock(&pContext->SendLock); +#if !defined(UNIFY_LOCKS) + NdisAllocateSpinLock(&pContext->ReceiveLock); +#endif + + InitializeListHead(&pContext->NetReceiveBuffers); + InitializeListHead(&pContext->NetReceiveBuffersWaiting); + InitializeListHead(&pContext->NetSendBuffersInUse); + InitializeListHead(&pContext->NetFreeSendBuffers); + + status = ParaNdis_FinishSpecificInitialization(pContext); + + if (status == NDIS_STATUS_SUCCESS) + { + status = ParaNdis_VirtIONetInit(pContext); + } + + pContext->Limits.nReusedRxBuffers = pContext->NetMaxReceiveBuffers / 4 + 1; + + if (status == NDIS_STATUS_SUCCESS) + { + JustForCheckClearInterrupt(pContext, "start 3"); + pContext->bEnableInterruptHandlingDPC = TRUE; + ParaNdis_SetPowerState(pContext, NdisDeviceStateD0); + virtio_device_ready(&pContext->IODevice); + JustForCheckClearInterrupt(pContext, "start 4"); + ParaNdis_UpdateDeviceFilters(pContext); + } + else + { + virtio_add_status(&pContext->IODevice, VIRTIO_CONFIG_S_FAILED); + } + DEBUG_EXIT_STATUS(0, status); + return status; +} + +/********************************************************** +Releases VirtIO related resources - queues and buffers +Parameters: + context +Return value: +***********************************************************/ +static void VirtIONetRelease(PARANDIS_ADAPTER *pContext) +{ + BOOLEAN b; + DEBUG_ENTRY(0); + + /* list NetReceiveBuffersWaiting must be free */ + do + { + NdisAcquireSpinLock(&pContext->ReceiveLock); + b = !IsListEmpty(&pContext->NetReceiveBuffersWaiting); + NdisReleaseSpinLock(&pContext->ReceiveLock); + if (b) + { + DPrintf(0, ("[%s] There are waiting buffers", __FUNCTION__)); + PrintStatistics(pContext); + NdisMSleep(5000000); + } + }while (b); + + DeleteNetQueues(pContext); + virtio_device_shutdown(&pContext->IODevice); + pContext->bIODeviceInitialized = FALSE; + + /* intentionally commented out + FreeDescriptorsFromList( + pContext, + &pContext->NetReceiveBuffersWaiting, + &pContext->ReceiveLock); + */ + + /* this can be freed, queue shut down */ + FreeDescriptorsFromList( + pContext, + &pContext->NetReceiveBuffers, + &pContext->ReceiveLock); + + /* this can be freed, queue shut down */ + FreeDescriptorsFromList( + pContext, + &pContext->NetSendBuffersInUse, + &pContext->SendLock); + + /* this can be freed, send disabled */ + FreeDescriptorsFromList( + pContext, + &pContext->NetFreeSendBuffers, + &pContext->SendLock); + + if (pContext->ControlData.Virtual) + ParaNdis_FreePhysicalMemory(pContext, &pContext->ControlData); + + PrintStatistics(pContext); + if (pContext->sgTxGatherTable) + { + NdisFreeMemory(pContext->sgTxGatherTable, 0, 0); + } +} + +static void PreventDPCServicing(PARANDIS_ADAPTER *pContext) +{ + LONG inside;; + pContext->bEnableInterruptHandlingDPC = FALSE; + do + { + inside = InterlockedIncrement(&pContext->counterDPCInside); + InterlockedDecrement(&pContext->counterDPCInside); + if (inside > 1) + { + DPrintf(0, ("[%s] waiting!", __FUNCTION__)); + NdisMSleep(20000); + } + } while (inside > 1); +} + 
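+
+/**********************************************************
+Informational note on the DPC quiescing handshake above.
+ParaNdis_DPCWorkBody increments counterDPCInside before doing any
+work and re-tests bEnableInterruptHandlingDPC after the increment;
+PreventDPCServicing clears the flag first and then polls the counter.
+A DPC that slipped past the cleared flag is therefore still visible
+through the counter, and once the counter drops back to the caller's
+own reference no further DPC processing can start.
+***********************************************************/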
+/********************************************************** +Frees all the resources allocated when the context initialized, + calling also version-dependent part +Parameters: + context +***********************************************************/ +VOID ParaNdis_CleanupContext(PARANDIS_ADAPTER *pContext) +{ + UINT i; + + /* disable any interrupt generation */ + if (pContext->IODevice.addr) + { + //int nActive; + //nActive = virtio_read_isr_status(&pContext->IODevice); + /* back compat - remove the OK flag only in legacy mode */ + VirtIODeviceRemoveStatus(&pContext->IODevice, VIRTIO_CONFIG_S_DRIVER_OK); + JustForCheckClearInterrupt(pContext, "exit 1"); + //nActive += virtio_read_isr_status(&pContext->IODevice); + //nActive += virtio_read_isr_status(&pContext->IODevice); + //DPrintf(0, ("cleanup %d", nActive)); + } + + PreventDPCServicing(pContext); + + /**************************************** + ensure all the incoming packets returned, + free all the buffers and their descriptors + *****************************************/ + + if (pContext->bIODeviceInitialized) + { + JustForCheckClearInterrupt(pContext, "exit 2"); + ParaNdis_ResetVirtIONetDevice(pContext); + JustForCheckClearInterrupt(pContext, "exit 3"); + } + + ParaNdis_SetPowerState(pContext, NdisDeviceStateD3); + VirtIONetRelease(pContext); + + ParaNdis_FinalizeCleanup(pContext); + + if (pContext->SendLock.SpinLock) + { + NdisFreeSpinLock(&pContext->SendLock); + } + +#if !defined(UNIFY_LOCKS) + if (pContext->ReceiveLock.SpinLock) + { + NdisFreeSpinLock(&pContext->ReceiveLock); + } +#endif + + /* free queue shared memory */ + for (i = 0; i < MAX_NUM_OF_QUEUES; i++) { + if (pContext->SharedMemoryRanges[i].pBase != NULL) { + NdisMFreeSharedMemory( + pContext->MiniportHandle, + pContext->SharedMemoryRanges[i].uLength, + TRUE /* Cached */, + pContext->SharedMemoryRanges[i].pBase, + pContext->SharedMemoryRanges[i].BasePA); + pContext->SharedMemoryRanges[i].pBase = NULL; + } + } + + /* unmap our port and memory IO resources */ + for (i = 0; i < PCI_TYPE0_ADDRESSES; i++) + { + tBusResource *pRes = &pContext->AdapterResources.PciBars[i]; + if (pRes->pBase != NULL) + { + if (pRes->bPortSpace) + { + NdisMDeregisterIoPortRange( + pContext->MiniportHandle, + pRes->BasePA.LowPart, + pRes->uLength, + pRes->pBase); + } + else + { + NdisMUnmapIoSpace( + pContext->MiniportHandle, + pRes->pBase, + pRes->uLength); + } + } + } +} + + +/********************************************************** +System shutdown handler (shutdown, restart, bugcheck) +Parameters: + context +***********************************************************/ +VOID ParaNdis_OnShutdown(PARANDIS_ADAPTER *pContext) +{ + DEBUG_ENTRY(0); // this is only for kdbg :) + ParaNdis_ResetVirtIONetDevice(pContext); +} + +/********************************************************** +Handles hardware interrupt +Parameters: + context + ULONG knownInterruptSources - bitmask of +Return value: + TRUE, if it is our interrupt + sets *pRunDpc to TRUE if the DPC should be fired +***********************************************************/ +BOOLEAN ParaNdis_OnLegacyInterrupt( + PARANDIS_ADAPTER *pContext, + OUT BOOLEAN *pRunDpc) +{ + ULONG status = virtio_read_isr_status(&pContext->IODevice); + + if((status == 0) || + (status == VIRTIO_NET_INVALID_INTERRUPT_STATUS) || + (pContext->powerState != NdisDeviceStateD0)) + { + *pRunDpc = FALSE; + return FALSE; + } + + PARANDIS_STORE_LAST_INTERRUPT_TIMESTAMP(pContext); + ParaNdis_VirtIODisableIrqSynchronized(pContext, isAny); + 
InterlockedOr(&pContext->InterruptStatus, (LONG) ((status & isControl) | isReceive | isTransmit)); + *pRunDpc = TRUE; + return TRUE; +} + +BOOLEAN ParaNdis_OnQueuedInterrupt( + PARANDIS_ADAPTER *pContext, + OUT BOOLEAN *pRunDpc, + ULONG knownInterruptSources) +{ + struct virtqueue* _vq = ParaNdis_GetQueueForInterrupt(pContext, knownInterruptSources); + + /* If interrupts for this queue disabled do nothing */ + if((_vq != NULL) && !ParaNDIS_IsQueueInterruptEnabled(_vq)) + { + *pRunDpc = FALSE; + } + else + { + PARANDIS_STORE_LAST_INTERRUPT_TIMESTAMP(pContext); + InterlockedOr(&pContext->InterruptStatus, (LONG)knownInterruptSources); + ParaNdis_VirtIODisableIrqSynchronized(pContext, knownInterruptSources); + *pRunDpc = TRUE; + } + + return *pRunDpc; +} + + +/********************************************************** +It is called from Rx processing routines in regular mode of operation. +Returns received buffer back to VirtIO queue, inserting it to NetReceiveBuffers. +If needed, signals end of RX pause operation + +Must be called with &pContext->ReceiveLock acquired + +Parameters: + context + void *pDescriptor - pIONetDescriptor to return +***********************************************************/ +void ReuseReceiveBufferRegular(PARANDIS_ADAPTER *pContext, pIONetDescriptor pBuffersDescriptor) +{ + DEBUG_ENTRY(4); + + if(!pBuffersDescriptor) + return; + + RemoveEntryList(&pBuffersDescriptor->listEntry); + + if(AddRxBufferToQueue(pContext, pBuffersDescriptor)) + { + InsertTailList(&pContext->NetReceiveBuffers, &pBuffersDescriptor->listEntry); + + pContext->NetNofReceiveBuffers++; + + if (pContext->NetNofReceiveBuffers > pContext->NetMaxReceiveBuffers) + { + DPrintf(0, (" Error: NetNofReceiveBuffers > NetMaxReceiveBuffers(%d>%d)", + pContext->NetNofReceiveBuffers, pContext->NetMaxReceiveBuffers)); + } + + if (++pContext->Counters.nReusedRxBuffers >= pContext->Limits.nReusedRxBuffers) + { + pContext->Counters.nReusedRxBuffers = 0; + virtqueue_kick_always(pContext->NetReceiveQueue); + } + + if (IsListEmpty(&pContext->NetReceiveBuffersWaiting)) + { + if (pContext->ReceiveState == srsPausing || pContext->ReceivePauseCompletionProc) + { + ONPAUSECOMPLETEPROC callback = pContext->ReceivePauseCompletionProc; + pContext->ReceiveState = srsDisabled; + pContext->ReceivePauseCompletionProc = NULL; + ParaNdis_DebugHistory(pContext, hopInternalReceivePause, NULL, 0, 0, 0); + if (callback) callback(pContext); + } + } + } + else + { + DPrintf(0, ("FAILED TO REUSE THE BUFFER!!!!")); + VirtIONetFreeBufferDescriptor(pContext, pBuffersDescriptor); + pContext->NetMaxReceiveBuffers--; + } +} + +/********************************************************** +It is called from Rx processing routines between power off and power on in non-paused mode (Win8). +Returns received buffer to NetReceiveBuffers. 
+All the buffers will be placed into Virtio queue during power-on procedure + +Must be called with &pContext->ReceiveLock acquired + +Parameters: + context + void *pDescriptor - pIONetDescriptor to return +***********************************************************/ +static void ReuseReceiveBufferPowerOff(PARANDIS_ADAPTER *pContext, pIONetDescriptor pBuffersDescriptor) +{ + RemoveEntryList(&pBuffersDescriptor->listEntry); + InsertTailList(&pContext->NetReceiveBuffers, &pBuffersDescriptor->listEntry); +} + +/********************************************************** +It is called from Tx processing routines +Gets all the finished buffer from VirtIO TX path and +returns them to NetFreeSendBuffers + +Must be called with &pContext->SendLock acquired + +Parameters: + context +Return value: + (for reference) number of TX buffers returned +***********************************************************/ +UINT ParaNdis_VirtIONetReleaseTransmitBuffers( + PARANDIS_ADAPTER *pContext) +{ + UINT len, i = 0; + pIONetDescriptor pBufferDescriptor; + + DEBUG_ENTRY(4); + + while(NULL != (pBufferDescriptor = virtqueue_get_buf(pContext->NetSendQueue, &len))) + { + RemoveEntryList(&pBufferDescriptor->listEntry); + pContext->nofFreeTxDescriptors++; + if (!pBufferDescriptor->nofUsedBuffers) + { + DPrintf(0, ("[%s] ERROR: nofUsedBuffers not set!", __FUNCTION__)); + } + pContext->nofFreeHardwareBuffers += pBufferDescriptor->nofUsedBuffers; + ParaNdis_OnTransmitBufferReleased(pContext, pBufferDescriptor); + InsertTailList(&pContext->NetFreeSendBuffers, &pBufferDescriptor->listEntry); + DPrintf(3, ("[%s] Free Tx: desc %d, buff %d", __FUNCTION__, pContext->nofFreeTxDescriptors, pContext->nofFreeHardwareBuffers)); + pBufferDescriptor->nofUsedBuffers = 0; + ++i; + } + if (i) + { + NdisGetCurrentSystemTime(&pContext->LastTxCompletionTimeStamp); + pContext->bDoKickOnNoBuffer = TRUE; + pContext->nDetectedStoppedTx = 0; + } + DEBUG_EXIT_STATUS((i ? 3 : 5), i); + return i; +} + +static ULONG FORCEINLINE QueryTcpHeaderOffset(PVOID packetData, ULONG ipHeaderOffset, ULONG ipPacketLength) +{ + ULONG res; + tTcpIpPacketParsingResult ppr = ParaNdis_ReviewIPPacket( + (PUCHAR)packetData + ipHeaderOffset, + ipPacketLength, + __FUNCTION__); + if (ppr.xxpStatus == ppresXxpKnown) + { + res = ipHeaderOffset + ppr.ipHeaderSize; + } + else + { + DPrintf(0, ("[%s] ERROR: NOT a TCP or UDP packet - expected troubles!", __FUNCTION__)); + res = 0; + } + return res; +} + + +/********************************************************* +Called with from ProcessTx routine with TxLock held +Uses pContext->sgTxGatherTable +***********************************************************/ +tCopyPacketResult ParaNdis_DoSubmitPacket(PARANDIS_ADAPTER *pContext, tTxOperationParameters *Params) +{ + tCopyPacketResult result; + tMapperResult mapResult = {0,0,0}; + // populating priority tag or LSO MAY require additional SG element + UINT nRequiredBuffers; + BOOLEAN bUseCopy = FALSE; + struct VirtIOBufferDescriptor *sg = pContext->sgTxGatherTable; + + nRequiredBuffers = Params->nofSGFragments + 1 + ((Params->flags & (pcrPriorityTag | pcrLSO)) ? 
1 : 0); + + result.size = 0; + result.error = cpeOK; + if (!pContext->bUseScatterGather || // only copy available + Params->nofSGFragments == 0 || // theoretical case + !sg || // only copy available + ((~Params->flags & pcrLSO) && nRequiredBuffers > pContext->maxFreeHardwareBuffers) // to many fragments and normal size of packet + ) + { + nRequiredBuffers = 2; + bUseCopy = TRUE; + } + else if (pContext->bUseIndirect && !(Params->flags & pcrNoIndirect)) + { + nRequiredBuffers = 1; + } + + // I do not think this will help, but at least we can try freeing some buffers right now + if (pContext->nofFreeHardwareBuffers < nRequiredBuffers || !pContext->nofFreeTxDescriptors) + { + ParaNdis_VirtIONetReleaseTransmitBuffers(pContext); + } + + if (nRequiredBuffers > pContext->maxFreeHardwareBuffers) + { + // LSO and too many buffers, impossible to send + result.error = cpeTooLarge; + DPrintf(0, ("[%s] ERROR: too many fragments(%d required, %d max.avail)!", __FUNCTION__, + nRequiredBuffers, pContext->maxFreeHardwareBuffers)); + } + else if (pContext->nofFreeHardwareBuffers < nRequiredBuffers || !pContext->nofFreeTxDescriptors) + { + virtqueue_enable_cb_delayed(pContext->NetSendQueue); + result.error = cpeNoBuffer; + } + else if (Params->offloadMss && bUseCopy) + { + result.error = cpeInternalError; + DPrintf(0, ("[%s] ERROR: expecting SG for TSO! (%d buffers, %d bytes)", __FUNCTION__, + Params->nofSGFragments, Params->ulDataSize)); + } + else if (bUseCopy) + { + result = ParaNdis_DoCopyPacketData(pContext, Params); + } + else + { + UINT nMappedBuffers; + ULONGLONG paOfIndirectArea = 0; + PVOID vaOfIndirectArea = NULL; + pIONetDescriptor pBuffersDescriptor = (pIONetDescriptor)RemoveHeadList(&pContext->NetFreeSendBuffers); + pContext->nofFreeTxDescriptors--; + NdisZeroMemory(pBuffersDescriptor->HeaderInfo.Virtual, pBuffersDescriptor->HeaderInfo.size); + sg[0].physAddr = pBuffersDescriptor->HeaderInfo.Physical; + sg[0].length = pBuffersDescriptor->HeaderInfo.size; + ParaNdis_PacketMapper( + pContext, + Params->packet, + Params->ReferenceValue, + sg + 1, + pBuffersDescriptor, + &mapResult); + nMappedBuffers = mapResult.usBuffersMapped; + if (nMappedBuffers) + { + nMappedBuffers++; + if (pContext->bUseIndirect && !(Params->flags & pcrNoIndirect)) + { + ULONG space1 = (mapResult.usBufferSpaceUsed + 7) & ~7; + ULONG space2 = nMappedBuffers * SIZE_OF_SINGLE_INDIRECT_DESC; + if (pBuffersDescriptor->DataInfo.size >= (space1 + space2)) + { + vaOfIndirectArea = RtlOffsetToPointer(pBuffersDescriptor->DataInfo.Virtual, space1); + paOfIndirectArea = pBuffersDescriptor->DataInfo.Physical.QuadPart + space1; + pContext->extraStatistics.framesIndirect++; + } + else if (nMappedBuffers <= pContext->nofFreeHardwareBuffers) + { + // send as is, no indirect + } + else + { + result.error = cpeNoIndirect; + DPrintf(0, ("[%s] Unexpected ERROR of placement!", __FUNCTION__)); + } + } + if (result.error == cpeOK) + { + if (Params->flags & (pcrTcpChecksum | pcrUdpChecksum)) + { + unsigned short addPriorityLen = (Params->flags & pcrPriorityTag) ? 
ETH_PRIORITY_HEADER_SIZE : 0; + if (pContext->bDoHardwareChecksum) + { + virtio_net_hdr_basic *pheader = pBuffersDescriptor->HeaderInfo.Virtual; + pheader->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; + if (!Params->tcpHeaderOffset) + { + Params->tcpHeaderOffset = QueryTcpHeaderOffset( + pBuffersDescriptor->DataInfo.Virtual, + pContext->Offload.ipHeaderOffset + addPriorityLen, + mapResult.usBufferSpaceUsed - pContext->Offload.ipHeaderOffset - addPriorityLen); + } + else + { + Params->tcpHeaderOffset += addPriorityLen; + } + pheader->csum_start = (USHORT)Params->tcpHeaderOffset; + pheader->csum_offset = (Params->flags & pcrTcpChecksum) ? TCP_CHECKSUM_OFFSET : UDP_CHECKSUM_OFFSET; + } + else + { + // emulation mode - it is slow and intended only for test of flows + // and debugging of WLK test cases + PVOID pCopy = ParaNdis_AllocateMemory(pContext, Params->ulDataSize); + if (pCopy) + { + tTcpIpPacketParsingResult ppr; + // duplicate entire packet + ParaNdis_PacketCopier(Params->packet, pCopy, Params->ulDataSize, Params->ReferenceValue, FALSE); + // calculate complete TCP/UDP checksum + ppr = ParaNdis_CheckSumVerify( + RtlOffsetToPointer(pCopy, pContext->Offload.ipHeaderOffset + addPriorityLen), + Params->ulDataSize - pContext->Offload.ipHeaderOffset - addPriorityLen, + pcrAnyChecksum | pcrFixXxpChecksum, + __FUNCTION__); + // data portion in aside buffer contains complete IP+TCP header + // rewrite copy of original buffer by one new with calculated data + NdisMoveMemory( + pBuffersDescriptor->DataInfo.Virtual, + pCopy, + mapResult.usBufferSpaceUsed); + NdisFreeMemory(pCopy, 0, 0); + } + } + } + + if (0 <= virtqueue_add_buf( + pContext->NetSendQueue, + sg, + nMappedBuffers, + 0, + pBuffersDescriptor, + vaOfIndirectArea, + paOfIndirectArea)) + { + pBuffersDescriptor->nofUsedBuffers = nMappedBuffers; + pContext->nofFreeHardwareBuffers -= nMappedBuffers; + if (pContext->minFreeHardwareBuffers > pContext->nofFreeHardwareBuffers) + pContext->minFreeHardwareBuffers = pContext->nofFreeHardwareBuffers; + pBuffersDescriptor->ReferenceValue = Params->ReferenceValue; + result.size = Params->ulDataSize; + DPrintf(2, ("[%s] Submitted %d buffers (%d bytes), avail %d desc, %d bufs", + __FUNCTION__, nMappedBuffers, result.size, + pContext->nofFreeTxDescriptors, pContext->nofFreeHardwareBuffers + )); + } + else + { + result.error = cpeInternalError; + DPrintf(0, ("[%s] Unexpected ERROR adding buffer to TX engine!..", __FUNCTION__)); + } + } + } + else + { + DPrintf(0, ("[%s] Unexpected ERROR: packet not mapped!", __FUNCTION__)); + result.error = cpeInternalError; + } + + if (result.error == cpeOK) + { + UCHAR ethernetHeader[sizeof(ETH_HEADER)]; + eInspectedPacketType packetType; + /* get the ethernet header for review */ + ParaNdis_PacketCopier(Params->packet, ethernetHeader, sizeof(ethernetHeader), Params->ReferenceValue, TRUE); + packetType = QueryPacketType(ethernetHeader); + DebugDumpPacket("sending", ethernetHeader, 3); + InsertTailList(&pContext->NetSendBuffersInUse, &pBuffersDescriptor->listEntry); + pContext->Statistics.ifHCOutOctets += result.size; + switch (packetType) + { + case iptBroadcast: + pContext->Statistics.ifHCOutBroadcastOctets += result.size; + pContext->Statistics.ifHCOutBroadcastPkts++; + break; + case iptMulticast: + pContext->Statistics.ifHCOutMulticastOctets += result.size; + pContext->Statistics.ifHCOutMulticastPkts++; + break; + default: + pContext->Statistics.ifHCOutUcastOctets += result.size; + pContext->Statistics.ifHCOutUcastPkts++; + break; + } + + if (Params->flags & pcrLSO) + 
pContext->extraStatistics.framesLSO++; + } + else + { + pContext->nofFreeTxDescriptors++; + InsertHeadList(&pContext->NetFreeSendBuffers, &pBuffersDescriptor->listEntry); + } + } + if (result.error == cpeNoBuffer && pContext->bDoKickOnNoBuffer) + { + virtqueue_kick_always(pContext->NetSendQueue); + pContext->bDoKickOnNoBuffer = FALSE; + } + if (result.error == cpeOK) + { + if (Params->flags & (pcrTcpChecksum | pcrUdpChecksum)) + pContext->extraStatistics.framesCSOffload++; + } + return result; +} + + +/********************************************************** +It is called from Tx processing routines +Prepares the VirtIO buffer and copies to it the data from provided packet + +Must be called with &pContext->SendLock acquired + +Parameters: + context + tPacketType packet specific type is NDIS dependent + tCopyPacketDataFunction PacketCopier procedure for NDIS-specific type of packet +Return value: + (for reference) number of TX buffers returned +***********************************************************/ +tCopyPacketResult ParaNdis_DoCopyPacketData( + PARANDIS_ADAPTER *pContext, + tTxOperationParameters *pParams) +{ + tCopyPacketResult result; + tCopyPacketResult CopierResult; + struct VirtIOBufferDescriptor sg[2]; + pIONetDescriptor pBuffersDescriptor = NULL; + ULONG flags = pParams->flags; + UINT nRequiredHardwareBuffers = 2; + result.size = 0; + result.error = cpeOK; + if (pContext->nofFreeHardwareBuffers < nRequiredHardwareBuffers || + IsListEmpty(&pContext->NetFreeSendBuffers)) + { + result.error = cpeNoBuffer; + } + if(result.error == cpeOK) + { + pBuffersDescriptor = (pIONetDescriptor)RemoveHeadList(&pContext->NetFreeSendBuffers); + NdisZeroMemory(pBuffersDescriptor->HeaderInfo.Virtual, pBuffersDescriptor->HeaderInfo.size); + sg[0].physAddr = pBuffersDescriptor->HeaderInfo.Physical; + sg[0].length = pBuffersDescriptor->HeaderInfo.size; + sg[1].physAddr = pBuffersDescriptor->DataInfo.Physical; + CopierResult = ParaNdis_PacketCopier( + pParams->packet, + pBuffersDescriptor->DataInfo.Virtual, + pBuffersDescriptor->DataInfo.size, + pParams->ReferenceValue, + FALSE); + sg[1].length = result.size = CopierResult.size; + // did NDIS ask us to compute CS? + if ((flags & (pcrTcpChecksum | pcrUdpChecksum | pcrIpChecksum)) != 0) + { + // we asked + unsigned short addPriorityLen = (pParams->flags & pcrPriorityTag) ? ETH_PRIORITY_HEADER_SIZE : 0; + PVOID ipPacket = RtlOffsetToPointer( + pBuffersDescriptor->DataInfo.Virtual, pContext->Offload.ipHeaderOffset + addPriorityLen); + ULONG ipPacketLength = CopierResult.size - pContext->Offload.ipHeaderOffset - addPriorityLen; + if (!pParams->tcpHeaderOffset && + (flags & (pcrTcpChecksum | pcrUdpChecksum)) ) + { + pParams->tcpHeaderOffset = QueryTcpHeaderOffset( + pBuffersDescriptor->DataInfo.Virtual, + pContext->Offload.ipHeaderOffset + addPriorityLen, + ipPacketLength); + } + else + { + pParams->tcpHeaderOffset += addPriorityLen; + } + + if (pContext->bDoHardwareChecksum) + { + if (flags & (pcrTcpChecksum | pcrUdpChecksum)) + { + // hardware offload + virtio_net_hdr_basic *pvnh = (virtio_net_hdr_basic *)pBuffersDescriptor->HeaderInfo.Virtual; + pvnh->csum_start = (USHORT)pParams->tcpHeaderOffset; + pvnh->csum_offset = (flags & pcrTcpChecksum) ? 
TCP_CHECKSUM_OFFSET : UDP_CHECKSUM_OFFSET; + pvnh->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; + } + if (flags & (pcrIpChecksum)) + { + ParaNdis_CheckSumVerify( + ipPacket, + ipPacketLength, + pcrIpChecksum | pcrFixIPChecksum, + __FUNCTION__); + } + } + else if (CopierResult.size > pContext->Offload.ipHeaderOffset) + { + ULONG csFlags = 0; + if (flags & pcrIpChecksum) csFlags |= pcrIpChecksum | pcrFixIPChecksum; + if (flags & (pcrTcpChecksum | pcrUdpChecksum)) csFlags |= pcrTcpChecksum | pcrUdpChecksum| pcrFixXxpChecksum; + // software offload + ParaNdis_CheckSumVerify( + ipPacket, + ipPacketLength, + csFlags, + __FUNCTION__); + } + else + { + DPrintf(0, ("[%s] ERROR: Invalid buffer size for offload!", __FUNCTION__)); + result.size = 0; + result.error = cpeInternalError; + } + } + pContext->nofFreeTxDescriptors--; + if (result.size) + { + eInspectedPacketType packetType; + packetType = QueryPacketType(pBuffersDescriptor->DataInfo.Virtual); + DebugDumpPacket("sending", pBuffersDescriptor->DataInfo.Virtual, 3); + + pBuffersDescriptor->nofUsedBuffers = nRequiredHardwareBuffers; + pContext->nofFreeHardwareBuffers -= nRequiredHardwareBuffers; + if (pContext->minFreeHardwareBuffers > pContext->nofFreeHardwareBuffers) + pContext->minFreeHardwareBuffers = pContext->nofFreeHardwareBuffers; + if (0 > virtqueue_add_buf( + pContext->NetSendQueue, + sg, + 2, + 0, + pBuffersDescriptor, + NULL, + 0 + )) + { + pBuffersDescriptor->nofUsedBuffers = 0; + pContext->nofFreeHardwareBuffers += nRequiredHardwareBuffers; + result.error = cpeInternalError; + result.size = 0; + DPrintf(0, ("[%s] Unexpected ERROR adding buffer to TX engine!..", __FUNCTION__)); + } + else + { + DPrintf(2, ("[%s] Submitted %d buffers (%d bytes), avail %d desc, %d bufs", + __FUNCTION__, nRequiredHardwareBuffers, result.size, + pContext->nofFreeTxDescriptors, pContext->nofFreeHardwareBuffers + )); + } + if (result.error != cpeOK) + { + InsertTailList(&pContext->NetFreeSendBuffers, &pBuffersDescriptor->listEntry); + pContext->nofFreeTxDescriptors++; + } + else + { + ULONG reportedSize = pParams->ulDataSize; + pBuffersDescriptor->ReferenceValue = pParams->ReferenceValue; + InsertTailList(&pContext->NetSendBuffersInUse, &pBuffersDescriptor->listEntry); + pContext->Statistics.ifHCOutOctets += reportedSize; + switch (packetType) + { + case iptBroadcast: + pContext->Statistics.ifHCOutBroadcastOctets += reportedSize; + pContext->Statistics.ifHCOutBroadcastPkts++; + break; + case iptMulticast: + pContext->Statistics.ifHCOutMulticastOctets += reportedSize; + pContext->Statistics.ifHCOutMulticastPkts++; + break; + default: + pContext->Statistics.ifHCOutUcastOctets += reportedSize; + pContext->Statistics.ifHCOutUcastPkts++; + break; + } + } + } + else + { + DPrintf(0, ("[%s] Unexpected ERROR in copying packet data! 
Continue...", __FUNCTION__)); + InsertTailList(&pContext->NetFreeSendBuffers, &pBuffersDescriptor->listEntry); + pContext->nofFreeTxDescriptors++; + // the buffer is not copied and the callback will not be called + result.error = cpeInternalError; + } + } + + return result; +} + +static ULONG ShallPassPacket(PARANDIS_ADAPTER *pContext, PVOID address, UINT len, eInspectedPacketType *pType) +{ + ULONG b; + if (len <= sizeof(ETH_HEADER)) return FALSE; + if (len > pContext->MaxPacketSize.nMaxFullSizeHwRx) return FALSE; + if (len > pContext->MaxPacketSize.nMaxFullSizeOS && !ETH_HAS_PRIO_HEADER(address)) return FALSE; + *pType = QueryPacketType(address); + if (pContext->PacketFilter & NDIS_PACKET_TYPE_PROMISCUOUS) return TRUE; + + switch(*pType) + { + case iptBroadcast: + b = pContext->PacketFilter & NDIS_PACKET_TYPE_BROADCAST; + break; + case iptMulticast: + b = pContext->PacketFilter & NDIS_PACKET_TYPE_ALL_MULTICAST; + if (!b && (pContext->PacketFilter & NDIS_PACKET_TYPE_MULTICAST)) + { + UINT i, n = pContext->MulticastData.nofMulticastEntries * ETH_LENGTH_OF_ADDRESS; + b = 1; + for (i = 0; b && i < n; i += ETH_LENGTH_OF_ADDRESS) + { + ETH_COMPARE_NETWORK_ADDRESSES((PUCHAR)address, &pContext->MulticastData.MulticastList[i], &b) + } + b = !b; + } + break; + default: + ETH_COMPARE_NETWORK_ADDRESSES((PUCHAR)address, pContext->CurrentMacAddress, &b); + b = !b && (pContext->PacketFilter & NDIS_PACKET_TYPE_DIRECTED); + break; + } + if (!b) + { + pContext->extraStatistics.framesFilteredOut++; + } + return b; +} + +void +ParaNdis_PadPacketReceived(PVOID pDataBuffer, PULONG pLength) +{ + // Ethernet standard declares minimal possible packet size + // Packets smaller than that must be padded before transfer + // Ethernet HW pads packets on transmit, however in our case + // some packets do not travel over Ethernet but being routed + // guest-to-guest by virtual switch. + // In this case padding is not performed and we may + // receive packet smaller than minimal allowed size. This is not + // a problem for real life scenarios however WHQL/HCK contains + // tests that check padding of received packets. + // To make these tests happy we have to pad small packets on receive + + //NOTE: This function assumes that VLAN header has been already stripped out + + if(*pLength < ETH_MIN_PACKET_SIZE) + { + RtlZeroMemory(RtlOffsetToPointer(pDataBuffer, *pLength), ETH_MIN_PACKET_SIZE - *pLength); + *pLength = ETH_MIN_PACKET_SIZE; + } +} + +/********************************************************** +Manages RX path, calling NDIS-specific procedure for packet indication +Parameters: + context +***********************************************************/ +static UINT ParaNdis_ProcessRxPath(PARANDIS_ADAPTER *pContext, ULONG ulMaxPacketsToIndicate) +{ + pIONetDescriptor pBuffersDescriptor; + UINT len, headerSize = pContext->nVirtioHeaderSize; + eInspectedPacketType packetType = iptInvalid; + UINT nReceived = 0, nRetrieved = 0, nReported = 0; + tPacketIndicationType *pBatchOfPackets; + UINT maxPacketsInBatch = pContext->NetMaxReceiveBuffers; + pBatchOfPackets = pContext->bBatchReceive ? + ParaNdis_AllocateMemory(pContext, maxPacketsInBatch * sizeof(tPacketIndicationType)) : NULL; + NdisAcquireSpinLock(&pContext->ReceiveLock); + while ((nReported < ulMaxPacketsToIndicate) && NULL != (pBuffersDescriptor = virtqueue_get_buf(pContext->NetReceiveQueue, &len))) + { + PVOID pDataBuffer = RtlOffsetToPointer(pBuffersDescriptor->DataInfo.Virtual, pContext->bUseMergedBuffers ? 
pContext->nVirtioHeaderSize : 0); + RemoveEntryList(&pBuffersDescriptor->listEntry); + InsertTailList(&pContext->NetReceiveBuffersWaiting, &pBuffersDescriptor->listEntry); + pContext->NetNofReceiveBuffers--; + nRetrieved++; + DPrintf(2, ("[%s] retrieved header+%d b.", __FUNCTION__, len - headerSize)); + DebugDumpPacket("receive", pDataBuffer, 3); + + if( !pContext->bSurprizeRemoved && + ShallPassPacket(pContext, pDataBuffer, len - headerSize, &packetType) && + pContext->ReceiveState == srsEnabled && + pContext->bConnected) + { + BOOLEAN b = FALSE; + ULONG length = len - headerSize; + if (!pBatchOfPackets) + { + NdisReleaseSpinLock(&pContext->ReceiveLock); + b = NULL != ParaNdis_IndicateReceivedPacket( + pContext, + pDataBuffer, + &length, + FALSE, + pBuffersDescriptor); + NdisAcquireSpinLock(&pContext->ReceiveLock); + } + else + { + tPacketIndicationType packet; + packet = ParaNdis_IndicateReceivedPacket( + pContext, + pDataBuffer, + &length, + TRUE, + pBuffersDescriptor); + b = packet != NULL; + if (b) pBatchOfPackets[nReceived] = packet; + } + if (!b) + { + pContext->ReuseBufferProc(pContext, pBuffersDescriptor); + //only possible reason for that is unexpected Vlan tag + //shall I count it as error? + pContext->Statistics.ifInErrors++; + pContext->Statistics.ifInDiscards++; + } + else + { + nReceived++; + nReported++; + pContext->Statistics.ifHCInOctets += length; + switch(packetType) + { + case iptBroadcast: + pContext->Statistics.ifHCInBroadcastPkts++; + pContext->Statistics.ifHCInBroadcastOctets += length; + break; + case iptMulticast: + pContext->Statistics.ifHCInMulticastPkts++; + pContext->Statistics.ifHCInMulticastOctets += length; + break; + default: + pContext->Statistics.ifHCInUcastPkts++; + pContext->Statistics.ifHCInUcastOctets += length; + break; + } + if (pBatchOfPackets && nReceived == maxPacketsInBatch) + { + DPrintf(1, ("[%s] received %d buffers of max %d", __FUNCTION__, nReceived, ulMaxPacketsToIndicate)); + NdisReleaseSpinLock(&pContext->ReceiveLock); + ParaNdis_IndicateReceivedBatch(pContext, pBatchOfPackets, nReceived); + NdisAcquireSpinLock(&pContext->ReceiveLock); + nReceived = 0; + } + } + } + else + { + // reuse packet, there is no data or the RX is suppressed + pContext->ReuseBufferProc(pContext, pBuffersDescriptor); + } + } + ParaNdis_DebugHistory(pContext, hopReceiveStat, NULL, nRetrieved, nReported, pContext->NetNofReceiveBuffers); + NdisReleaseSpinLock(&pContext->ReceiveLock); + if (nReceived && pBatchOfPackets) + { + DPrintf(1, ("[%s]%d: received %d buffers of max %d", __FUNCTION__, KeGetCurrentProcessorNumber(), nReceived, ulMaxPacketsToIndicate)); + ParaNdis_IndicateReceivedBatch(pContext, pBatchOfPackets, nReceived); + } + if (pBatchOfPackets) NdisFreeMemory(pBatchOfPackets, 0, 0); + return nReported; +} + +void ParaNdis_ReportLinkStatus(PARANDIS_ADAPTER *pContext, BOOLEAN bForce) +{ + BOOLEAN bConnected = TRUE; + if (pContext->bLinkDetectSupported) + { + USHORT linkStatus = 0; + USHORT offset = sizeof(pContext->CurrentMacAddress); + // link changed + virtio_get_config(&pContext->IODevice, offset, &linkStatus, sizeof(linkStatus)); + bConnected = (linkStatus & VIRTIO_NET_S_LINK_UP) != 0; + } + ParaNdis_IndicateConnect(pContext, bConnected, bForce); +} + +static BOOLEAN RestartQueueSynchronously(tSynchronizedContext *SyncContext) +{ + struct virtqueue * _vq = (struct virtqueue *) SyncContext->Parameter; + bool res = true; + if (!virtqueue_enable_cb(_vq)) + { + virtqueue_disable_cb(_vq); + res = false; + } + + ParaNdis_DebugHistory(SyncContext->pContext, 
hopDPC, (PVOID)SyncContext->Parameter, 0x20, res, 0); + return !res; +} +/********************************************************** +DPC implementation, common for both NDIS +Parameters: + context +***********************************************************/ +ULONG ParaNdis_DPCWorkBody(PARANDIS_ADAPTER *pContext, ULONG ulMaxPacketsToIndicate) +{ + ULONG stillRequiresProcessing = 0; + ULONG interruptSources; + UINT uIndicatedRXPackets = 0; + UINT numOfPacketsToIndicate = min(ulMaxPacketsToIndicate, pContext->uNumberOfHandledRXPacketsInDPC); + + DEBUG_ENTRY(5); + if (pContext->bEnableInterruptHandlingDPC) + { + InterlockedIncrement(&pContext->counterDPCInside); + if (pContext->bEnableInterruptHandlingDPC) + { + BOOLEAN bDoKick = FALSE; + + InterlockedExchange(&pContext->bDPCInactive, 0); + interruptSources = InterlockedExchange(&pContext->InterruptStatus, 0); + ParaNdis_DebugHistory(pContext, hopDPC, (PVOID)1, interruptSources, 0, 0); + if ((interruptSources & isControl) && pContext->bLinkDetectSupported) + { + ParaNdis_ReportLinkStatus(pContext, FALSE); + } + if (interruptSources & isTransmit) + { + bDoKick = ParaNdis_ProcessTx(pContext, TRUE, TRUE); + } + if (interruptSources & isReceive) + { + int nRestartResult = 0; + + do + { + LONG rxActive = InterlockedIncrement(&pContext->dpcReceiveActive); + if (rxActive == 1) + { + uIndicatedRXPackets += ParaNdis_ProcessRxPath(pContext, numOfPacketsToIndicate - uIndicatedRXPackets); + InterlockedDecrement(&pContext->dpcReceiveActive); + NdisAcquireSpinLock(&pContext->ReceiveLock); + nRestartResult = ParaNdis_SynchronizeWithInterrupt( + pContext, pContext->ulRxMessage, RestartQueueSynchronously, pContext->NetReceiveQueue); + ParaNdis_DebugHistory(pContext, hopDPC, (PVOID)3, nRestartResult, 0, 0); + NdisReleaseSpinLock(&pContext->ReceiveLock); + DPrintf(nRestartResult ? 2 : 6, ("[%s] queue restarted%s", __FUNCTION__, nRestartResult ? 
"(Rerun)" : "(Done)")); + + if (uIndicatedRXPackets < numOfPacketsToIndicate) + { + + } + else if (uIndicatedRXPackets == numOfPacketsToIndicate) + { + DPrintf(1, ("[%s] Breaking Rx loop after %d indications", __FUNCTION__, uIndicatedRXPackets)); + ParaNdis_DebugHistory(pContext, hopDPC, (PVOID)4, nRestartResult, 0, 0); + break; + } + else + { + DPrintf(0, ("[%s] Glitch found: %d allowed, %d indicated", __FUNCTION__, numOfPacketsToIndicate, uIndicatedRXPackets)); + ParaNdis_DebugHistory(pContext, hopDPC, (PVOID)6, nRestartResult, 0, 0); + } + } + else + { + InterlockedDecrement(&pContext->dpcReceiveActive); + if (!nRestartResult) + { + NdisAcquireSpinLock(&pContext->ReceiveLock); + nRestartResult = ParaNdis_SynchronizeWithInterrupt( + pContext, pContext->ulRxMessage, RestartQueueSynchronously, pContext->NetReceiveQueue); + ParaNdis_DebugHistory(pContext, hopDPC, (PVOID)5, nRestartResult, 0, 0); + NdisReleaseSpinLock(&pContext->ReceiveLock); + } + DPrintf(1, ("[%s] Skip Rx processing no.%d", __FUNCTION__, rxActive)); + break; + } + } while (nRestartResult); + + if (nRestartResult) stillRequiresProcessing |= isReceive; + } + + if (interruptSources & isTransmit) + { + NdisAcquireSpinLock(&pContext->SendLock); + if (ParaNdis_SynchronizeWithInterrupt(pContext, pContext->ulTxMessage, RestartQueueSynchronously, pContext->NetSendQueue)) + stillRequiresProcessing |= isTransmit; + if(bDoKick) + { +#ifdef PARANDIS_TEST_TX_KICK_ALWAYS + virtqueue_kick_always(pContext->NetSendQueue); +#else + virtqueue_kick(pContext->NetSendQueue); +#endif + } + NdisReleaseSpinLock(&pContext->SendLock); + } + } + InterlockedDecrement(&pContext->counterDPCInside); + ParaNdis_DebugHistory(pContext, hopDPC, NULL, stillRequiresProcessing, pContext->nofFreeHardwareBuffers, pContext->nofFreeTxDescriptors); + } + return stillRequiresProcessing; +} + +/********************************************************** +Periodically called procedure, checking dpc activity +If DPC are not running, it does exactly the same that the DPC +Parameters: + context +***********************************************************/ +static BOOLEAN CheckRunningDpc(PARANDIS_ADAPTER *pContext) +{ + BOOLEAN bStopped; + BOOLEAN bReportHang = FALSE; + bStopped = 0 != InterlockedExchange(&pContext->bDPCInactive, TRUE); + + if (bStopped) + { + pContext->nDetectedInactivity++; + if (pContext->nEnableDPCChecker) + { + if (pContext->NetTxPacketsToReturn) + { + DPrintf(0, ("[%s] - NO ACTIVITY!", __FUNCTION__)); + if (!pContext->Limits.nPrintDiagnostic) PrintStatistics(pContext); + if (pContext->nEnableDPCChecker > 1) + { + int isrStatus1, isrStatus2; + isrStatus1 = virtio_read_isr_status(&pContext->IODevice); + isrStatus2 = virtio_read_isr_status(&pContext->IODevice); + if (isrStatus1 || isrStatus2) + { + DPrintf(0, ("WARNING: Interrupt status %d=>%d", isrStatus1, isrStatus2)); + } + } + // simulateDPC + InterlockedOr(&pContext->InterruptStatus, isAny); + ParaNdis_DPCWorkBody(pContext, PARANDIS_UNLIMITED_PACKETS_TO_INDICATE); + } + } + } + else + { + pContext->nDetectedInactivity = 0; + } + + NdisAcquireSpinLock(&pContext->SendLock); + if (pContext->nofFreeHardwareBuffers != pContext->maxFreeHardwareBuffers) + { + if (pContext->nDetectedStoppedTx++ > 1) + { + DPrintf(0, ("[%s] - Suspicious Tx inactivity (%d)!", __FUNCTION__, pContext->nofFreeHardwareBuffers)); + //bReportHang = TRUE; +#ifdef DBG_USE_VIRTIO_PCI_ISR_FOR_HOST_REPORT + WriteVirtIODeviceByte(pContext->IODevice.isr, 0); +#endif + } + } + NdisReleaseSpinLock(&pContext->SendLock); + + + if 
(pContext->Limits.nPrintDiagnostic && + ++pContext->Counters.nPrintDiagnostic >= pContext->Limits.nPrintDiagnostic) + { + pContext->Counters.nPrintDiagnostic = 0; + // todo - collect more and put out optionally + PrintStatistics(pContext); + } + + if (pContext->Statistics.ifHCInOctets == pContext->Counters.prevIn) + { + pContext->Counters.nRxInactivity++; + if (pContext->Counters.nRxInactivity >= 10) + { +//#define CRASH_ON_NO_RX +#if defined(CRASH_ON_NO_RX) + ONPAUSECOMPLETEPROC proc = (ONPAUSECOMPLETEPROC)(PVOID)1; + proc(pContext); +#endif + } + } + else + { + pContext->Counters.nRxInactivity = 0; + pContext->Counters.prevIn = pContext->Statistics.ifHCInOctets; + } + return bReportHang; +} + +/********************************************************** +Common implementation of periodic poll +Parameters: + context +Return: + TRUE, if reset required +***********************************************************/ +BOOLEAN ParaNdis_CheckForHang(PARANDIS_ADAPTER *pContext) +{ + static int nHangOn = 0; + BOOLEAN b = nHangOn >= 3 && nHangOn < 6; + DEBUG_ENTRY(3); + b |= CheckRunningDpc(pContext); + //uncomment to cause 3 consecutive resets + //nHangOn++; + DEBUG_EXIT_STATUS(b ? 0 : 6, b); + return b; +} + +/********************************************************** +Common handler of multicast address configuration +Parameters: + PVOID Buffer array of addresses from NDIS + ULONG BufferSize size of incoming buffer + PUINT pBytesRead update on success + PUINT pBytesNeeded update on wrong buffer size +Return value: + SUCCESS or kind of failure +***********************************************************/ +NDIS_STATUS ParaNdis_SetMulticastList( + PARANDIS_ADAPTER *pContext, + PVOID Buffer, + ULONG BufferSize, + PUINT pBytesRead, + PUINT pBytesNeeded) +{ + NDIS_STATUS status; + ULONG length = BufferSize; + if (length > sizeof(pContext->MulticastData.MulticastList)) + { + status = NDIS_STATUS_MULTICAST_FULL; + *pBytesNeeded = sizeof(pContext->MulticastData.MulticastList); + } + else if (length % ETH_LENGTH_OF_ADDRESS) + { + status = NDIS_STATUS_INVALID_LENGTH; + *pBytesNeeded = (length / ETH_LENGTH_OF_ADDRESS) * ETH_LENGTH_OF_ADDRESS; + } + else + { + NdisZeroMemory(pContext->MulticastData.MulticastList, sizeof(pContext->MulticastData.MulticastList)); + if (length) + NdisMoveMemory(pContext->MulticastData.MulticastList, Buffer, length); + pContext->MulticastData.nofMulticastEntries = length / ETH_LENGTH_OF_ADDRESS; + DPrintf(1, ("[%s] New multicast list of %d bytes", __FUNCTION__, length)); + *pBytesRead = length; + status = NDIS_STATUS_SUCCESS; + } + return status; +} + +/********************************************************** +Callable from synchronized routine or interrupt handler +to enable or disable Rx and/or Tx interrupt generation +Parameters: + context + interruptSource - isReceive, isTransmit + b - 1/0 enable/disable +***********************************************************/ +VOID ParaNdis_VirtIOEnableIrqSynchronized(PARANDIS_ADAPTER *pContext, ULONG interruptSource) +{ + if (interruptSource & isTransmit) + virtqueue_enable_cb(pContext->NetSendQueue); + if (interruptSource & isReceive) + virtqueue_enable_cb(pContext->NetReceiveQueue); + ParaNdis_DebugHistory(pContext, hopDPC, (PVOID)0x10, interruptSource, TRUE, 0); +} + +VOID ParaNdis_VirtIODisableIrqSynchronized(PARANDIS_ADAPTER *pContext, ULONG interruptSource) +{ + if (interruptSource & isTransmit) + virtqueue_disable_cb(pContext->NetSendQueue); + if (interruptSource & isReceive) + virtqueue_disable_cb(pContext->NetReceiveQueue); + 
ParaNdis_DebugHistory(pContext, hopDPC, (PVOID)0x10, interruptSource, FALSE, 0); +} + +/********************************************************** +Common handler of PnP events +Parameters: +Return value: +***********************************************************/ +VOID ParaNdis_OnPnPEvent( + PARANDIS_ADAPTER *pContext, + NDIS_DEVICE_PNP_EVENT pEvent, + PVOID pInfo, + ULONG ulSize) +{ + const char *pName = ""; + DEBUG_ENTRY(0); +#undef MAKECASE +#define MAKECASE(x) case (x): pName = #x; break; + switch (pEvent) + { + MAKECASE(NdisDevicePnPEventQueryRemoved) + MAKECASE(NdisDevicePnPEventRemoved) + MAKECASE(NdisDevicePnPEventSurpriseRemoved) + MAKECASE(NdisDevicePnPEventQueryStopped) + MAKECASE(NdisDevicePnPEventStopped) + MAKECASE(NdisDevicePnPEventPowerProfileChanged) + default: + break; + } + ParaNdis_DebugHistory(pContext, hopPnpEvent, NULL, pEvent, 0, 0); + DPrintf(0, ("[%s] (%s)", __FUNCTION__, pName)); + if (pEvent == NdisDevicePnPEventSurpriseRemoved) + { + // on simulated surprise removal (under PnpTest) we need to reset the device + // to prevent any access of device queues to memory buffers + pContext->bSurprizeRemoved = TRUE; + ParaNdis_ResetVirtIONetDevice(pContext); + } + pContext->PnpEvents[pContext->nPnpEventIndex++] = pEvent; + if (pContext->nPnpEventIndex > sizeof(pContext->PnpEvents)/sizeof(pContext->PnpEvents[0])) + pContext->nPnpEventIndex = 0; +} + +static BOOLEAN SendControlMessage( + PARANDIS_ADAPTER *pContext, + UCHAR cls, + UCHAR cmd, + PVOID buffer1, + ULONG size1, + PVOID buffer2, + ULONG size2, + int levelIfOK + ) +{ + BOOLEAN bOK = FALSE; + NdisAcquireSpinLock(&pContext->ReceiveLock); + if (pContext->ControlData.Virtual && pContext->ControlData.size > (size1 + size2 + 16)) + { + struct VirtIOBufferDescriptor sg[4]; + PUCHAR pBase = (PUCHAR)pContext->ControlData.Virtual; + PHYSICAL_ADDRESS phBase = pContext->ControlData.Physical; + ULONG offset = 0; + UINT nOut = 1; + + ((virtio_net_ctrl_hdr *)pBase)->class_of_command = cls; + ((virtio_net_ctrl_hdr *)pBase)->cmd = cmd; + sg[0].physAddr = phBase; + sg[0].length = sizeof(virtio_net_ctrl_hdr); + offset += sg[0].length; + offset = (offset + 3) & ~3; + if (size1) + { + NdisMoveMemory(pBase + offset, buffer1, size1); + sg[nOut].physAddr = phBase; + sg[nOut].physAddr.QuadPart += offset; + sg[nOut].length = size1; + offset += size1; + offset = (offset + 3) & ~3; + nOut++; + } + if (size2) + { + NdisMoveMemory(pBase + offset, buffer2, size2); + sg[nOut].physAddr = phBase; + sg[nOut].physAddr.QuadPart += offset; + sg[nOut].length = size2; + offset += size2; + offset = (offset + 3) & ~3; + nOut++; + } + sg[nOut].physAddr = phBase; + sg[nOut].physAddr.QuadPart += offset; + sg[nOut].length = sizeof(virtio_net_ctrl_ack); + *(virtio_net_ctrl_ack *)(pBase + offset) = VIRTIO_NET_ERR; + + if (0 <= virtqueue_add_buf(pContext->NetControlQueue, sg, nOut, 1, (PVOID)1, NULL, 0)) + { + UINT len; + void *p; + virtqueue_kick_always(pContext->NetControlQueue); + p = virtqueue_get_buf(pContext->NetControlQueue, &len); + if (!p) + { + DPrintf(0, ("%s - ERROR: get_buf failed", __FUNCTION__)); + } + else if (len != sizeof(virtio_net_ctrl_ack)) + { + DPrintf(0, ("%s - ERROR: wrong len %d", __FUNCTION__, len)); + } + else if (*(virtio_net_ctrl_ack *)(pBase + offset) != VIRTIO_NET_OK) + { + DPrintf(0, ("%s - ERROR: error %d returned", __FUNCTION__, *(virtio_net_ctrl_ack *)(pBase + offset))); + } + else + { + // everything is OK + DPrintf(levelIfOK, ("%s OK(%d.%d,buffers of %d and %d) ", __FUNCTION__, cls, cmd, size1, size2)); + bOK = TRUE; + } 
+ } + else + { + DPrintf(0, ("%s - ERROR: add_buf failed", __FUNCTION__)); + } + } + else + { + DPrintf(0, ("%s (buffer %d,%d) - ERROR: message too LARGE", __FUNCTION__, size1, size2)); + } + NdisReleaseSpinLock(&pContext->ReceiveLock); + return bOK; +} + +static VOID ParaNdis_DeviceFiltersUpdateRxMode(PARANDIS_ADAPTER *pContext) +{ + u8 val; + ULONG f = pContext->PacketFilter; + val = (f & NDIS_PACKET_TYPE_ALL_MULTICAST) ? 1 : 0; + SendControlMessage(pContext, VIRTIO_NET_CTRL_RX_MODE, VIRTIO_NET_CTRL_RX_MODE_ALLMULTI, &val, sizeof(val), NULL, 0, 2); + //SendControlMessage(pContext, VIRTIO_NET_CTRL_RX_MODE, VIRTIO_NET_CTRL_RX_MODE_ALLUNI, &val, sizeof(val), NULL, 0, 2); + val = (f & (NDIS_PACKET_TYPE_MULTICAST | NDIS_PACKET_TYPE_ALL_MULTICAST)) ? 0 : 1; + SendControlMessage(pContext, VIRTIO_NET_CTRL_RX_MODE, VIRTIO_NET_CTRL_RX_MODE_NOMULTI, &val, sizeof(val), NULL, 0, 2); + val = (f & NDIS_PACKET_TYPE_DIRECTED) ? 0 : 1; + SendControlMessage(pContext, VIRTIO_NET_CTRL_RX_MODE, VIRTIO_NET_CTRL_RX_MODE_NOUNI, &val, sizeof(val), NULL, 0, 2); + val = (f & NDIS_PACKET_TYPE_BROADCAST) ? 0 : 1; + SendControlMessage(pContext, VIRTIO_NET_CTRL_RX_MODE, VIRTIO_NET_CTRL_RX_MODE_NOBCAST, &val, sizeof(val), NULL, 0, 2); + val = (f & NDIS_PACKET_TYPE_PROMISCUOUS) ? 1 : 0; + SendControlMessage(pContext, VIRTIO_NET_CTRL_RX_MODE, VIRTIO_NET_CTRL_RX_MODE_PROMISC, &val, sizeof(val), NULL, 0, 2); +} + +static VOID ParaNdis_DeviceFiltersUpdateAddresses(PARANDIS_ADAPTER *pContext) +{ + struct + { + struct virtio_net_ctrl_mac header; + UCHAR addr[ETH_LENGTH_OF_ADDRESS]; + } uCast; + uCast.header.entries = 1; + NdisMoveMemory(uCast.addr, pContext->CurrentMacAddress, sizeof(uCast.addr)); + SendControlMessage(pContext, VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET, + &uCast, sizeof(uCast), &pContext->MulticastData,sizeof(pContext->MulticastData.nofMulticastEntries) + pContext->MulticastData.nofMulticastEntries * ETH_ALEN, 2); +} + +static VOID SetSingleVlanFilter(PARANDIS_ADAPTER *pContext, ULONG vlanId, BOOLEAN bOn, int levelIfOK) +{ + u16 val = vlanId & 0xfff; + UCHAR cmd = bOn ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL; + SendControlMessage(pContext, VIRTIO_NET_CTRL_VLAN, cmd, &val, sizeof(val), NULL, 0, levelIfOK); +} + +static VOID SetAllVlanFilters(PARANDIS_ADAPTER *pContext, BOOLEAN bOn) +{ + ULONG i; + for (i = 0; i <= MAX_VLAN_ID; ++i) + SetSingleVlanFilter(pContext, i, bOn, 7); +} + +/* + possible values of filter set (pContext->ulCurrentVlansFilterSet): + 0 - all disabled + 1..4095 - one selected enabled + 4096 - all enabled + Note that only 0th vlan can't be enabled +*/ +VOID ParaNdis_DeviceFiltersUpdateVlanId(PARANDIS_ADAPTER *pContext) +{ + if (pContext->bHasHardwareFilters) + { + ULONG newFilterSet; + if (IsVlanSupported(pContext)) + newFilterSet = pContext->VlanId ? pContext->VlanId : (MAX_VLAN_ID + 1); + else + newFilterSet = IsPrioritySupported(pContext) ? 
(MAX_VLAN_ID + 1) : 0; + if (newFilterSet != pContext->ulCurrentVlansFilterSet) + { + if (pContext->ulCurrentVlansFilterSet > MAX_VLAN_ID) + SetAllVlanFilters(pContext, FALSE); + else if (pContext->ulCurrentVlansFilterSet) + SetSingleVlanFilter(pContext, pContext->ulCurrentVlansFilterSet, FALSE, 2); + + pContext->ulCurrentVlansFilterSet = newFilterSet; + + if (pContext->ulCurrentVlansFilterSet > MAX_VLAN_ID) + SetAllVlanFilters(pContext, TRUE); + else if (pContext->ulCurrentVlansFilterSet) + SetSingleVlanFilter(pContext, pContext->ulCurrentVlansFilterSet, TRUE, 2); + } + } +} + +VOID ParaNdis_UpdateDeviceFilters(PARANDIS_ADAPTER *pContext) +{ + if (pContext->bHasHardwareFilters) + { + ParaNdis_DeviceFiltersUpdateRxMode(pContext); + ParaNdis_DeviceFiltersUpdateAddresses(pContext); + ParaNdis_DeviceFiltersUpdateVlanId(pContext); + } +} + +NDIS_STATUS ParaNdis_PowerOn(PARANDIS_ADAPTER *pContext) +{ + LIST_ENTRY TempList; + NDIS_STATUS status; + DEBUG_ENTRY(0); + ParaNdis_DebugHistory(pContext, hopPowerOn, NULL, 1, 0, 0); + ParaNdis_ResetVirtIONetDevice(pContext); + virtio_add_status(&pContext->IODevice, VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER); + /* virtio_get_features must be called once upon device initialization: + otherwise the device will not work properly */ + (void)virtio_get_features(&pContext->IODevice); + + if (pContext->bUseMergedBuffers) + VirtIODeviceEnableGuestFeature(pContext, VIRTIO_NET_F_MRG_RXBUF); + if (VirtIODeviceGetHostFeature(pContext, VIRTIO_RING_F_EVENT_IDX)) + VirtIODeviceEnableGuestFeature(pContext, VIRTIO_RING_F_EVENT_IDX); + if (pContext->bDoGuestChecksumOnReceive) + VirtIODeviceEnableGuestFeature(pContext, VIRTIO_NET_F_GUEST_CSUM); + if (VirtIODeviceGetHostFeature(pContext, VIRTIO_F_VERSION_1)) + VirtIODeviceEnableGuestFeature(pContext, VIRTIO_F_VERSION_1); + if (VirtIODeviceGetHostFeature(pContext, VIRTIO_F_ANY_LAYOUT)) + VirtIODeviceEnableGuestFeature(pContext, VIRTIO_F_ANY_LAYOUT); + + status = FinalizeFeatures(pContext); + if (status == NDIS_STATUS_SUCCESS) { + status = FindNetQueues(pContext); + } + if (status != NDIS_STATUS_SUCCESS) { + virtio_add_status(&pContext->IODevice, VIRTIO_CONFIG_S_FAILED); + return status; + } + + ParaNdis_RestoreDeviceConfigurationAfterReset(pContext); + + ParaNdis_UpdateDeviceFilters(pContext); + + InitializeListHead(&TempList); + + /* submit all the receive buffers */ + NdisAcquireSpinLock(&pContext->ReceiveLock); + + pContext->ReuseBufferProc = (tReuseReceiveBufferProc)ReuseReceiveBufferRegular; + + while (!IsListEmpty(&pContext->NetReceiveBuffers)) + { + pIONetDescriptor pBufferDescriptor = + (pIONetDescriptor)RemoveHeadList(&pContext->NetReceiveBuffers); + InsertTailList(&TempList, &pBufferDescriptor->listEntry); + } + pContext->NetNofReceiveBuffers = 0; + while (!IsListEmpty(&TempList)) + { + pIONetDescriptor pBufferDescriptor = + (pIONetDescriptor)RemoveHeadList(&TempList); + if (AddRxBufferToQueue(pContext, pBufferDescriptor)) + { + InsertTailList(&pContext->NetReceiveBuffers, &pBufferDescriptor->listEntry); + pContext->NetNofReceiveBuffers++; + } + else + { + DPrintf(0, ("FAILED TO REUSE THE BUFFER!!!!")); + VirtIONetFreeBufferDescriptor(pContext, pBufferDescriptor); + pContext->NetMaxReceiveBuffers--; + } + } + virtqueue_kick(pContext->NetReceiveQueue); + ParaNdis_SetPowerState(pContext, NdisDeviceStateD0); + pContext->bEnableInterruptHandlingDPC = TRUE; + virtio_device_ready(&pContext->IODevice); + + NdisReleaseSpinLock(&pContext->ReceiveLock); + + // if bFastSuspendInProcess is set by Win8 power-off 
procedure, + // the ParaNdis_Resume enables Tx and RX + // otherwise it does not do anything in Vista+ (Tx and RX are enabled after power-on by Restart) + ParaNdis_Resume(pContext); + pContext->bFastSuspendInProcess = FALSE; + + ParaNdis_ReportLinkStatus(pContext, TRUE); + ParaNdis_DebugHistory(pContext, hopPowerOn, NULL, 0, 0, 0); + + return status; +} + +VOID ParaNdis_PowerOff(PARANDIS_ADAPTER *pContext) +{ + DEBUG_ENTRY(0); + ParaNdis_DebugHistory(pContext, hopPowerOff, NULL, 1, 0, 0); + + ParaNdis_IndicateConnect(pContext, FALSE, FALSE); + + // if bFastSuspendInProcess is set by Win8 power-off procedure + // the ParaNdis_Suspend does fast Rx stop without waiting (=>srsPausing, if there are some RX packets in Ndis) + pContext->bFastSuspendInProcess = pContext->bNoPauseOnSuspend && pContext->ReceiveState == srsEnabled; + ParaNdis_Suspend(pContext); + if (pContext->IODevice.addr) + { + /* back compat - remove the OK flag only in legacy mode */ + VirtIODeviceRemoveStatus(&pContext->IODevice, VIRTIO_CONFIG_S_DRIVER_OK); + } + + if (pContext->bFastSuspendInProcess) + { + NdisAcquireSpinLock(&pContext->ReceiveLock); + pContext->ReuseBufferProc = (tReuseReceiveBufferProc)ReuseReceiveBufferPowerOff; + NdisReleaseSpinLock(&pContext->ReceiveLock); + } + + ParaNdis_SetPowerState(pContext, NdisDeviceStateD3); + + PreventDPCServicing(pContext); + + /******************************************************************* + shutdown queues to have all the receive buffers under our control + all the transmit buffers move to list of free buffers + ********************************************************************/ + + NdisAcquireSpinLock(&pContext->SendLock); + virtqueue_shutdown(pContext->NetSendQueue); + while (!IsListEmpty(&pContext->NetSendBuffersInUse)) + { + pIONetDescriptor pBufferDescriptor = + (pIONetDescriptor)RemoveHeadList(&pContext->NetSendBuffersInUse); + InsertTailList(&pContext->NetFreeSendBuffers, &pBufferDescriptor->listEntry); + pContext->nofFreeTxDescriptors++; + pContext->nofFreeHardwareBuffers += pBufferDescriptor->nofUsedBuffers; + } + NdisReleaseSpinLock(&pContext->SendLock); + + NdisAcquireSpinLock(&pContext->ReceiveLock); + virtqueue_shutdown(pContext->NetReceiveQueue); + NdisReleaseSpinLock(&pContext->ReceiveLock); + if (pContext->NetControlQueue) { + virtqueue_shutdown(pContext->NetControlQueue); + } + + DPrintf(0, ("WARNING: deleting queues!!!!!!!!!")); + DeleteNetQueues(pContext); + pContext->NetSendQueue = NULL; + pContext->NetReceiveQueue = NULL; + pContext->NetControlQueue = NULL; + + ParaNdis_ResetVirtIONetDevice(pContext); + ParaNdis_DebugHistory(pContext, hopPowerOff, NULL, 0, 0, 0); +} + +void ParaNdis_CallOnBugCheck(PARANDIS_ADAPTER *pContext) +{ + if (pContext->IODevice.isr) + { +#ifdef DBG_USE_VIRTIO_PCI_ISR_FOR_HOST_REPORT + WriteVirtIODeviceByte(pContext->IODevice.isr, 1); +#endif + } +} + +tChecksumCheckResult ParaNdis_CheckRxChecksum(PARANDIS_ADAPTER *pContext, ULONG virtioFlags, PVOID pRxPacket, ULONG len) +{ + tOffloadSettingsFlags f = pContext->Offload.flags; + tChecksumCheckResult res, resIp; + PVOID pIpHeader = RtlOffsetToPointer(pRxPacket, ETH_HEADER_SIZE); + tTcpIpPacketParsingResult ppr; + ULONG flagsToCalculate = 0; + res.value = 0; + resIp.value = 0; + + //VIRTIO_NET_HDR_F_NEEDS_CSUM - we need to calculate TCP/UDP CS + //VIRTIO_NET_HDR_F_DATA_VALID - host tells us TCP/UDP CS is OK + + if (f.fRxIPChecksum) flagsToCalculate |= pcrIpChecksum; // check only + + if (!(virtioFlags & VIRTIO_NET_HDR_F_DATA_VALID)) + { + if (virtioFlags & 
VIRTIO_NET_HDR_F_NEEDS_CSUM) + { + flagsToCalculate |= pcrFixXxpChecksum | pcrTcpChecksum | pcrUdpChecksum; + } + else + { + if (f.fRxTCPChecksum) flagsToCalculate |= pcrTcpV4Checksum; + if (f.fRxUDPChecksum) flagsToCalculate |= pcrUdpV4Checksum; + if (f.fRxTCPv6Checksum) flagsToCalculate |= pcrTcpV6Checksum; + if (f.fRxUDPv6Checksum) flagsToCalculate |= pcrUdpV6Checksum; + } + } + + ppr = ParaNdis_CheckSumVerify(pIpHeader, len - ETH_HEADER_SIZE, flagsToCalculate, __FUNCTION__); + + if (virtioFlags & VIRTIO_NET_HDR_F_DATA_VALID) + { + pContext->extraStatistics.framesRxCSHwOK++; + ppr.xxpCheckSum = ppresCSOK; + } + + if (ppr.ipStatus == ppresIPV4 && !ppr.IsFragment) + { + if (f.fRxIPChecksum) + { + res.flags.IpOK = ppr.ipCheckSum == ppresCSOK; + res.flags.IpFailed = ppr.ipCheckSum == ppresCSBad; + } + if(ppr.xxpStatus == ppresXxpKnown) + { + if(ppr.TcpUdp == ppresIsTCP) /* TCP */ + { + if (f.fRxTCPChecksum) + { + res.flags.TcpOK = ppr.xxpCheckSum == ppresCSOK || ppr.fixedXxpCS; + res.flags.TcpFailed = !res.flags.TcpOK; + } + } + else /* UDP */ + { + if (f.fRxUDPChecksum) + { + res.flags.UdpOK = ppr.xxpCheckSum == ppresCSOK || ppr.fixedXxpCS; + res.flags.UdpFailed = !res.flags.UdpOK; + } + } + } + } + else if (ppr.ipStatus == ppresIPV6) + { + if(ppr.xxpStatus == ppresXxpKnown) + { + if(ppr.TcpUdp == ppresIsTCP) /* TCP */ + { + if (f.fRxTCPv6Checksum) + { + res.flags.TcpOK = ppr.xxpCheckSum == ppresCSOK || ppr.fixedXxpCS; + res.flags.TcpFailed = !res.flags.TcpOK; + } + } + else /* UDP */ + { + if (f.fRxUDPv6Checksum) + { + res.flags.UdpOK = ppr.xxpCheckSum == ppresCSOK || ppr.fixedXxpCS; + res.flags.UdpFailed = !res.flags.UdpOK; + } + } + } + } + + if (pContext->bDoIPCheckRx && + (f.fRxIPChecksum || f.fRxTCPChecksum || f.fRxUDPChecksum || f.fRxTCPv6Checksum || f.fRxUDPv6Checksum)) + { + ppr = ParaNdis_CheckSumVerify(pIpHeader, len - ETH_HEADER_SIZE, pcrAnyChecksum, __FUNCTION__); + if (ppr.ipStatus == ppresIPV4 && !ppr.IsFragment) + { + resIp.flags.IpOK = !!f.fRxIPChecksum && ppr.ipCheckSum == ppresCSOK; + resIp.flags.IpFailed = !!f.fRxIPChecksum && ppr.ipCheckSum == ppresCSBad; + if (f.fRxTCPChecksum && ppr.xxpStatus == ppresXxpKnown && ppr.TcpUdp == ppresIsTCP) + { + resIp.flags.TcpOK = ppr.xxpCheckSum == ppresCSOK; + resIp.flags.TcpFailed = ppr.xxpCheckSum == ppresCSBad; + } + if (f.fRxUDPChecksum && ppr.xxpStatus == ppresXxpKnown && ppr.TcpUdp == ppresIsUDP) + { + resIp.flags.UdpOK = ppr.xxpCheckSum == ppresCSOK; + resIp.flags.UdpFailed = ppr.xxpCheckSum == ppresCSBad; + } + } + else if (ppr.ipStatus == ppresIPV6) + { + if (f.fRxTCPv6Checksum && ppr.xxpStatus == ppresXxpKnown && ppr.TcpUdp == ppresIsTCP) + { + resIp.flags.TcpOK = ppr.xxpCheckSum == ppresCSOK; + resIp.flags.TcpFailed = ppr.xxpCheckSum == ppresCSBad; + } + if (f.fRxUDPv6Checksum && ppr.xxpStatus == ppresXxpKnown && ppr.TcpUdp == ppresIsUDP) + { + resIp.flags.UdpOK = ppr.xxpCheckSum == ppresCSOK; + resIp.flags.UdpFailed = ppr.xxpCheckSum == ppresCSBad; + } + } + + if (res.value != resIp.value) + { + // if HW did not set some bits that IP checker set, it is a mistake: + // or GOOD CS is not labeled, or BAD checksum is not labeled + tChecksumCheckResult diff; + diff.value = resIp.value & ~res.value; + if (diff.flags.IpFailed || diff.flags.TcpFailed || diff.flags.UdpFailed) + pContext->extraStatistics.framesRxCSHwMissedBad++; + if (diff.flags.IpOK || diff.flags.TcpOK || diff.flags.UdpOK) + pContext->extraStatistics.framesRxCSHwMissedGood++; + if (diff.value) + { + DPrintf(0, ("[%s] real %X <> %X (virtio %X)", __FUNCTION__, 
resIp.value, res.value, virtioFlags)); + } + res.value = resIp.value; + } + } + + return res; +} diff --git a/drivers/network/dd/netkvm/Common/ParaNdis-Debug.c b/drivers/network/dd/netkvm/Common/ParaNdis-Debug.c new file mode 100644 index 00000000000..f66b92502f8 --- /dev/null +++ b/drivers/network/dd/netkvm/Common/ParaNdis-Debug.c @@ -0,0 +1,394 @@ +/* + * This file contains debug support procedures, common for NDIS5 and NDIS6 + * + * Copyright (c) 2008-2017 Red Hat, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ +#include "ndis56common.h" +#include "stdarg.h" +#include "ntstrsafe.h" + +//#define OVERRIDE_DEBUG_BREAK + +#ifdef WPP_EVENT_TRACING +#include "ParaNdis-Debug.tmh" +#endif + +int virtioDebugLevel = 1; +int nDebugLevel = 1; +int bDebugPrint = 1; + +static NDIS_SPIN_LOCK CrashLock; + +static KBUGCHECK_REASON_CALLBACK_ROUTINE ParaNdis_OnBugCheck; +static VOID NTAPI ParaNdis_OnBugCheck( + IN KBUGCHECK_CALLBACK_REASON Reason, + IN PKBUGCHECK_REASON_CALLBACK_RECORD Record, + IN OUT PVOID ReasonSpecificData, + IN ULONG ReasonSpecificDataLength +); +static VOID ParaNdis_PrepareBugCheckData(); + +typedef BOOLEAN (*KeRegisterBugCheckReasonCallbackType) ( + __out PKBUGCHECK_REASON_CALLBACK_RECORD CallbackRecord, + __in PKBUGCHECK_REASON_CALLBACK_ROUTINE CallbackRoutine, + __in KBUGCHECK_CALLBACK_REASON Reason, + __in PUCHAR Component + ); + +typedef BOOLEAN (*KeDeregisterBugCheckReasonCallbackType) ( + __inout PKBUGCHECK_REASON_CALLBACK_RECORD CallbackRecord + ); + +typedef ULONG (*vDbgPrintExType)( + __in ULONG ComponentId, + __in ULONG Level, + __in PCCH Format, + __in va_list arglist + ); + +static ULONG DummyPrintProcedure( + __in ULONG ComponentId, + __in ULONG Level, + __in PCCH Format, + __in va_list arglist + ) +{ + return 0; +} +static BOOLEAN KeRegisterBugCheckReasonCallbackDummyProc( + __out PKBUGCHECK_REASON_CALLBACK_RECORD CallbackRecord, + __in PKBUGCHECK_REASON_CALLBACK_ROUTINE CallbackRoutine, + __in KBUGCHECK_CALLBACK_REASON Reason, + __in PUCHAR Component + ) +{ + CallbackRecord->State = 0; + return FALSE; +} + +BOOLEAN KeDeregisterBugCheckReasonCallbackDummyProc( + __inout PKBUGCHECK_REASON_CALLBACK_RECORD CallbackRecord + ) +{ + return FALSE; +} + +static vDbgPrintExType PrintProcedure = DummyPrintProcedure; +static KeRegisterBugCheckReasonCallbackType BugCheckRegisterCallback = KeRegisterBugCheckReasonCallbackDummyProc; +static KeDeregisterBugCheckReasonCallbackType BugCheckDeregisterCallback = KeDeregisterBugCheckReasonCallbackDummyProc; +KBUGCHECK_REASON_CALLBACK_RECORD CallbackRecord; + +#if !defined(WPP_EVENT_TRACING) || defined(WPP_USE_BYPASS) +#if defined(DPFLTR_MASK) + +//common case, except Win2K +static void DebugPrint(const char *fmt, ...) 
+{ + va_list list; + va_start(list, fmt); + PrintProcedure(DPFLTR_DEFAULT_ID, 9 | DPFLTR_MASK, fmt, list); +#if defined(VIRTIO_DBG_USE_IOPORT) + { + NTSTATUS status; + // use this way of output only for DISPATCH_LEVEL, + // higher requires more protection + if (KeGetCurrentIrql() <= DISPATCH_LEVEL) + { + char buf[256]; + size_t len, i; + buf[0] = 0; + status = RtlStringCbVPrintfA(buf, sizeof(buf), fmt, list); + if (status == STATUS_SUCCESS) len = strlen(buf); + else if (status == STATUS_BUFFER_OVERFLOW) len = sizeof(buf); + else { memcpy(buf, "Can't print", 11); len = 11; } + NdisAcquireSpinLock(&CrashLock); + for (i = 0; i < len; ++i) + { + NdisRawWritePortUchar(VIRTIO_DBG_USE_IOPORT, buf[i]); + } + NdisRawWritePortUchar(VIRTIO_DBG_USE_IOPORT, '\n'); + NdisReleaseSpinLock(&CrashLock); + } + } +#endif +} + +DEBUGPRINTFUNC pDebugPrint = DebugPrint; +DEBUGPRINTFUNC VirtioDebugPrintProc = DebugPrint; + +#else //DPFLTR_MASK +#pragma message("DebugPrint for Win2K") + +DEBUGPRINTFUNC pDebugPrint = DbgPrint; +DEBUGPRINTFUNC VirtioDebugPrintProc = DbgPrint; + +#endif //DPFLTR_MASK +#endif //!defined(WPP_EVENT_TRACING) || defined(WPP_USE_BYPASS) + + + +void _LogOutEntry(int level, const char *s) +{ + DPrintf(level, ("[%s]=>", s)); +} + +void _LogOutExitValue(int level, const char *s, ULONG value) +{ + DPrintf(level, ("[%s]<=0x%X", s, value)); +} + +void _LogOutString(int level, const char *s) +{ + DPrintf(level, ("[%s]", s)); +} + +VOID WppEnableCallback( + __in LPCGUID Guid, + __in __int64 Logger, + __in BOOLEAN Enable, + __in ULONG Flags, + __in UCHAR Level) +{ +#if WPP_USE_BYPASS + DPrintfBypass(0, ("[%s] %s, flags %X, level %d", + __FUNCTION__, Enable ? "enabled" : "disabled", + Flags, (ULONG)Level)); +#endif + nDebugLevel = Level; + bDebugPrint = Enable; +} + + +#ifdef OVERRIDE_DEBUG_BREAK +static PUCHAR pDbgBreakPoint; +static UCHAR DbgBreakPointChunk[5]; +static void AnotherDbgBreak() +{ + DPrintf(0, ("Somebody tried to break into the debugger!")); +} +#endif + +void ParaNdis_DebugInitialize(PVOID DriverObject,PVOID RegistryPath) +{ + NDIS_STRING usRegister, usDeregister, usPrint; + PVOID pr, pd; + BOOLEAN res; + WPP_INIT_TRACING(DriverObject, RegistryPath); + + NdisAllocateSpinLock(&CrashLock); + KeInitializeCallbackRecord(&CallbackRecord); + ParaNdis_PrepareBugCheckData(); + NdisInitUnicodeString(&usPrint, L"vDbgPrintEx"); + NdisInitUnicodeString(&usRegister, L"KeRegisterBugCheckReasonCallback"); + NdisInitUnicodeString(&usDeregister, L"KeDeregisterBugCheckReasonCallback"); + pd = MmGetSystemRoutineAddress(&usPrint); + if (pd) PrintProcedure = (vDbgPrintExType)pd; + pr = MmGetSystemRoutineAddress(&usRegister); + pd = MmGetSystemRoutineAddress(&usDeregister); + if (pr && pd) + { + BugCheckRegisterCallback = (KeRegisterBugCheckReasonCallbackType)pr; + BugCheckDeregisterCallback = (KeDeregisterBugCheckReasonCallbackType)pd; + } + res = BugCheckRegisterCallback(&CallbackRecord, ParaNdis_OnBugCheck, KbCallbackSecondaryDumpData, "NetKvm"); + DPrintf(0, ("[%s] Crash callback %sregistered", __FUNCTION__, res ? 
"" : "NOT ")); + +#ifdef OVERRIDE_DEBUG_BREAK + if (sizeof(PVOID) == sizeof(ULONG)) + { + UCHAR replace[5] = {0xe9,0,0,0,0}; + ULONG replacement; + NDIS_STRING usDbgBreakPointName; + NdisInitUnicodeString(&usDbgBreakPointName, L"DbgBreakPoint"); + pDbgBreakPoint = (PUCHAR)MmGetSystemRoutineAddress(&usDbgBreakPointName); + if (pDbgBreakPoint) + { + DPrintf(0, ("Replacing original BP handler at %p", pDbgBreakPoint)); + replacement = RtlPointerToOffset(pDbgBreakPoint + 5, AnotherDbgBreak); + RtlCopyMemory(replace + 1, &replacement, sizeof(replacement)); + RtlCopyMemory(DbgBreakPointChunk, pDbgBreakPoint, sizeof(DbgBreakPointChunk)); + RtlCopyMemory(pDbgBreakPoint, replace, sizeof(replace)); + } + } +#endif +} + +void ParaNdis_DebugCleanup(PDRIVER_OBJECT pDriverObject) +{ +#ifdef OVERRIDE_DEBUG_BREAK + if (sizeof(PVOID) == sizeof(ULONG) && pDbgBreakPoint) + { + DPrintf(0, ("Restoring original BP handler at %p", pDbgBreakPoint)); + RtlCopyMemory(pDbgBreakPoint, DbgBreakPointChunk, sizeof(DbgBreakPointChunk)); + } +#endif + BugCheckDeregisterCallback(&CallbackRecord); + WPP_CLEANUP(pDriverObject); +} + + +#define MAX_CONTEXTS 4 +#if defined(ENABLE_HISTORY_LOG) +#define MAX_HISTORY 0x40000 +#else +#define MAX_HISTORY 2 +#endif +typedef struct _tagBugCheckStaticData +{ + tBugCheckStaticDataHeader Header; + tBugCheckPerNicDataContent PerNicData[MAX_CONTEXTS]; + tBugCheckStaticDataContent Data; + tBugCheckHistoryDataEntry History[MAX_HISTORY]; +}tBugCheckStaticData; + + +typedef struct _tagBugCheckData +{ + tBugCheckStaticData StaticData; + tBugCheckDataLocation Location; +}tBugCheckData; + +static tBugCheckData BugCheckData; +static BOOLEAN bNative = TRUE; + +VOID ParaNdis_PrepareBugCheckData() +{ + BugCheckData.StaticData.Header.StaticDataVersion = PARANDIS_DEBUG_STATIC_DATA_VERSION; + BugCheckData.StaticData.Header.PerNicDataVersion = PARANDIS_DEBUG_PER_NIC_DATA_VERSION; + BugCheckData.StaticData.Header.ulMaxContexts = MAX_CONTEXTS; + BugCheckData.StaticData.Header.SizeOfPointer = sizeof(PVOID); + BugCheckData.StaticData.Header.PerNicData = (UINT_PTR)(PVOID)BugCheckData.StaticData.PerNicData; + BugCheckData.StaticData.Header.DataArea = (UINT64)&BugCheckData.StaticData.Data; + BugCheckData.StaticData.Header.DataAreaSize = sizeof(BugCheckData.StaticData.Data); + BugCheckData.StaticData.Data.HistoryDataVersion = PARANDIS_DEBUG_HISTORY_DATA_VERSION; + BugCheckData.StaticData.Data.SizeOfHistory = MAX_HISTORY; + BugCheckData.StaticData.Data.SizeOfHistoryEntry = sizeof(tBugCheckHistoryDataEntry); + BugCheckData.StaticData.Data.HistoryData = (UINT_PTR)(PVOID)BugCheckData.StaticData.History; + BugCheckData.Location.Address = (UINT64)&BugCheckData; + BugCheckData.Location.Size = sizeof(BugCheckData); +} + +void ParaNdis_DebugRegisterMiniport(PARANDIS_ADAPTER *pContext, BOOLEAN bRegister) +{ + UINT i; + NdisAcquireSpinLock(&CrashLock); + for (i = 0; i < MAX_CONTEXTS; ++i) + { + UINT64 val1 = bRegister ? 0 : (UINT_PTR)pContext; + UINT64 val2 = bRegister ? 
(UINT_PTR)pContext : 0; + if (BugCheckData.StaticData.PerNicData[i].Context != val1) continue; + BugCheckData.StaticData.PerNicData[i].Context = val2; + break; + } + NdisReleaseSpinLock(&CrashLock); +} + +static UINT FillDataOnBugCheck() +{ + UINT i, n = 0; + NdisGetCurrentSystemTime(&BugCheckData.StaticData.Header.qCrashTime); + for (i = 0; i < MAX_CONTEXTS; ++i) + { + tBugCheckPerNicDataContent *pSave = &BugCheckData.StaticData.PerNicData[i]; + PARANDIS_ADAPTER *p = (PARANDIS_ADAPTER *)pSave->Context; + if (!p) continue; + pSave->nofPacketsToComplete = p->NetTxPacketsToReturn; + pSave->nofReadyTxBuffers = p->nofFreeHardwareBuffers; + pSave->LastInterruptTimeStamp.QuadPart = PARANDIS_GET_LAST_INTERRUPT_TIMESTAMP(p); + pSave->LastTxCompletionTimeStamp = p->LastTxCompletionTimeStamp; + ParaNdis_CallOnBugCheck(p); + ++n; + } + return n; +} + +VOID NTAPI ParaNdis_OnBugCheck( + IN KBUGCHECK_CALLBACK_REASON Reason, + IN PKBUGCHECK_REASON_CALLBACK_RECORD Record, + IN OUT PVOID ReasonSpecificData, + IN ULONG ReasonSpecificDataLength + ) +{ + KBUGCHECK_SECONDARY_DUMP_DATA *pDump = (KBUGCHECK_SECONDARY_DUMP_DATA *)ReasonSpecificData; + if (KbCallbackSecondaryDumpData == Reason && ReasonSpecificDataLength >= sizeof(*pDump)) + { + ULONG dumpSize = sizeof(BugCheckData.Location); + if (!pDump->OutBuffer) + { + UINT nSaved; + nSaved = FillDataOnBugCheck(); + if (pDump->InBufferLength >= dumpSize) + { + pDump->OutBuffer = pDump->InBuffer; + pDump->OutBufferLength = dumpSize; + } + else + { + pDump->OutBuffer = &BugCheckData.Location; + pDump->OutBufferLength = dumpSize; + bNative = FALSE; + } + DPrintf(0, ("[%s] system buffer of %d, saving data for %d NIC", __FUNCTION__,pDump->InBufferLength, nSaved)); + DPrintf(0, ("[%s] using %s buffer", __FUNCTION__, bNative ? "native" : "own")); + } + else if (pDump->OutBuffer == pDump->InBuffer) + { + RtlCopyMemory(&pDump->Guid, &ParaNdis_CrashGuid, sizeof(pDump->Guid)); + RtlCopyMemory(pDump->InBuffer, &BugCheckData.Location, dumpSize); + pDump->OutBufferLength = dumpSize; + DPrintf(0, ("[%s] written %d to %p", __FUNCTION__, (ULONG)BugCheckData.Location.Size, (UINT_PTR)BugCheckData.Location.Address )); + DPrintf(0, ("[%s] dump data (%d) at %p", __FUNCTION__, pDump->OutBufferLength, pDump->OutBuffer)); + } + } +} + +#if defined(ENABLE_HISTORY_LOG) +void ParaNdis_DebugHistory( + PARANDIS_ADAPTER *pContext, + eHistoryLogOperation op, + PVOID pParam1, + ULONG lParam2, + ULONG lParam3, + ULONG lParam4) +{ + tBugCheckHistoryDataEntry *phe; + ULONG index = InterlockedIncrement(&BugCheckData.StaticData.Data.CurrentHistoryIndex); + index = (index - 1) % MAX_HISTORY; + phe = &BugCheckData.StaticData.History[index]; + phe->Context = (UINT_PTR)pContext; + phe->operation = op; + phe->pParam1 = (UINT_PTR)pParam1; + phe->lParam2 = lParam2; + phe->lParam3 = lParam3; + phe->lParam4 = lParam4; +#if (PARANDIS_DEBUG_HISTORY_DATA_VERSION == 1) + phe->uIRQL = KeGetCurrentIrql(); + phe->uProcessor = KeGetCurrentProcessorNumber(); +#endif + NdisGetCurrentSystemTime(&phe->TimeStamp); +} + +#endif diff --git a/drivers/network/dd/netkvm/Common/ParaNdis-Oid.c b/drivers/network/dd/netkvm/Common/ParaNdis-Oid.c new file mode 100644 index 00000000000..8d54b445dca --- /dev/null +++ b/drivers/network/dd/netkvm/Common/ParaNdis-Oid.c @@ -0,0 +1,677 @@ +/* + * This file contains NDIS OID support procedures, common for NDIS5 and NDIS6 + * + * Copyright (c) 2008-2017 Red Hat, Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +#include "ParaNdis-Oid.h" + +#ifdef WPP_EVENT_TRACING +#include "ParaNdis-Oid.tmh" +#endif +#include + +static const char VendorName[] = "Red Hat"; + +static UCHAR FORCEINLINE hexdigit(UCHAR nibble) +{ + UCHAR c = nibble & 0xf; + c += (c <= 9) ? 0 : 7; + c += '0'; + return c; +} + +/********************************************************** +Common implementation of copy operation when OID is set +pOid->Flags (if used) controls when the source data may be truncated or padded on copy +Parameters: + tOidDesc *pOid - descriptor of OID + PVOID pDest - buffer to receive data sent by NDIS + ULONG ulSize - size of data to copy +Return value: + SUCCESS or NDIS error code if target buffer size is wrong +Rules: + +PDEST <>OK SIZE PAYLOAD SZ +NULL any n/a any fail +BUFF any 0 any success, none copied +BUFF any SZ ==SZ success, copied SZ +BUFF !lessok SZ SZ fail (overflow), none copied +BUFF lessok SZ SZ success, copied SZ +***************************************************/ +NDIS_STATUS ParaNdis_OidSetCopy( + tOidDesc *pOid, + PVOID pDest, + ULONG ulSize) +{ + NDIS_STATUS status = NDIS_STATUS_SUCCESS; + if (!pDest) + { + status = NDIS_STATUS_INVALID_OID; + *(pOid->pBytesRead) = 0; + *(pOid->pBytesNeeded) = 0; + } + else if (ulSize) + { + if (pOid->InformationBufferLength < ulSize) + { + if (pOid->ulToDoFlags & ohfSetLessOK) + { + *(pOid->pBytesRead) = pOid->InformationBufferLength; + NdisZeroMemory(pDest, ulSize); + NdisMoveMemory(pDest, pOid->InformationBuffer, pOid->InformationBufferLength); + } + else + { + status = NDIS_STATUS_BUFFER_TOO_SHORT; + *(pOid->pBytesRead) = 0; + *(pOid->pBytesNeeded) = ulSize; + } + } + else if (pOid->InformationBufferLength == ulSize || (pOid->ulToDoFlags & ohfSetMoreOK)) + { + *(pOid->pBytesRead) = ulSize; + NdisMoveMemory(pDest, pOid->InformationBuffer, ulSize); + } + else + { + status = NDIS_STATUS_BUFFER_OVERFLOW; + *(pOid->pBytesNeeded) = ulSize; + *(pOid->pBytesRead) = 0; + } + } + else + { + *(pOid->pBytesRead) = pOid->InformationBufferLength; + } + return 
status; +} + + +/********************************************************** +Common handler of setting packet filter +***********************************************************/ +NDIS_STATUS ParaNdis_OnSetPacketFilter(PARANDIS_ADAPTER *pContext, tOidDesc *pOid) +{ + ULONG newValue; + NDIS_STATUS status = ParaNdis_OidSetCopy( + pOid, + &newValue, + sizeof(newValue)); + + if (newValue & ~PARANDIS_PACKET_FILTERS) + status = NDIS_STATUS_INVALID_DATA; + + if (status == NDIS_STATUS_SUCCESS) + { + pContext->PacketFilter = newValue; + DPrintf(1, ("[%s] PACKET FILTER SET TO %x", __FUNCTION__, pContext->PacketFilter)); + ParaNdis_UpdateDeviceFilters(pContext); + } + return status; +} + +void ParaNdis_FillPowerCapabilities(PNDIS_PNP_CAPABILITIES pCaps) +{ + NdisZeroMemory(pCaps, sizeof(*pCaps)); + pCaps->WakeUpCapabilities.MinMagicPacketWakeUp = NdisDeviceStateUnspecified; + pCaps->WakeUpCapabilities.MinPatternWakeUp = NdisDeviceStateUnspecified; + pCaps->WakeUpCapabilities.MinLinkChangeWakeUp = NdisDeviceStateUnspecified; +} + + +/********************************************************** +Common handler of setting multicast list +***********************************************************/ +NDIS_STATUS ParaNdis_OnOidSetMulticastList(PARANDIS_ADAPTER *pContext, tOidDesc *pOid) +{ + NDIS_STATUS status; + status = ParaNdis_SetMulticastList( + pContext, + pOid->InformationBuffer, + pOid->InformationBufferLength, + pOid->pBytesRead, + pOid->pBytesNeeded); + ParaNdis_UpdateDeviceFilters(pContext); + return status; +} + +/********************************************************** +Common helper of copy operation on GET OID +Copies data from specified location to NDIS buffer +64-bit variable will be casted to 32-bit, if specified on pOid->Flags + +Parameters: + tOidDesc *pOid - descriptor of OID + PVOID pInfo - source to copy from + ULONG ulSize - source info size +Return value: + SUCCESS or kind of failure when the dest buffer size is wrong +Comments: +pInfo must be non-NULL, otherwise error returned +ulSize may be 0, then SUCCESS returned without copy +***********************************************************/ +NDIS_STATUS ParaNdis_OidQueryCopy( + tOidDesc *pOid, + PVOID pInfo, + ULONG ulSize, + BOOLEAN bFreeInfo) +{ + NDIS_STATUS status = NDIS_STATUS_SUCCESS; + *(pOid->pBytesNeeded) = ulSize; + if (!pInfo) + { + status = NDIS_STATUS_INVALID_OID; + *(pOid->pBytesWritten) = 0; + *(pOid->pBytesNeeded) = 0; + } + else if (pOid->InformationBufferLength >= ulSize) + { + if (ulSize) NdisMoveMemory(pOid->InformationBuffer, pInfo, ulSize); + *(pOid->pBytesWritten) = ulSize; + *(pOid->pBytesNeeded) = 0; + } + else if ((pOid->ulToDoFlags & ohfQuery3264) && pOid->InformationBufferLength == sizeof(ULONG) && ulSize == sizeof(ULONG64)) + { + ULONG64 u64 = *(ULONG64 *)pInfo; + ULONG ul = (ULONG)u64; + NdisMoveMemory(pOid->InformationBuffer, &ul, sizeof(ul)); + *(pOid->pBytesWritten) = sizeof(ul); + } + else + { + status = NDIS_STATUS_BUFFER_TOO_SHORT; + *(pOid->pBytesWritten) = 0; + } + if (bFreeInfo && pInfo) + { + NdisFreeMemory(pInfo, 0, 0); + } + return status; +} + +/********************************************************** +Common handler of Oid queries +Parameters: + context + tOidDesc *pOid - filled descriptor of OID operation +Return value: + SUCCESS or kind of failure +***********************************************************/ +NDIS_STATUS ParaNdis_OidQueryCommon(PARANDIS_ADAPTER *pContext, tOidDesc *pOid) +{ + NDIS_STATUS status = NDIS_STATUS_SUCCESS; + PVOID pInfo = NULL; + ULONG ulSize = 0; + BOOLEAN 
bFreeInfo = FALSE; + union _tagtemp + { + NDIS_MEDIUM Medium; + ULONG64 ul64; + ULONG ul; + USHORT us; + NDIS_PNP_CAPABILITIES PMCaps; + } u; +#if defined(_MSC_VER) + #define CONCATFIELD(object, field) object.##field +#else + #define CONCATFIELD(object, field) object.field +#endif +#define SETINFO(field, value) pInfo = CONCATFIELD(&u, field); ulSize = sizeof(CONCATFIELD(u, field)); CONCATFIELD(u, field) = (value) + switch (pOid->Oid) + { + case OID_GEN_SUPPORTED_LIST: + ParaNdis_GetSupportedOid(&pInfo, &ulSize); + break; + case OID_GEN_HARDWARE_STATUS: + SETINFO(ul, NdisHardwareStatusReady); + break; + case OID_GEN_MEDIA_SUPPORTED: + __fallthrough; + case OID_GEN_MEDIA_IN_USE: + SETINFO(Medium, NdisMedium802_3); + break; + case OID_GEN_MAXIMUM_LOOKAHEAD: + SETINFO(ul, pContext->MaxPacketSize.nMaxFullSizeOS); + break; + case OID_GEN_MAXIMUM_FRAME_SIZE: + SETINFO(ul, pContext->MaxPacketSize.nMaxDataSize); + break; + case OID_GEN_TRANSMIT_BUFFER_SPACE: + SETINFO(ul, pContext->MaxPacketSize.nMaxFullSizeOS * pContext->nofFreeTxDescriptors); + break; + case OID_GEN_RECEIVE_BUFFER_SPACE: + SETINFO(ul, pContext->MaxPacketSize.nMaxFullSizeOS * pContext->NetMaxReceiveBuffers); + break; + case OID_GEN_RECEIVE_BLOCK_SIZE: + __fallthrough; + case OID_GEN_TRANSMIT_BLOCK_SIZE: + __fallthrough; + case OID_GEN_MAXIMUM_TOTAL_SIZE: + SETINFO(ul, pContext->MaxPacketSize.nMaxFullSizeOS); + break; + case OID_GEN_TRANSMIT_QUEUE_LENGTH: + // TODO: this is not completely correct, but only if + // the TX queue is not full + SETINFO(ul, pContext->maxFreeTxDescriptors - pContext->nofFreeTxDescriptors); + break; + case OID_GEN_VENDOR_ID: + SETINFO(ul, 0x00ffffff); + break; + case OID_GEN_VENDOR_DESCRIPTION: + pInfo = (PVOID)VendorName; + ulSize = sizeof(VendorName); + break; + + case OID_GEN_VENDOR_DRIVER_VERSION: + SETINFO(ul, (NDIS_MINIPORT_MAJOR_VERSION << 16) | NDIS_MINIPORT_MINOR_VERSION); + break; + case OID_GEN_CURRENT_PACKET_FILTER: + pInfo = &pContext->PacketFilter; + ulSize = sizeof(pContext->PacketFilter); + break; + case OID_GEN_DRIVER_VERSION: + SETINFO(us, ((NDIS_MINIPORT_MAJOR_VERSION << 8) | NDIS_MINIPORT_MINOR_VERSION)); + break; + case OID_GEN_MAC_OPTIONS: + { + ULONG options = NDIS_MAC_OPTION_COPY_LOOKAHEAD_DATA | + NDIS_MAC_OPTION_TRANSFERS_NOT_PEND | + NDIS_MAC_OPTION_NO_LOOPBACK; + if (IsPrioritySupported(pContext)) + options |= NDIS_MAC_OPTION_8021P_PRIORITY; + if (IsVlanSupported(pContext)) + options |= NDIS_MAC_OPTION_8021Q_VLAN; + SETINFO(ul, options); + } + break; + case OID_GEN_MEDIA_CONNECT_STATUS: + SETINFO(ul, pContext->bConnected ? 
NdisMediaStateConnected : NdisMediaStateDisconnected); + //NdisMediaStateConnected: + break; + case OID_GEN_MAXIMUM_SEND_PACKETS: + // NDIS ignores it for deserialized drivers + SETINFO(ul,pContext->nofFreeTxDescriptors); + break; + case OID_802_3_PERMANENT_ADDRESS: + pInfo = pContext->PermanentMacAddress; + ulSize = sizeof(pContext->PermanentMacAddress); + break; + case OID_802_3_CURRENT_ADDRESS: + pInfo = pContext->CurrentMacAddress; + ulSize = sizeof(pContext->CurrentMacAddress); + break; + case OID_PNP_QUERY_POWER: + // size if 0, just to indicate success + pInfo = &status; + break; + case OID_GEN_DIRECTED_BYTES_XMIT: + SETINFO(ul64, pContext->Statistics.ifHCOutUcastOctets); + break; + case OID_GEN_DIRECTED_FRAMES_XMIT: + SETINFO(ul64, pContext->Statistics.ifHCOutUcastPkts); + break; + case OID_GEN_MULTICAST_BYTES_XMIT: + SETINFO(ul64, pContext->Statistics.ifHCOutMulticastOctets); + break; + case OID_GEN_MULTICAST_FRAMES_XMIT: + SETINFO(ul64, pContext->Statistics.ifHCOutMulticastPkts); + break; + case OID_GEN_BROADCAST_BYTES_XMIT: + SETINFO(ul64, pContext->Statistics.ifHCOutBroadcastOctets); + break; + case OID_GEN_BROADCAST_FRAMES_XMIT: + SETINFO(ul64, pContext->Statistics.ifHCOutBroadcastPkts); + break; + case OID_GEN_DIRECTED_BYTES_RCV: + SETINFO(ul64, pContext->Statistics.ifHCInUcastOctets); + break; + case OID_GEN_DIRECTED_FRAMES_RCV: + SETINFO(ul64, pContext->Statistics.ifHCInUcastPkts); + break; + case OID_GEN_MULTICAST_BYTES_RCV: + SETINFO(ul64, pContext->Statistics.ifHCInMulticastOctets); + break; + case OID_GEN_MULTICAST_FRAMES_RCV: + SETINFO(ul64, pContext->Statistics.ifHCInMulticastPkts); + break; + case OID_GEN_BROADCAST_BYTES_RCV: + SETINFO(ul64, pContext->Statistics.ifHCInBroadcastOctets); + break; + case OID_GEN_BROADCAST_FRAMES_RCV: + SETINFO(ul64, pContext->Statistics.ifHCInBroadcastPkts); + break; + case OID_GEN_XMIT_OK: + SETINFO(ul64, + pContext->Statistics.ifHCOutUcastPkts + + pContext->Statistics.ifHCOutMulticastPkts + + pContext->Statistics.ifHCOutBroadcastPkts); + break; + case OID_GEN_RCV_OK: + SETINFO(ul64, + pContext->Statistics.ifHCInUcastPkts + + pContext->Statistics.ifHCInMulticastPkts + + pContext->Statistics.ifHCInBroadcastPkts); + DPrintf(4, ("[%s] Total frames %I64u", __FUNCTION__, u.ul64)); + break; + case OID_GEN_XMIT_ERROR: + SETINFO(ul64, pContext->Statistics.ifOutErrors ); + break; + case OID_GEN_RCV_ERROR: + __fallthrough; + case OID_GEN_RCV_NO_BUFFER: + __fallthrough; + case OID_802_3_RCV_OVERRUN: + __fallthrough; + case OID_GEN_RCV_CRC_ERROR: + __fallthrough; + case OID_802_3_RCV_ERROR_ALIGNMENT: + __fallthrough; + case OID_802_3_XMIT_UNDERRUN: + __fallthrough; + case OID_802_3_XMIT_ONE_COLLISION: + __fallthrough; + case OID_802_3_XMIT_DEFERRED: + __fallthrough; + case OID_802_3_XMIT_MAX_COLLISIONS: + __fallthrough; + case OID_802_3_XMIT_MORE_COLLISIONS: + __fallthrough; + case OID_802_3_XMIT_HEARTBEAT_FAILURE: + __fallthrough; + case OID_802_3_XMIT_TIMES_CRS_LOST: + __fallthrough; + case OID_802_3_XMIT_LATE_COLLISIONS: + SETINFO(ul64, 0); + break; + case OID_802_3_MULTICAST_LIST: + pInfo = pContext->MulticastData.MulticastList; + ulSize = pContext->MulticastData.nofMulticastEntries * ETH_LENGTH_OF_ADDRESS; + break; + case OID_802_3_MAXIMUM_LIST_SIZE: + SETINFO(ul, PARANDIS_MULTICAST_LIST_SIZE); + break; + case OID_PNP_CAPABILITIES: + pInfo = &u.PMCaps; + ulSize = sizeof(u.PMCaps); + ParaNdis_FillPowerCapabilities(&u.PMCaps); + break; + case OID_802_3_MAC_OPTIONS: + SETINFO(ul, 0); + break; + case OID_GEN_VLAN_ID: + SETINFO(ul, 
pContext->VlanId); + if (!IsVlanSupported(pContext)) + status = NDIS_STATUS_NOT_SUPPORTED; + break; + case OID_GEN_CURRENT_LOOKAHEAD: + if (!pContext->DummyLookAhead) pContext->DummyLookAhead = pContext->MaxPacketSize.nMaxFullSizeOS; + pInfo = &pContext->DummyLookAhead; + ulSize = sizeof(pContext->DummyLookAhead); + break; + case OID_PNP_ENABLE_WAKE_UP: + SETINFO(ul, pContext->ulEnableWakeup); + break; + default: + status = NDIS_STATUS_NOT_SUPPORTED; + break; + } + + if (status == NDIS_STATUS_SUCCESS) + { + status = ParaNdis_OidQueryCopy(pOid, pInfo, ulSize, bFreeInfo); + } + + return status; +} + + +/********************************************************** + Just gets OID name +***********************************************************/ +const char *ParaNdis_OidName(NDIS_OID oid) +{ +#undef MAKECASE +#define MAKECASE(id) case id: return #id; + switch (oid) + { + MAKECASE(OID_GEN_SUPPORTED_LIST) + MAKECASE(OID_GEN_HARDWARE_STATUS) + MAKECASE(OID_GEN_MEDIA_SUPPORTED) + MAKECASE(OID_GEN_MEDIA_IN_USE) + MAKECASE(OID_GEN_MAXIMUM_LOOKAHEAD) + MAKECASE(OID_GEN_MAXIMUM_FRAME_SIZE) + MAKECASE(OID_GEN_LINK_SPEED) + MAKECASE(OID_GEN_TRANSMIT_BUFFER_SPACE) + MAKECASE(OID_GEN_RECEIVE_BUFFER_SPACE) + MAKECASE(OID_GEN_TRANSMIT_BLOCK_SIZE) + MAKECASE(OID_GEN_RECEIVE_BLOCK_SIZE) + MAKECASE(OID_GEN_VENDOR_ID) + MAKECASE(OID_GEN_VENDOR_DESCRIPTION) + MAKECASE(OID_GEN_CURRENT_PACKET_FILTER) + MAKECASE(OID_GEN_CURRENT_LOOKAHEAD) + MAKECASE(OID_GEN_DRIVER_VERSION) + MAKECASE(OID_GEN_MAXIMUM_TOTAL_SIZE) + MAKECASE(OID_GEN_PROTOCOL_OPTIONS) + MAKECASE(OID_GEN_MAC_OPTIONS) + MAKECASE(OID_GEN_MEDIA_CONNECT_STATUS) + MAKECASE(OID_GEN_MAXIMUM_SEND_PACKETS) + MAKECASE(OID_GEN_VENDOR_DRIVER_VERSION) + MAKECASE(OID_GEN_SUPPORTED_GUIDS) + MAKECASE(OID_GEN_TRANSPORT_HEADER_OFFSET) + MAKECASE(OID_GEN_MEDIA_CAPABILITIES) + MAKECASE(OID_GEN_PHYSICAL_MEDIUM) + MAKECASE(OID_GEN_XMIT_OK) + MAKECASE(OID_GEN_RCV_OK) + MAKECASE(OID_GEN_XMIT_ERROR) + MAKECASE(OID_GEN_RCV_ERROR) + MAKECASE(OID_GEN_RCV_NO_BUFFER) + MAKECASE(OID_GEN_DIRECTED_BYTES_XMIT) + MAKECASE(OID_GEN_DIRECTED_FRAMES_XMIT) + MAKECASE(OID_GEN_MULTICAST_BYTES_XMIT) + MAKECASE(OID_GEN_MULTICAST_FRAMES_XMIT) + MAKECASE(OID_GEN_BROADCAST_BYTES_XMIT) + MAKECASE(OID_GEN_BROADCAST_FRAMES_XMIT) + MAKECASE(OID_GEN_DIRECTED_BYTES_RCV) + MAKECASE(OID_GEN_DIRECTED_FRAMES_RCV) + MAKECASE(OID_GEN_MULTICAST_BYTES_RCV) + MAKECASE(OID_GEN_MULTICAST_FRAMES_RCV) + MAKECASE(OID_GEN_BROADCAST_BYTES_RCV) + MAKECASE(OID_GEN_BROADCAST_FRAMES_RCV) + MAKECASE(OID_GEN_RCV_CRC_ERROR) + MAKECASE(OID_GEN_TRANSMIT_QUEUE_LENGTH) + MAKECASE(OID_GEN_GET_TIME_CAPS) + MAKECASE(OID_GEN_GET_NETCARD_TIME) + MAKECASE(OID_GEN_NETCARD_LOAD) + MAKECASE(OID_GEN_DEVICE_PROFILE) + MAKECASE(OID_GEN_INIT_TIME_MS) + MAKECASE(OID_GEN_RESET_COUNTS) + MAKECASE(OID_GEN_MEDIA_SENSE_COUNTS) + MAKECASE(OID_GEN_VLAN_ID) + MAKECASE(OID_PNP_CAPABILITIES) + MAKECASE(OID_PNP_SET_POWER) + MAKECASE(OID_PNP_QUERY_POWER) + MAKECASE(OID_PNP_ADD_WAKE_UP_PATTERN) + MAKECASE(OID_PNP_REMOVE_WAKE_UP_PATTERN) + MAKECASE(OID_PNP_ENABLE_WAKE_UP) + MAKECASE(OID_802_3_PERMANENT_ADDRESS) + MAKECASE(OID_802_3_CURRENT_ADDRESS) + MAKECASE(OID_802_3_MULTICAST_LIST) + MAKECASE(OID_802_3_MAXIMUM_LIST_SIZE) + MAKECASE(OID_802_3_MAC_OPTIONS) + MAKECASE(OID_802_3_RCV_ERROR_ALIGNMENT) + MAKECASE(OID_802_3_XMIT_ONE_COLLISION) + MAKECASE(OID_802_3_XMIT_MORE_COLLISIONS) + MAKECASE(OID_802_3_XMIT_DEFERRED) + MAKECASE(OID_802_3_XMIT_MAX_COLLISIONS) + MAKECASE(OID_802_3_RCV_OVERRUN) + MAKECASE(OID_802_3_XMIT_UNDERRUN) + 
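/* Editor's note (illustrative, not part of the original patch): MAKECASE relies on
 * the preprocessor stringizing operator, so each entry in this switch emits both the
 * case label and its printable name. For example,
 *     MAKECASE(OID_GEN_VLAN_ID)
 * expands to
 *     case OID_GEN_VLAN_ID: return "OID_GEN_VLAN_ID";
 */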
MAKECASE(OID_802_3_XMIT_HEARTBEAT_FAILURE) + MAKECASE(OID_802_3_XMIT_TIMES_CRS_LOST) + MAKECASE(OID_802_3_XMIT_LATE_COLLISIONS) + MAKECASE(OID_GEN_MACHINE_NAME) + MAKECASE(OID_TCP_TASK_OFFLOAD) + MAKECASE(OID_TCP_OFFLOAD_PARAMETERS) + MAKECASE(OID_OFFLOAD_ENCAPSULATION) + MAKECASE(OID_IP4_OFFLOAD_STATS) + MAKECASE(OID_IP6_OFFLOAD_STATS) + default: + { + static UCHAR buffer[9]; + UINT i; + for (i = 0; i < 8; ++i) + { + UCHAR nibble = (UCHAR)((oid >> (28 - i * 4)) & 0xf); + buffer[i] = hexdigit(nibble); + } + return (char *)buffer; + } + } +} + +/********************************************************** +Checker of valid size of provided wake-up patter +Return value: SUCCESS or kind of failure where the buffer is wrong +***********************************************************/ +static NDIS_STATUS ValidateWakeupPattern(PNDIS_PM_PACKET_PATTERN p, PULONG pValidSize) +{ + NDIS_STATUS status = NDIS_STATUS_BUFFER_TOO_SHORT; + + if (*pValidSize < sizeof(*p)) + { + *pValidSize = sizeof(*p); + } + else + { + ULONG ul = p->PatternOffset + p->PatternSize; + if (*pValidSize >= ul) status = NDIS_STATUS_SUCCESS; + *pValidSize = ul; + DPrintf(2, ("[%s] pattern of %d at %d, mask %d (%s)", + __FUNCTION__, p->PatternSize, p->PatternOffset, p->MaskSize, + status == NDIS_STATUS_SUCCESS ? "OK" : "Fail")); + } + return status; +} + + +/********************************************************** +Common handler of wake-up pattern addition +***********************************************************/ +NDIS_STATUS ParaNdis_OnAddWakeupPattern(PARANDIS_ADAPTER *pContext, tOidDesc *pOid) +{ + NDIS_STATUS status; + PNDIS_PM_PACKET_PATTERN pPmPattern = (PNDIS_PM_PACKET_PATTERN) pOid->InformationBuffer; + ULONG ulValidSize = pOid->InformationBufferLength; + status = ValidateWakeupPattern(pPmPattern, &ulValidSize); + if (status == NDIS_STATUS_SUCCESS) + { + *pOid->pBytesRead = ulValidSize; + } + else + { + *pOid->pBytesRead = 0; + *pOid->pBytesNeeded = ulValidSize; + } + // TODO: Apply + return status; +} + +/********************************************************** +Common handler of wake-up pattern removal +***********************************************************/ +NDIS_STATUS ParaNdis_OnRemoveWakeupPattern(PARANDIS_ADAPTER *pContext, tOidDesc *pOid) +{ + NDIS_STATUS status; + PNDIS_PM_PACKET_PATTERN pPmPattern = (PNDIS_PM_PACKET_PATTERN) pOid->InformationBuffer; + ULONG ulValidSize = pOid->InformationBufferLength; + status = ValidateWakeupPattern(pPmPattern, &ulValidSize); + if (status == NDIS_STATUS_SUCCESS) + { + *pOid->pBytesRead = ulValidSize; + } + else + { + *pOid->pBytesRead = 0; + *pOid->pBytesNeeded = ulValidSize; + } + return status; +} + +/********************************************************** +Common handler of wake-up enabling upon standby +***********************************************************/ +NDIS_STATUS ParaNdis_OnEnableWakeup(PARANDIS_ADAPTER *pContext, tOidDesc *pOid) +{ + NDIS_STATUS status = ParaNdis_OidSetCopy(pOid, &pContext->ulEnableWakeup, sizeof(pContext->ulEnableWakeup)); + if (status == NDIS_STATUS_SUCCESS) + { + DPrintf(0, ("[%s] new value %lX", __FUNCTION__, pContext->ulEnableWakeup)); + } + return status; +} + +/********************************************************** +Dummy implementation +***********************************************************/ +NDIS_STATUS ParaNdis_OnSetLookahead(PARANDIS_ADAPTER *pContext, tOidDesc *pOid) +{ + return ParaNdis_OidSetCopy(pOid, &pContext->DummyLookAhead, sizeof(pContext->DummyLookAhead)); +} + +NDIS_STATUS 
ParaNdis_OnSetVlanId(PARANDIS_ADAPTER *pContext, tOidDesc *pOid) +{ + NDIS_STATUS status = NDIS_STATUS_NOT_SUPPORTED; + if (IsVlanSupported(pContext)) + { + status = ParaNdis_OidSetCopy(pOid, &pContext->VlanId, sizeof(pContext->VlanId)); + pContext->VlanId &= 0xfff; + DPrintf(0, ("[%s] new value %d on MAC %X", __FUNCTION__, pContext->VlanId, pContext->CurrentMacAddress[5])); + ParaNdis_DeviceFiltersUpdateVlanId(pContext); + } + return status; +} + +/********************************************************** +Retrieves support rules for specific OID +***********************************************************/ +void ParaNdis_GetOidSupportRules(NDIS_OID oid, tOidWhatToDo *pRule, const tOidWhatToDo *Table) +{ + static const tOidWhatToDo defaultRule = { 0, 0, 0, 0, 0, NULL, "Unknown OID" }; + UINT i; + *pRule = defaultRule; + pRule->oid = oid; + + for (i = 0; Table[i].oid != 0; ++i) + { + if (Table[i].oid == oid) + { + *pRule = Table[i]; + break; + } + } + pRule->name = ParaNdis_OidName(oid); +} diff --git a/drivers/network/dd/netkvm/Common/ParaNdis-Oid.h b/drivers/network/dd/netkvm/Common/ParaNdis-Oid.h new file mode 100644 index 00000000000..aac453ab97c --- /dev/null +++ b/drivers/network/dd/netkvm/Common/ParaNdis-Oid.h @@ -0,0 +1,104 @@ +/* + * This file contains common for NDIS5/NDIS6 definition, + * related to OID support + * + * Copyright (c) 2008-2017 Red Hat, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ +#ifndef PARANDIS_COMMON_OID_H +#define PARANDIS_COMMON_OID_H + +#include "ndis56common.h" + +/********************************************************** +Wrapper for all the data, related to any OID request +***********************************************************/ +typedef struct _tagOidDesc +{ + NDIS_OID Oid; // oid code + ULONG ulToDoFlags; // combination of eOidHelperFlags + PVOID InformationBuffer; // buffer received from NDIS + UINT InformationBufferLength; // its length + PUINT pBytesWritten; // OUT for query/method + PUINT pBytesNeeded; // OUT for query/set/method when length of buffer is wrong + PUINT pBytesRead; // OUT for set/method + PVOID Reserved; // Reserved for pending requests +} tOidDesc; + +typedef NDIS_STATUS (*OIDHANDLERPROC)(PARANDIS_ADAPTER *pContext, tOidDesc *pOid); + +typedef struct _tagOidWhatToDo +{ + NDIS_OID oid; // oid number + int nEntryLevel; // do print on entry level + int nExitFailLevel; // do print on exit if failed + int nExitOKLevel; // do print on exit if OK + UINT Flags; + OIDHANDLERPROC OidSetProc; // procedure to call on SET + const char *name; // printable name +}tOidWhatToDo; + + +typedef enum _tageOidHelperFlags { + ohfQuery = 1, // can be queried + ohfSet = 2, // can be set + ohfQuerySet = ohfQuery | ohfSet, + ohfQueryStatOnly = 4, // redirect query stat to query + ohfQueryStat = ohfQueryStatOnly | ohfQuery, + ohfQuery3264 = 8 | ohfQuery, // convert 64 to 32 on query + ohfQueryStat3264 = 8 | ohfQueryStat, // convert 64 to 32 on query stat + ohfSetLessOK = 16, // on set: if buffer is smaller, cleanup and copy + ohfSetMoreOK = 32 // on set: if buffer is larger, copy anyway +} eOidHelperFlags; + + + + +/********************************************************** +Common procedures related to OID support +***********************************************************/ +NDIS_STATUS ParaNdis_OidQueryCommon(PARANDIS_ADAPTER *pContext, tOidDesc *pOid); +NDIS_STATUS ParaNdis_OidQueryCopy(tOidDesc *pOid, PVOID pInfo, ULONG ulSize, BOOLEAN bFreeInfo); +static NDIS_STATUS ParaNdis_OidQuery(PARANDIS_ADAPTER *pContext, tOidDesc *pOid); +NDIS_STATUS ParaNdis_OnOidSetMulticastList(PARANDIS_ADAPTER *pContext, tOidDesc *pOid); +NDIS_STATUS ParaNdis_OnSetPacketFilter(PARANDIS_ADAPTER *pContext, tOidDesc *pOid); +NDIS_STATUS ParaNdis_OnAddWakeupPattern(PARANDIS_ADAPTER *pContext, tOidDesc *pOid); +NDIS_STATUS ParaNdis_OnRemoveWakeupPattern(PARANDIS_ADAPTER *pContext, tOidDesc *pOid); +NDIS_STATUS ParaNdis_OnEnableWakeup(PARANDIS_ADAPTER *pContext, tOidDesc *pOid); +NDIS_STATUS ParaNdis_OnSetLookahead(PARANDIS_ADAPTER *pContext, tOidDesc *pOid); +NDIS_STATUS ParaNdis_OnSetVlanId(PARANDIS_ADAPTER *pContext, tOidDesc *pOid); +NDIS_STATUS ParaNdis_OidSetCopy(tOidDesc *pOid, PVOID pDest, ULONG ulSize); +void ParaNdis_FillPowerCapabilities(PNDIS_PNP_CAPABILITIES pCaps); +void ParaNdis_GetOidSupportRules(NDIS_OID oid, tOidWhatToDo *pRule, const tOidWhatToDo *Table); + + +const char *ParaNdis_OidName(NDIS_OID oid); +/********************************************************** +Procedures to be implemented in NDIS5/NDIS6 specific modules +***********************************************************/ +void ParaNdis_GetSupportedOid(PVOID *pOidsArray, PULONG pLength); +NDIS_STATUS ParaNdis_OnSetPower(PARANDIS_ADAPTER *pContext, tOidDesc *pOid); + +#endif diff --git a/drivers/network/dd/netkvm/Common/ParaNdis-VirtIO.c b/drivers/network/dd/netkvm/Common/ParaNdis-VirtIO.c new file mode 100644 index 00000000000..a1525502696 --- /dev/null +++ 
b/drivers/network/dd/netkvm/Common/ParaNdis-VirtIO.c @@ -0,0 +1,389 @@ +/* + * This file contains NDIS driver VirtIO callbacks + * + * Copyright (c) 2008-2017 Red Hat, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +#include "ndis56common.h" + +///////////////////////////////////////////////////////////////////////////////////// +// +// ReadVirtIODeviceRegister\WriteVirtIODeviceRegister +// NDIS specific implementation of the IO and memory space read\write +// +// The lower 64k of memory is never mapped so we can use the same routines +// for both port I/O and memory access and use the address alone to decide +// which space to use. 
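/* Editor's note (illustrative sketch, not part of the original patch): because the
 * low 64 KB of the kernel virtual address space is never mapped, the accessors below
 * treat any address with bits set above bit 15 as memory-mapped I/O and everything
 * else as port I/O. The addresses in this disabled fragment are hypothetical and only
 * demonstrate how that test behaves.
 */
#if 0
ULONG_PTR portAddr = 0xC000;     /* no bits above bit 15 -> NdisRawRead/WritePortXxx path  */
ULONG_PTR mmioAddr = 0xFEBD0000; /* bits above bit 15    -> NdisRead/WriteRegisterXxx path */
ASSERT((portAddr & ~0xFFFF) == 0);
ASSERT((mmioAddr & ~0xFFFF) != 0);
#endif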
+///////////////////////////////////////////////////////////////////////////////////// + +#define PORT_MASK 0xFFFF + +static u32 ReadVirtIODeviceRegister(ULONG_PTR ulRegister) +{ + ULONG ulValue; + + if (ulRegister & ~PORT_MASK) { + NdisReadRegisterUlong(ulRegister, &ulValue); + } else { + NdisRawReadPortUlong(ulRegister, &ulValue); + } + + DPrintf(6, ("[%s]R[%x]=%x", __FUNCTION__, (ULONG)ulRegister, ulValue)); + return ulValue; +} + +static void WriteVirtIODeviceRegister(ULONG_PTR ulRegister, u32 ulValue) +{ + DPrintf(6, ("[%s]R[%x]=%x", __FUNCTION__, (ULONG)ulRegister, ulValue)); + + if (ulRegister & ~PORT_MASK) { + NdisWriteRegisterUlong((PULONG)ulRegister, ulValue); + } else { + NdisRawWritePortUlong(ulRegister, ulValue); + } +} + +static u8 ReadVirtIODeviceByte(ULONG_PTR ulRegister) +{ + u8 bValue; + + if (ulRegister & ~PORT_MASK) { + NdisReadRegisterUchar(ulRegister, &bValue); + } else { + NdisRawReadPortUchar(ulRegister, &bValue); + } + + DPrintf(6, ("[%s]R[%x]=%x", __FUNCTION__, (ULONG)ulRegister, bValue)); + return bValue; +} + +static void WriteVirtIODeviceByte(ULONG_PTR ulRegister, u8 bValue) +{ + DPrintf(6, ("[%s]R[%x]=%x", __FUNCTION__, (ULONG)ulRegister, bValue)); + + if (ulRegister & ~PORT_MASK) { + NdisWriteRegisterUchar((PUCHAR)ulRegister, bValue); + } else { + NdisRawWritePortUchar(ulRegister, bValue); + } +} + +static u16 ReadVirtIODeviceWord(ULONG_PTR ulRegister) +{ + u16 wValue; + + if (ulRegister & ~PORT_MASK) { + NdisReadRegisterUshort(ulRegister, &wValue); + } else { + NdisRawReadPortUshort(ulRegister, &wValue); + } + + DPrintf(6, ("[%s]R[%x]=%x\n", __FUNCTION__, (ULONG)ulRegister, wValue)); + return wValue; +} + +static void WriteVirtIODeviceWord(ULONG_PTR ulRegister, u16 wValue) +{ +#if 1 + if (ulRegister & ~PORT_MASK) { + NdisWriteRegisterUshort((PUSHORT)ulRegister, wValue); + } else { + NdisRawWritePortUshort(ulRegister, wValue); + } +#else + // test only to cause long TX waiting queue of NDIS packets + // to recognize it and request for reset via Hang handler + static int nCounterToFail = 0; + static const int StartFail = 200, StopFail = 600; + BOOLEAN bFail = FALSE; + DPrintf(6, ("%s> R[%x] = %x\n", __FUNCTION__, (ULONG)ulRegister, wValue)); + if ((ulRegister & 0x1F) == 0x10) + { + nCounterToFail++; + bFail = nCounterToFail >= StartFail && nCounterToFail < StopFail; + } + if (!bFail) NdisRawWritePortUshort(ulRegister, wValue); + else + { + DPrintf(0, ("%s> FAILING R[%x] = %x\n", __FUNCTION__, (ULONG)ulRegister, wValue)); + } +#endif +} + +static void *mem_alloc_contiguous_pages(void *context, size_t size) +{ + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)context; + PVOID retVal = NULL; + ULONG i; + + /* find the first unused memory range of the requested size */ + for (i = 0; i < MAX_NUM_OF_QUEUES; i++) { + if (pContext->SharedMemoryRanges[i].pBase != NULL && + pContext->SharedMemoryRanges[i].bUsed == FALSE && + pContext->SharedMemoryRanges[i].uLength == (ULONG)size) { + retVal = pContext->SharedMemoryRanges[i].pBase; + pContext->SharedMemoryRanges[i].bUsed = TRUE; + break; + } + } + + if (!retVal) { + /* find the first null memory range descriptor and allocate */ + for (i = 0; i < MAX_NUM_OF_QUEUES; i++) { + if (pContext->SharedMemoryRanges[i].pBase == NULL) { + break; + } + } + if (i < MAX_NUM_OF_QUEUES) { + NdisMAllocateSharedMemory( + pContext->MiniportHandle, + (ULONG)size, + TRUE /* Cached */, + &pContext->SharedMemoryRanges[i].pBase, + &pContext->SharedMemoryRanges[i].BasePA); + retVal = pContext->SharedMemoryRanges[i].pBase; + if (retVal) { + 
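/* shared block allocated: zero it, record its length, and mark this range
   descriptor as used so mem_free_contiguous_pages can release it later */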
NdisZeroMemory(retVal, size); + pContext->SharedMemoryRanges[i].uLength = (ULONG)size; + pContext->SharedMemoryRanges[i].bUsed = TRUE; + } + } + } + + if (retVal) { + DPrintf(6, ("[%s] returning %p, size %x\n", __FUNCTION__, retVal, (ULONG)size)); + } else { + DPrintf(0, ("[%s] failed to allocate size %x\n", __FUNCTION__, (ULONG)size)); + } + return retVal; +} + +static void mem_free_contiguous_pages(void *context, void *virt) +{ + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)context; + ULONG i; + + for (i = 0; i < MAX_NUM_OF_QUEUES; i++) { + if (pContext->SharedMemoryRanges[i].pBase == virt) { + pContext->SharedMemoryRanges[i].bUsed = FALSE; + break; + } + } + + if (i < MAX_NUM_OF_QUEUES) { + DPrintf(6, ("[%s] freed %p at index %d\n", __FUNCTION__, virt, i)); + } else { + DPrintf(0, ("[%s] failed to free %p\n", __FUNCTION__, virt)); + } +} + +static ULONGLONG mem_get_physical_address(void *context, void *virt) +{ + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)context; + ULONG_PTR uAddr = (ULONG_PTR)virt; + ULONG i; + + for (i = 0; i < MAX_NUM_OF_QUEUES; i++) { + ULONG_PTR uBase = (ULONG_PTR)pContext->SharedMemoryRanges[i].pBase; + if (uAddr >= uBase && uAddr < (uBase + pContext->SharedMemoryRanges[i].uLength)) { + ULONGLONG retVal = pContext->SharedMemoryRanges[i].BasePA.QuadPart + (uAddr - uBase); + + DPrintf(6, ("[%s] translated %p to %I64X\n", __FUNCTION__, virt, retVal)); + return retVal; + } + } + + DPrintf(0, ("[%s] failed to translate %p\n", __FUNCTION__, virt)); + return 0; +} + +static void *mem_alloc_nonpaged_block(void *context, size_t size) +{ + PVOID retVal; + + if (NdisAllocateMemoryWithTag( + &retVal, + (UINT)size, + PARANDIS_MEMORY_TAG) != NDIS_STATUS_SUCCESS) { + retVal = NULL; + } + + if (retVal) { + NdisZeroMemory(retVal, size); + DPrintf(6, ("[%s] returning %p, len %x\n", __FUNCTION__, retVal, (ULONG)size)); + } else { + DPrintf(0, ("[%s] failed to allocate size %x\n", __FUNCTION__, (ULONG)size)); + } + return retVal; +} + +static void mem_free_nonpaged_block(void *context, void *addr) +{ + UNREFERENCED_PARAMETER(context); + + NdisFreeMemory(addr, 0, 0); + DPrintf(6, ("[%s] freed %p\n", __FUNCTION__, addr)); +} + +static int PCIReadConfig(PPARANDIS_ADAPTER pContext, + int where, + void *buffer, + size_t length) +{ + ULONG read; + + read = NdisReadPciSlotInformation( + pContext->MiniportHandle, + 0 /* SlotNumber */, + where, + buffer, + (ULONG)length); + + if (read == length) { + DPrintf(6, ("[%s] read %d bytes at %d\n", __FUNCTION__, read, where)); + return 0; + } else { + DPrintf(0, ("[%s] failed to read %d bytes at %d\n", __FUNCTION__, read, where)); + return -1; + } +} + +static int pci_read_config_byte(void *context, int where, u8 *bVal) +{ + return PCIReadConfig((PPARANDIS_ADAPTER)context, where, bVal, sizeof(*bVal)); +} + +static int pci_read_config_word(void *context, int where, u16 *wVal) +{ + return PCIReadConfig((PPARANDIS_ADAPTER)context, where, wVal, sizeof(*wVal)); +} + +static int pci_read_config_dword(void *context, int where, u32 *dwVal) +{ + return PCIReadConfig((PPARANDIS_ADAPTER)context, where, dwVal, sizeof(*dwVal)); +} + +static size_t pci_get_resource_len(void *context, int bar) +{ + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)context; + + if (bar < PCI_TYPE0_ADDRESSES) { + return pContext->AdapterResources.PciBars[bar].uLength; + } + + DPrintf(0, ("[%s] queried invalid BAR %d\n", __FUNCTION__, bar)); + return 0; +} + +static void *pci_map_address_range(void *context, int bar, size_t offset, size_t maxlen) +{ + PARANDIS_ADAPTER 
*pContext = (PARANDIS_ADAPTER *)context; + + if (bar < PCI_TYPE0_ADDRESSES) { + tBusResource *pRes = &pContext->AdapterResources.PciBars[bar]; + if (pRes->pBase == NULL) { + /* BAR not mapped yet */ + if (pRes->bPortSpace) { + if (NDIS_STATUS_SUCCESS == NdisMRegisterIoPortRange( + &pRes->pBase, + pContext->MiniportHandle, + pRes->BasePA.LowPart, + pRes->uLength)) { + DPrintf(6, ("[%s] mapped port BAR at %x\n", __FUNCTION__, pRes->BasePA.LowPart)); + } else { + pRes->pBase = NULL; + DPrintf(0, ("[%s] failed to map port BAR at %x\n", __FUNCTION__, pRes->BasePA.LowPart)); + } + } else { + if (NDIS_STATUS_SUCCESS == NdisMMapIoSpace( + &pRes->pBase, + pContext->MiniportHandle, + pRes->BasePA, + pRes->uLength)) { + DPrintf(6, ("[%s] mapped memory BAR at %I64x\n", __FUNCTION__, pRes->BasePA.QuadPart)); + } else { + pRes->pBase = NULL; + DPrintf(0, ("[%s] failed to map memory BAR at %I64x\n", __FUNCTION__, pRes->BasePA.QuadPart)); + } + } + } + if (pRes->pBase != NULL && offset < pRes->uLength) { + if (pRes->bPortSpace) { + /* use physical address for port I/O */ + return (PUCHAR)(ULONG_PTR)pRes->BasePA.LowPart + offset; + } else { + /* use virtual address for memory I/O */ + return (PUCHAR)pRes->pBase + offset; + } + } else { + DPrintf(0, ("[%s] failed to get map BAR %d, offset %x\n", __FUNCTION__, bar, offset)); + } + } else { + DPrintf(0, ("[%s] queried invalid BAR %d\n", __FUNCTION__, bar)); + } + + return NULL; +} + +static u16 vdev_get_msix_vector(void *context, int queue) +{ + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)context; + u16 vector = VIRTIO_MSI_NO_VECTOR; + + /* we don't run on MSI support so this will never be true */ + if (pContext->bUsingMSIX && queue >= 0) { + vector = (u16)pContext->AdapterResources.Vector; + } + + return vector; +} + +static void vdev_sleep(void *context, unsigned int msecs) +{ + UNREFERENCED_PARAMETER(context); + + NdisMSleep(1000 * msecs); +} + +VirtIOSystemOps ParaNdisSystemOps = { + /* .vdev_read_byte = */ ReadVirtIODeviceByte, + /* .vdev_read_word = */ ReadVirtIODeviceWord, + /* .vdev_read_dword = */ ReadVirtIODeviceRegister, + /* .vdev_write_byte = */ WriteVirtIODeviceByte, + /* .vdev_write_word = */ WriteVirtIODeviceWord, + /* .vdev_write_dword = */ WriteVirtIODeviceRegister, + /* .mem_alloc_contiguous_pages = */ mem_alloc_contiguous_pages, + /* .mem_free_contiguous_pages = */ mem_free_contiguous_pages, + /* .mem_get_physical_address = */ mem_get_physical_address, + /* .mem_alloc_nonpaged_block = */ mem_alloc_nonpaged_block, + /* .mem_free_nonpaged_block = */ mem_free_nonpaged_block, + /* .pci_read_config_byte = */ pci_read_config_byte, + /* .pci_read_config_word = */ pci_read_config_word, + /* .pci_read_config_dword = */ pci_read_config_dword, + /* .pci_get_resource_len = */ pci_get_resource_len, + /* .pci_map_address_range = */ pci_map_address_range, + /* .vdev_get_msix_vector = */ vdev_get_msix_vector, + /*.vdev_sleep = */ vdev_sleep, +}; diff --git a/drivers/network/dd/netkvm/Common/ethernetutils.h b/drivers/network/dd/netkvm/Common/ethernetutils.h new file mode 100644 index 00000000000..5d3a5e7d578 --- /dev/null +++ b/drivers/network/dd/netkvm/Common/ethernetutils.h @@ -0,0 +1,123 @@ +/* + * Contains common Ethernet-related definition, not defined in NDIS + * + * Copyright (c) 2008-2017 Red Hat, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +#ifndef _ETHERNET_UTILS_H +#define _ETHERNET_UTILS_H + +// assuming included + + +#define ETH_IS_LOCALLY_ADMINISTERED(Address) \ + (BOOLEAN)(((PUCHAR)(Address))[0] & ((UCHAR)0x02)) + +#define ETH_IS_EMPTY(Address) \ + ((((PUCHAR)(Address))[0] == ((UCHAR)0x00)) && (((PUCHAR)(Address))[1] == ((UCHAR)0x00)) && (((PUCHAR)(Address))[2] == ((UCHAR)0x00)) && (((PUCHAR)(Address))[3] == ((UCHAR)0x00)) && (((PUCHAR)(Address))[4] == ((UCHAR)0x00)) && (((PUCHAR)(Address))[5] == ((UCHAR)0x00))) + +#define ETH_HAS_PRIO_HEADER(Address) \ + (((PUCHAR)(Address))[12] == ((UCHAR)0x81) && ((PUCHAR)(Address))[13] == ((UCHAR)0x00)) + +#include +typedef struct _ETH_HEADER +{ + UCHAR DstAddr[ETH_LENGTH_OF_ADDRESS]; + UCHAR SrcAddr[ETH_LENGTH_OF_ADDRESS]; + USHORT EthType; +} ETH_HEADER, *PETH_HEADER; +#include + +#define ETH_HEADER_SIZE (sizeof(ETH_HEADER)) +#define ETH_MIN_PACKET_SIZE 60 +#define ETH_PRIORITY_HEADER_OFFSET 12 +#define ETH_PRIORITY_HEADER_SIZE 4 + + +static void FORCEINLINE SetPriorityData(UCHAR *pDest, ULONG priority, ULONG VlanID) +{ + pDest[0] = 0x81; + pDest[2] = (UCHAR)(priority << 5); + pDest[2] |= (UCHAR)(VlanID >> 8); + pDest[3] |= (UCHAR)VlanID; +} + +typedef enum _tag_eInspectedPacketType +{ + iptUnicast, + iptBroadcast, + iptMulticast, + iptInvalid +}eInspectedPacketType; + +// IP Header RFC 791 +typedef struct _tagIPv4Header { + UCHAR ip_verlen; // length in 32-bit units(low nibble), version (high nibble) + UCHAR ip_tos; // Type of service + USHORT ip_length; // Total length + USHORT ip_id; // Identification + USHORT ip_offset; // fragment offset and flags + UCHAR ip_ttl; // Time to live + UCHAR ip_protocol; // Protocol + USHORT ip_xsum; // Header checksum + ULONG ip_src; // Source IP address + ULONG ip_dest; // Destination IP address +} IPv4Header; + +// TCP header RFC 793 +typedef struct _tagTCPHeader { + USHORT tcp_src; // Source port + USHORT tcp_dest; // Destination port + ULONG tcp_seq; // Sequence number + ULONG tcp_ack; // Ack number + USHORT tcp_flags; // header length and flags + USHORT tcp_window; // Window size + USHORT tcp_xsum; // Checksum + USHORT tcp_urgent; // Urgent +}TCPHeader; + + +// UDP Header RFC 768 +typedef struct 
_tagUDPHeader { + USHORT udp_src; // Source port + USHORT udp_dest; // Destination port + USHORT udp_length; // length of datagram + USHORT udp_xsum; // checksum +}UDPHeader; + + + +#define TCP_CHECKSUM_OFFSET 16 +#define UDP_CHECKSUM_OFFSET 6 +#define MAX_IPV4_HEADER_SIZE 60 +#define MAX_TCP_HEADER_SIZE 60 + +static __inline USHORT swap_short(USHORT us) +{ + return (us << 8) | (us >> 8); +} + + +#endif diff --git a/drivers/network/dd/netkvm/Common/kdebugprint.h b/drivers/network/dd/netkvm/Common/kdebugprint.h new file mode 100644 index 00000000000..16d4ea0f4c3 --- /dev/null +++ b/drivers/network/dd/netkvm/Common/kdebugprint.h @@ -0,0 +1,108 @@ +/* + * This file contains debug-related definitions for kernel driver + * + * Copyright (c) 2008-2017 Red Hat, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +/********************************************************************** +WARNING: this file is incompatible with Logo requirements +TODO: Optional WPP technique +**********************************************************************/ + +#ifndef _K_DEBUG_PRINT_H +#define _K_DEBUG_PRINT_H + +extern int nDebugLevel; +extern int bDebugPrint; + + +typedef void (*DEBUGPRINTFUNC)(const char *fmt, ...); +extern DEBUGPRINTFUNC pDebugPrint; + +void _LogOutEntry(int level, const char *s); +void _LogOutExitValue(int level, const char *s, ULONG value); +void _LogOutString(int level, const char *s); + +#define DEBUG_ENTRY(level) _LogOutEntry(level, __FUNCTION__) +#define DEBUG_EXIT_STATUS(level, status) _LogOutExitValue(level, __FUNCTION__, status) +#define DPrintFunctionName(Level) _LogOutString(Level, __FUNCTION__) + + +#ifndef WPP_EVENT_TRACING + +#define WPP_INIT_TRACING(a,b) +#define WPP_CLEANUP(a) + +#define MAX_DEBUG_LEVEL 1 + +#define DPrintf(Level, Fmt) { if ( (Level) > MAX_DEBUG_LEVEL || (Level) > nDebugLevel || !bDebugPrint ) {} else { pDebugPrint Fmt; } } + +#define DPrintfBypass(Level, Fmt) DPrintf(Level, Fmt) + +#else + +//#define WPP_USE_BYPASS + + +#define DPrintfAnyway(Level, Fmt) \ +{ \ + if (bDebugPrint && (Level) <= nDebugLevel) \ + { \ + pDebugPrint Fmt; \ + } \ +} + +//{05F77115-E57E-49bf-90DF-C0E6B6478E5F} +#define WPP_CONTROL_GUIDS \ + WPP_DEFINE_CONTROL_GUID(NetKVM, (05F77115,E57E,49bf,90DF,C0E6B6478E5F), \ + WPP_DEFINE_BIT(TRACE_DEBUG)\ + ) + + +#define WPP_LEVEL_ENABLED(LEVEL) \ + (nDebugLevel >= (LEVEL)) + +#define WPP_LEVEL_LOGGER(LEVEL) (WPP_CONTROL(WPP_BIT_ ## TRACE_DEBUG).Logger), + + +#if WPP_USE_BYPASS +#define DPrintfBypass(Level, Fmt) DPrintfAnyway(Level, Fmt) +#else +#define DPrintfBypass(Level, Fmt) +#endif + +#define WPP_PRIVATE_ENABLE_CALLBACK WppEnableCallback + +extern VOID WppEnableCallback( + __in LPCGUID Guid, + __in __int64 Logger, + __in BOOLEAN Enable, + __in ULONG Flags, + __in UCHAR Level); + + +#endif +#endif diff --git a/drivers/network/dd/netkvm/Common/ndis56common.h b/drivers/network/dd/netkvm/Common/ndis56common.h new file mode 100644 index 00000000000..7cf4236be65 --- /dev/null +++ b/drivers/network/dd/netkvm/Common/ndis56common.h @@ -0,0 +1,892 @@ +/* + * This file contains general definitions for VirtIO network adapter driver, + * common for both NDIS5 and NDIS6 + * + * Copyright (c) 2008-2017 Red Hat, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +#ifndef PARANDIS_56_COMMON_H +#define PARANDIS_56_COMMON_H + +//#define PARANDIS_TEST_TX_KICK_ALWAYS + +#if defined(OFFLOAD_UNIT_TEST) +#include +#include + +#define ETH_LENGTH_OF_ADDRESS 6 +#define DoPrint(fmt, ...) printf(fmt##"\n", __VA_ARGS__) +#define DPrintf(a,b) DoPrint b +#define RtlOffsetToPointer(B,O) ((PCHAR)( ((PCHAR)(B)) + ((ULONG_PTR)(O)) )) + +#include "ethernetutils.h" +#endif //+OFFLOAD_UNIT_TEST + +#if !defined(OFFLOAD_UNIT_TEST) + +#if !defined(RtlOffsetToPointer) +#define RtlOffsetToPointer(Base,Offset) ((PCHAR)(((PCHAR)(Base))+((ULONG_PTR)(Offset)))) +#endif + +#if !defined(RtlPointerToOffset) +#define RtlPointerToOffset(Base,Pointer) ((ULONG)(((PCHAR)(Pointer))-((PCHAR)(Base)))) +#endif + + +#include +#include "osdep.h" +#include "kdebugprint.h" +#include "ethernetutils.h" +#include "virtio_pci.h" +#include "VirtIO.h" +#include "virtio_ring.h" +#include "IONetDescriptor.h" +#include "DebugData.h" + +// those stuff defined in NDIS +//NDIS_MINIPORT_MAJOR_VERSION +//NDIS_MINIPORT_MINOR_VERSION +// those stuff defined in build environment +// PARANDIS_MAJOR_DRIVER_VERSION +// PARANDIS_MINOR_DRIVER_VERSION + +#if !defined(NDIS_MINIPORT_MAJOR_VERSION) || !defined(NDIS_MINIPORT_MINOR_VERSION) +#error "Something is wrong with NDIS environment" +#endif + +//define to see when the status register is unreadable(see ParaNdis_ResetVirtIONetDevice) +//#define VIRTIO_RESET_VERIFY + +//define to if hardware raise interrupt on error (see ParaNdis_DPCWorkBody) +//#define VIRTIO_SIGNAL_ERROR + +// define if qemu supports logging to static IO port for synchronization +// of driver output with qemu printouts; in this case define the port number +// #define VIRTIO_DBG_USE_IOPORT 0x99 + +// to be set to real limit later +#define MAX_RX_LOOPS 1000 + +// maximum number of virtio queues used by the driver +#define MAX_NUM_OF_QUEUES 3 + +/* The feature bitmap for virtio net */ +#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ +#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ +#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */ +#define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */ +#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */ +#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */ +#define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */ +#define VIRTIO_NET_F_GUEST_UFO 10 /* Guest can handle UFO in. */ +#define VIRTIO_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */ +#define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */ +#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */ +#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. 
*/ +#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can handle merged Rx buffers and requires bigger header for that. */ +#define VIRTIO_NET_F_STATUS 16 +#define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */ +#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */ +#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */ +#define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */ + +#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */ + +#define VIRTIO_NET_INVALID_INTERRUPT_STATUS 0xFF + +#define PARANDIS_MULTICAST_LIST_SIZE 32 +#define PARANDIS_MEMORY_TAG '5muQ' +#define PARANDIS_FORMAL_LINK_SPEED (pContext->ulFormalLinkSpeed) +#define PARANDIS_MAXIMUM_TRANSMIT_SPEED PARANDIS_FORMAL_LINK_SPEED +#define PARANDIS_MAXIMUM_RECEIVE_SPEED PARANDIS_FORMAL_LINK_SPEED +#define PARANDIS_MIN_LSO_SEGMENTS 2 +// reported +#define PARANDIS_MAX_LSO_SIZE 0xF800 + +#define PARANDIS_UNLIMITED_PACKETS_TO_INDICATE (~0ul) + +extern VirtIOSystemOps ParaNdisSystemOps; + +typedef enum _tagInterruptSource +{ + isControl = VIRTIO_PCI_ISR_CONFIG, + isReceive = 0x10, + isTransmit = 0x20, + isUnknown = 0x40, + isBothTransmitReceive = isReceive | isTransmit, + isAny = isReceive | isTransmit | isControl | isUnknown, + isDisable = 0x80 +}tInterruptSource; + +static const ULONG PARANDIS_PACKET_FILTERS = + NDIS_PACKET_TYPE_DIRECTED | + NDIS_PACKET_TYPE_MULTICAST | + NDIS_PACKET_TYPE_BROADCAST | + NDIS_PACKET_TYPE_PROMISCUOUS | + NDIS_PACKET_TYPE_ALL_MULTICAST; + +typedef VOID (*ONPAUSECOMPLETEPROC)(VOID *); + + +typedef enum _tagSendReceiveState +{ + srsDisabled = 0, // initial state + srsPausing, + srsEnabled +} tSendReceiveState; + +typedef struct _tagBusResource { + NDIS_PHYSICAL_ADDRESS BasePA; + ULONG uLength; + PVOID pBase; + BOOLEAN bPortSpace; + BOOLEAN bUsed; +} tBusResource; + +typedef struct _tagAdapterResources +{ + tBusResource PciBars[PCI_TYPE0_ADDRESSES]; + ULONG Vector; + ULONG Level; + KAFFINITY Affinity; + ULONG InterruptFlags; +} tAdapterResources; + +typedef enum _tagOffloadSettingsBit +{ + osbT4IpChecksum = (1 << 0), + osbT4TcpChecksum = (1 << 1), + osbT4UdpChecksum = (1 << 2), + osbT4TcpOptionsChecksum = (1 << 3), + osbT4IpOptionsChecksum = (1 << 4), + osbT4Lso = (1 << 5), + osbT4LsoIp = (1 << 6), + osbT4LsoTcp = (1 << 7), + osbT4RxTCPChecksum = (1 << 8), + osbT4RxTCPOptionsChecksum = (1 << 9), + osbT4RxIPChecksum = (1 << 10), + osbT4RxIPOptionsChecksum = (1 << 11), + osbT4RxUDPChecksum = (1 << 12), + osbT6TcpChecksum = (1 << 13), + osbT6UdpChecksum = (1 << 14), + osbT6TcpOptionsChecksum = (1 << 15), + osbT6IpExtChecksum = (1 << 16), + osbT6Lso = (1 << 17), + osbT6LsoIpExt = (1 << 18), + osbT6LsoTcpOptions = (1 << 19), + osbT6RxTCPChecksum = (1 << 20), + osbT6RxTCPOptionsChecksum = (1 << 21), + osbT6RxUDPChecksum = (1 << 22), + osbT6RxIpExtChecksum = (1 << 23), +}tOffloadSettingsBit; + +typedef struct _tagOffloadSettingsFlags +{ + ULONG fTxIPChecksum : 1; + ULONG fTxTCPChecksum : 1; + ULONG fTxUDPChecksum : 1; + ULONG fTxTCPOptions : 1; + ULONG fTxIPOptions : 1; + ULONG fTxLso : 1; + ULONG fTxLsoIP : 1; + ULONG fTxLsoTCP : 1; + ULONG fRxIPChecksum : 1; + ULONG fRxTCPChecksum : 1; + ULONG fRxUDPChecksum : 1; + ULONG fRxTCPOptions : 1; + ULONG fRxIPOptions : 1; + ULONG fTxTCPv6Checksum : 1; + ULONG fTxUDPv6Checksum : 1; + ULONG fTxTCPv6Options : 1; + ULONG fTxIPv6Ext : 1; + ULONG fTxLsov6 : 1; + ULONG fTxLsov6IP : 1; + ULONG fTxLsov6TCP : 1; + ULONG fRxTCPv6Checksum : 1; + ULONG fRxUDPv6Checksum : 1; + ULONG fRxTCPv6Options : 1; + ULONG fRxIPv6Ext : 1; 
+}tOffloadSettingsFlags; + + +typedef struct _tagOffloadSettings +{ + /* current value of enabled offload features */ + tOffloadSettingsFlags flags; + /* load once, do not modify - bitmask of offload features, enabled in configuration */ + ULONG flagsValue; + ULONG ipHeaderOffset; + ULONG maxPacketSize; +}tOffloadSettings; + +typedef struct _tagChecksumCheckResult +{ + union + { + struct + { + ULONG TcpFailed :1; + ULONG UdpFailed :1; + ULONG IpFailed :1; + ULONG TcpOK :1; + ULONG UdpOK :1; + ULONG IpOK :1; + } flags; + ULONG value; + }; +}tChecksumCheckResult; + +/* +for simplicity, we use for NDIS5 the same statistics as native NDIS6 uses +*/ +typedef struct _tagNdisStatistics +{ + ULONG64 ifHCInOctets; + ULONG64 ifHCInUcastPkts; + ULONG64 ifHCInUcastOctets; + ULONG64 ifHCInMulticastPkts; + ULONG64 ifHCInMulticastOctets; + ULONG64 ifHCInBroadcastPkts; + ULONG64 ifHCInBroadcastOctets; + ULONG64 ifInDiscards; + ULONG64 ifInErrors; + ULONG64 ifHCOutOctets; + ULONG64 ifHCOutUcastPkts; + ULONG64 ifHCOutUcastOctets; + ULONG64 ifHCOutMulticastPkts; + ULONG64 ifHCOutMulticastOctets; + ULONG64 ifHCOutBroadcastPkts; + ULONG64 ifHCOutBroadcastOctets; + ULONG64 ifOutDiscards; + ULONG64 ifOutErrors; +}NDIS_STATISTICS_INFO; + +typedef PNDIS_PACKET tPacketType; +typedef PNDIS_PACKET tPacketHolderType; +typedef PNDIS_PACKET tPacketIndicationType; + +typedef struct _tagNdisOffloadParams +{ + UCHAR IPv4Checksum; + UCHAR TCPIPv4Checksum; + UCHAR UDPIPv4Checksum; + UCHAR LsoV1; + UCHAR LsoV2IPv4; + UCHAR TCPIPv6Checksum; + UCHAR UDPIPv6Checksum; + UCHAR LsoV2IPv6; +}NDIS_OFFLOAD_PARAMETERS; + +//#define UNIFY_LOCKS + +typedef struct _tagOurCounters +{ + UINT nReusedRxBuffers; + UINT nPrintDiagnostic; + ULONG64 prevIn; + UINT nRxInactivity; +}tOurCounters; + +typedef struct _tagMaxPacketSize +{ + UINT nMaxDataSize; + UINT nMaxFullSizeOS; + UINT nMaxFullSizeHwTx; + UINT nMaxFullSizeHwRx; +}tMaxPacketSize; + +typedef struct _tagCompletePhysicalAddress +{ + PHYSICAL_ADDRESS Physical; + PVOID Virtual; + ULONG size; + ULONG IsCached : 1; + ULONG IsTX : 1; +} tCompletePhysicalAddress; + +typedef struct _tagMulticastData +{ + ULONG nofMulticastEntries; + UCHAR MulticastList[ETH_LENGTH_OF_ADDRESS * PARANDIS_MULTICAST_LIST_SIZE]; +}tMulticastData; + +typedef struct _tagIONetDescriptor { + LIST_ENTRY listEntry; + tCompletePhysicalAddress HeaderInfo; + tCompletePhysicalAddress DataInfo; + tPacketHolderType pHolder; + PVOID ReferenceValue; + UINT nofUsedBuffers; +} IONetDescriptor, * pIONetDescriptor; + +typedef void (*tReuseReceiveBufferProc)(void *pContext, pIONetDescriptor pDescriptor); + +typedef struct _tagPARANDIS_ADAPTER +{ + NDIS_HANDLE DriverHandle; + NDIS_HANDLE MiniportHandle; + NDIS_EVENT ResetEvent; + tAdapterResources AdapterResources; + tBusResource SharedMemoryRanges[MAX_NUM_OF_QUEUES]; + + VirtIODevice IODevice; + BOOLEAN bIODeviceInitialized; + ULONGLONG ullHostFeatures; + ULONGLONG ullGuestFeatures; + + LARGE_INTEGER LastTxCompletionTimeStamp; +#ifdef PARANDIS_DEBUG_INTERRUPTS + LARGE_INTEGER LastInterruptTimeStamp; +#endif + BOOLEAN bConnected; + BOOLEAN bEnableInterruptHandlingDPC; + BOOLEAN bEnableInterruptChecking; + BOOLEAN bDoInterruptRecovery; + BOOLEAN bDoSupportPriority; + BOOLEAN bDoHwPacketFiltering; + BOOLEAN bUseScatterGather; + BOOLEAN bBatchReceive; + BOOLEAN bLinkDetectSupported; + BOOLEAN bDoHardwareChecksum; + BOOLEAN bDoGuestChecksumOnReceive; + BOOLEAN bDoIPCheckTx; + BOOLEAN bDoIPCheckRx; + BOOLEAN bUseMergedBuffers; + BOOLEAN bDoKickOnNoBuffer; + BOOLEAN bSurprizeRemoved; + 
BOOLEAN bUsingMSIX; + BOOLEAN bUseIndirect; + BOOLEAN bHasHardwareFilters; + BOOLEAN bHasControlQueue; + BOOLEAN bNoPauseOnSuspend; + BOOLEAN bFastSuspendInProcess; + BOOLEAN bResetInProgress; + ULONG ulCurrentVlansFilterSet; + tMulticastData MulticastData; + UINT uNumberOfHandledRXPacketsInDPC; + NDIS_DEVICE_POWER_STATE powerState; + LONG dpcReceiveActive; + LONG counterDPCInside; + LONG bDPCInactive; + LONG InterruptStatus; + ULONG ulPriorityVlanSetting; + ULONG VlanId; + ULONGLONG ulFormalLinkSpeed; + ULONG ulEnableWakeup; + tMaxPacketSize MaxPacketSize; + ULONG nEnableDPCChecker; + ULONG ulUniqueID; + UCHAR PermanentMacAddress[ETH_LENGTH_OF_ADDRESS]; + UCHAR CurrentMacAddress[ETH_LENGTH_OF_ADDRESS]; + ULONG PacketFilter; + ULONG DummyLookAhead; + ULONG ulMilliesToConnect; + ULONG nDetectedStoppedTx; + ULONG nDetectedInactivity; + ULONG nVirtioHeaderSize; + /* send part */ +#if !defined(UNIFY_LOCKS) + NDIS_SPIN_LOCK SendLock; + NDIS_SPIN_LOCK ReceiveLock; +#else + union + { + NDIS_SPIN_LOCK SendLock; + NDIS_SPIN_LOCK ReceiveLock; + }; +#endif + NDIS_STATISTICS_INFO Statistics; + struct + { + ULONG framesCSOffload; + ULONG framesLSO; + ULONG framesIndirect; + ULONG framesRxPriority; + ULONG framesRxCSHwOK; + ULONG framesRxCSHwMissedBad; + ULONG framesRxCSHwMissedGood; + ULONG framesFilteredOut; + } extraStatistics; + tOurCounters Counters; + tOurCounters Limits; + tSendReceiveState SendState; + tSendReceiveState ReceiveState; + ONPAUSECOMPLETEPROC SendPauseCompletionProc; + ONPAUSECOMPLETEPROC ReceivePauseCompletionProc; + tReuseReceiveBufferProc ReuseBufferProc; + /* Net part - management of buffers and queues of QEMU */ + struct virtqueue * NetControlQueue; + tCompletePhysicalAddress ControlData; + struct virtqueue * NetReceiveQueue; + struct virtqueue * NetSendQueue; + /* list of Rx buffers available for data (under VIRTIO management) */ + LIST_ENTRY NetReceiveBuffers; + UINT NetNofReceiveBuffers; + /* list of Rx buffers waiting for return (under NDIS management) */ + LIST_ENTRY NetReceiveBuffersWaiting; + /* list of Tx buffers in process (under VIRTIO management) */ + LIST_ENTRY NetSendBuffersInUse; + /* list of Tx buffers ready for data (under MINIPORT management) */ + LIST_ENTRY NetFreeSendBuffers; + /* current number of free Tx descriptors */ + UINT nofFreeTxDescriptors; + /* initial number of free Tx descriptor(from cfg) - max number of available Tx descriptors */ + UINT maxFreeTxDescriptors; + /* current number of free Tx buffers, which can be submitted */ + UINT nofFreeHardwareBuffers; + /* maximal number of free Tx buffers, which can be used by SG */ + UINT maxFreeHardwareBuffers; + /* minimal number of free Tx buffers */ + UINT minFreeHardwareBuffers; + /* current number of Tx packets (or lists) to return */ + LONG NetTxPacketsToReturn; + /* total of Rx buffer in turnaround */ + UINT NetMaxReceiveBuffers; + struct VirtIOBufferDescriptor *sgTxGatherTable; + UINT nPnpEventIndex; + NDIS_DEVICE_PNP_EVENT PnpEvents[16]; + tOffloadSettings Offload; + NDIS_OFFLOAD_PARAMETERS InitialOffloadParameters; + // we keep these members common for XP and Vista + // for XP and non-MSI case of Vista they are set to zero + ULONG ulRxMessage; + ULONG ulTxMessage; + ULONG ulControlMessage; + + NDIS_MINIPORT_INTERRUPT Interrupt; + NDIS_HANDLE PacketPool; + NDIS_HANDLE BuffersPool; + NDIS_HANDLE WrapperConfigurationHandle; + LIST_ENTRY SendQueue; + LIST_ENTRY TxWaitingList; + NDIS_EVENT HaltEvent; + NDIS_TIMER ConnectTimer; + NDIS_TIMER DPCPostProcessTimer; + BOOLEAN bDmaInitialized; 
+}PARANDIS_ADAPTER, *PPARANDIS_ADAPTER; + +typedef enum { cpeOK, cpeNoBuffer, cpeInternalError, cpeTooLarge, cpeNoIndirect } tCopyPacketError; +typedef struct _tagCopyPacketResult +{ + ULONG size; + tCopyPacketError error; +}tCopyPacketResult; + +typedef struct _tagSynchronizedContext +{ + PARANDIS_ADAPTER *pContext; + PVOID Parameter; +}tSynchronizedContext; + +typedef BOOLEAN (*tSynchronizedProcedure)(tSynchronizedContext *context); + +/********************************************************** +LAZY release procedure returns buffers to VirtIO +only where there are no free buffers available + +NON-LAZY release releases transmit buffers from VirtIO +library every time there is something to release +***********************************************************/ +//#define LAZY_TX_RELEASE + +static inline bool VirtIODeviceGetHostFeature(PARANDIS_ADAPTER *pContext, unsigned uFeature) +{ + DPrintf(4, ("%s\n", __FUNCTION__)); + + return virtio_is_feature_enabled(pContext->ullHostFeatures, uFeature); +} + +static inline void VirtIODeviceEnableGuestFeature(PARANDIS_ADAPTER *pContext, unsigned uFeature) +{ + DPrintf(4, ("%s\n", __FUNCTION__)); + + virtio_feature_enable(pContext->ullGuestFeatures, uFeature); +} + +static BOOLEAN FORCEINLINE IsTimeToReleaseTx(PARANDIS_ADAPTER *pContext) +{ +#ifndef LAZY_TX_RELEASE + return pContext->nofFreeTxDescriptors < pContext->maxFreeTxDescriptors; +#else + return pContext->nofFreeTxDescriptors == 0; +#endif +} + +static BOOLEAN FORCEINLINE IsValidVlanId(PARANDIS_ADAPTER *pContext, ULONG VlanID) +{ + return pContext->VlanId == 0 || pContext->VlanId == VlanID; +} + +static BOOLEAN FORCEINLINE IsVlanSupported(PARANDIS_ADAPTER *pContext) +{ + return pContext->ulPriorityVlanSetting & 2; +} + +static BOOLEAN FORCEINLINE IsPrioritySupported(PARANDIS_ADAPTER *pContext) +{ + return pContext->ulPriorityVlanSetting & 1; +} + +BOOLEAN ParaNdis_ValidateMacAddress( + PUCHAR pcMacAddress, + BOOLEAN bLocal); + +NDIS_STATUS ParaNdis_InitializeContext( + PARANDIS_ADAPTER *pContext, + PNDIS_RESOURCE_LIST ResourceList); + +NDIS_STATUS ParaNdis_FinishInitialization( + PARANDIS_ADAPTER *pContext); + +VOID ParaNdis_CleanupContext( + PARANDIS_ADAPTER *pContext); + + +UINT ParaNdis_VirtIONetReleaseTransmitBuffers( + PARANDIS_ADAPTER *pContext); + +ULONG ParaNdis_DPCWorkBody( + PARANDIS_ADAPTER *pContext, + ULONG ulMaxPacketsToIndicate); + +NDIS_STATUS ParaNdis_SetMulticastList( + PARANDIS_ADAPTER *pContext, + PVOID Buffer, + ULONG BufferSize, + PUINT pBytesRead, + PUINT pBytesNeeded); + +VOID ParaNdis_VirtIOEnableIrqSynchronized( + PARANDIS_ADAPTER *pContext, + ULONG interruptSource); + +VOID ParaNdis_VirtIODisableIrqSynchronized( + PARANDIS_ADAPTER *pContext, + ULONG interruptSource); + +static __inline struct virtqueue * +ParaNdis_GetQueueForInterrupt(PARANDIS_ADAPTER *pContext, ULONG interruptSource) +{ + if (interruptSource & isTransmit) + return pContext->NetSendQueue; + if (interruptSource & isReceive) + return pContext->NetReceiveQueue; + + return NULL; +} + +static __inline BOOLEAN +ParaNDIS_IsQueueInterruptEnabled(struct virtqueue * _vq) +{ + return virtqueue_is_interrupt_enabled(_vq); +} + +VOID ParaNdis_OnPnPEvent( + PARANDIS_ADAPTER *pContext, + NDIS_DEVICE_PNP_EVENT pEvent, + PVOID pInfo, + ULONG ulSize); + +BOOLEAN ParaNdis_OnLegacyInterrupt( + PARANDIS_ADAPTER *pContext, + BOOLEAN *pRunDpc); + +BOOLEAN ParaNdis_OnQueuedInterrupt( + PARANDIS_ADAPTER *pContext, + BOOLEAN *pRunDpc, + ULONG knownInterruptSources); + +VOID ParaNdis_OnShutdown( + PARANDIS_ADAPTER *pContext); + 
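The declarations above split interrupt servicing between a light ISR-level check (ParaNdis_OnLegacyInterrupt / ParaNdis_OnQueuedInterrupt) and the deferred ParaNdis_DPCWorkBody that actually drains the queues. As an illustrative sketch only — the real wiring lives in the wxp/ParaNdis5-*.c implementation files added elsewhere in this patch, and the handler names below are hypothetical — an NDIS 5 miniport ISR/DPC pair built on these common-layer entry points could look roughly like this:

/* Illustrative sketch, not part of the patch: hypothetical NDIS5-style ISR/DPC pair
   built on the common-layer entry points declared above (assumes ndis56common.h). */
static VOID HypotheticalMiniportIsr(
    PBOOLEAN InterruptRecognized,
    PBOOLEAN QueueMiniportHandleInterrupt,
    NDIS_HANDLE MiniportAdapterContext)
{
    PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)MiniportAdapterContext;
    BOOLEAN bRunDpc = FALSE;

    /* Reads and acknowledges the device ISR status, accumulates the sources in
       pContext->InterruptStatus, and asks for a DPC only when there is work to do. */
    *InterruptRecognized = ParaNdis_OnLegacyInterrupt(pContext, &bRunDpc);
    *QueueMiniportHandleInterrupt = bRunDpc;
}

static VOID HypotheticalMiniportHandleInterrupt(NDIS_HANDLE MiniportAdapterContext)
{
    PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)MiniportAdapterContext;

    /* Drains RX and TX work; the limit bounds how many packets one DPC may indicate. */
    ParaNdis_DPCWorkBody(pContext, PARANDIS_UNLIMITED_PACKETS_TO_INDICATE);
}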
+BOOLEAN ParaNdis_CheckForHang( + PARANDIS_ADAPTER *pContext); + +VOID ParaNdis_ReportLinkStatus( + PARANDIS_ADAPTER *pContext, + BOOLEAN bForce); + +NDIS_STATUS ParaNdis_PowerOn( + PARANDIS_ADAPTER *pContext +); + +VOID ParaNdis_PowerOff( + PARANDIS_ADAPTER *pContext +); + +void ParaNdis_DebugInitialize(PVOID DriverObject,PVOID RegistryPath); +void ParaNdis_DebugCleanup(PDRIVER_OBJECT pDriverObject); +void ParaNdis_DebugRegisterMiniport(PARANDIS_ADAPTER *pContext, BOOLEAN bRegister); + + +//#define ENABLE_HISTORY_LOG +#if !defined(ENABLE_HISTORY_LOG) + +static void FORCEINLINE ParaNdis_DebugHistory( + PARANDIS_ADAPTER *pContext, + eHistoryLogOperation op, + PVOID pParam1, + ULONG lParam2, + ULONG lParam3, + ULONG lParam4) +{ + +} + +#else + +void ParaNdis_DebugHistory( + PARANDIS_ADAPTER *pContext, + eHistoryLogOperation op, + PVOID pParam1, + ULONG lParam2, + ULONG lParam3, + ULONG lParam4); + +#endif + +typedef struct _tagTxOperationParameters +{ + tPacketType packet; + PVOID ReferenceValue; + UINT nofSGFragments; + ULONG ulDataSize; + ULONG offloadMss; + ULONG tcpHeaderOffset; + ULONG flags; //see tPacketOffloadRequest +}tTxOperationParameters; + +tCopyPacketResult ParaNdis_DoCopyPacketData( + PARANDIS_ADAPTER *pContext, + tTxOperationParameters *pParams); + +typedef struct _tagMapperResult +{ + USHORT usBuffersMapped; + USHORT usBufferSpaceUsed; + ULONG ulDataSize; +}tMapperResult; + + +tCopyPacketResult ParaNdis_DoSubmitPacket(PARANDIS_ADAPTER *pContext, tTxOperationParameters *Params); + +void ParaNdis_ResetOffloadSettings(PARANDIS_ADAPTER *pContext, tOffloadSettingsFlags *pDest, PULONG from); + +tChecksumCheckResult ParaNdis_CheckRxChecksum(PARANDIS_ADAPTER *pContext, ULONG virtioFlags, PVOID pRxPacket, ULONG len); + +void ParaNdis_CallOnBugCheck(PARANDIS_ADAPTER *pContext); + +/***************************************************** +Procedures to implement for NDIS specific implementation +******************************************************/ + +PVOID ParaNdis_AllocateMemory( + PARANDIS_ADAPTER *pContext, + ULONG ulRequiredSize); + +NDIS_STATUS NTAPI ParaNdis_FinishSpecificInitialization( + PARANDIS_ADAPTER *pContext); + +VOID ParaNdis_FinalizeCleanup( + PARANDIS_ADAPTER *pContext); + +NDIS_HANDLE ParaNdis_OpenNICConfiguration( + PARANDIS_ADAPTER *pContext); + +tPacketIndicationType ParaNdis_IndicateReceivedPacket( + PARANDIS_ADAPTER *pContext, + PVOID dataBuffer, + PULONG pLength, + BOOLEAN bPrepareOnly, + pIONetDescriptor pBufferDesc); + +VOID ParaNdis_IndicateReceivedBatch( + PARANDIS_ADAPTER *pContext, + tPacketIndicationType *pBatch, + ULONG nofPackets); + +VOID ParaNdis_PacketMapper( + PARANDIS_ADAPTER *pContext, + tPacketType packet, + PVOID Reference, + struct VirtIOBufferDescriptor *buffers, + pIONetDescriptor pDesc, + tMapperResult *pMapperResult + ); + +tCopyPacketResult ParaNdis_PacketCopier( + tPacketType packet, + PVOID dest, + ULONG maxSize, + PVOID refValue, + BOOLEAN bPreview); + +BOOLEAN ParaNdis_ProcessTx( + PARANDIS_ADAPTER *pContext, + BOOLEAN IsDpc, + BOOLEAN IsInterrupt); + +BOOLEAN ParaNdis_SetTimer( + NDIS_HANDLE timer, + LONG millies); + +BOOLEAN ParaNdis_SynchronizeWithInterrupt( + PARANDIS_ADAPTER *pContext, + ULONG messageId, + tSynchronizedProcedure procedure, + PVOID parameter); + +VOID ParaNdis_Suspend( + PARANDIS_ADAPTER *pContext); + +VOID ParaNdis_Resume( + PARANDIS_ADAPTER *pContext); + +VOID ParaNdis_OnTransmitBufferReleased( + PARANDIS_ADAPTER *pContext, + IONetDescriptor *pDesc); + + +typedef VOID (*tOnAdditionalPhysicalMemoryAllocated)( + 
PARANDIS_ADAPTER *pContext, + tCompletePhysicalAddress *pAddresses); + + +typedef struct _tagPhysicalAddressAllocationContext +{ + tCompletePhysicalAddress address; + PARANDIS_ADAPTER *pContext; + tOnAdditionalPhysicalMemoryAllocated Callback; +} tPhysicalAddressAllocationContext; + + +BOOLEAN ParaNdis_InitialAllocatePhysicalMemory( + PARANDIS_ADAPTER *pContext, + tCompletePhysicalAddress *pAddresses); + +VOID ParaNdis_FreePhysicalMemory( + PARANDIS_ADAPTER *pContext, + tCompletePhysicalAddress *pAddresses); + +BOOLEAN ParaNdis_BindBufferToPacket( + PARANDIS_ADAPTER *pContext, + pIONetDescriptor pBufferDesc); + +void ParaNdis_UnbindBufferFromPacket( + PARANDIS_ADAPTER *pContext, + pIONetDescriptor pBufferDesc); + +void ParaNdis_IndicateConnect( + PARANDIS_ADAPTER *pContext, + BOOLEAN bConnected, + BOOLEAN bForce); + +void ParaNdis_RestoreDeviceConfigurationAfterReset( + PARANDIS_ADAPTER *pContext); + +VOID ParaNdis_UpdateDeviceFilters( + PARANDIS_ADAPTER *pContext); + +VOID ParaNdis_DeviceFiltersUpdateVlanId( + PARANDIS_ADAPTER *pContext); + +VOID ParaNdis_SetPowerState( + PARANDIS_ADAPTER *pContext, + NDIS_DEVICE_POWER_STATE newState); + + +#endif //-OFFLOAD_UNIT_TEST + +typedef enum _tagppResult +{ + ppresNotTested = 0, + ppresNotIP = 1, + ppresIPV4 = 2, + ppresIPV6 = 3, + ppresIPTooShort = 1, + ppresPCSOK = 1, + ppresCSOK = 2, + ppresCSBad = 3, + ppresXxpOther = 1, + ppresXxpKnown = 2, + ppresXxpIncomplete = 3, + ppresIsTCP = 0, + ppresIsUDP = 1, +}ppResult; + +typedef union _tagTcpIpPacketParsingResult +{ + struct { + /* 0 - not tested, 1 - not IP, 2 - IPV4, 3 - IPV6 */ + ULONG ipStatus : 2; + /* 0 - not tested, 1 - n/a, 2 - CS, 3 - bad */ + ULONG ipCheckSum : 2; + /* 0 - not tested, 1 - PCS, 2 - CS, 3 - bad */ + ULONG xxpCheckSum : 2; + /* 0 - not tested, 1 - other, 2 - known(contains basic TCP or UDP header), 3 - known incomplete */ + ULONG xxpStatus : 2; + /* 1 - contains complete payload */ + ULONG xxpFull : 1; + ULONG TcpUdp : 1; + ULONG fixedIpCS : 1; + ULONG fixedXxpCS : 1; + ULONG IsFragment : 1; + ULONG reserved : 3; + ULONG ipHeaderSize : 8; + ULONG XxpIpHeaderSize : 8; + }; + ULONG value; +}tTcpIpPacketParsingResult; + +typedef enum _tagPacketOffloadRequest +{ + pcrIpChecksum = (1 << 0), + pcrTcpV4Checksum = (1 << 1), + pcrUdpV4Checksum = (1 << 2), + pcrTcpV6Checksum = (1 << 3), + pcrUdpV6Checksum = (1 << 4), + pcrTcpChecksum = (pcrTcpV4Checksum | pcrTcpV6Checksum), + pcrUdpChecksum = (pcrUdpV4Checksum | pcrUdpV6Checksum), + pcrAnyChecksum = (pcrIpChecksum | pcrTcpV4Checksum | pcrUdpV4Checksum | pcrTcpV6Checksum | pcrUdpV6Checksum), + pcrLSO = (1 << 5), + pcrIsIP = (1 << 6), + pcrFixIPChecksum = (1 << 7), + pcrFixPHChecksum = (1 << 8), + pcrFixTcpV4Checksum = (1 << 9), + pcrFixUdpV4Checksum = (1 << 10), + pcrFixTcpV6Checksum = (1 << 11), + pcrFixUdpV6Checksum = (1 << 12), + pcrFixXxpChecksum = (pcrFixTcpV4Checksum | pcrFixUdpV4Checksum | pcrFixTcpV6Checksum | pcrFixUdpV6Checksum), + pcrPriorityTag = (1 << 13), + pcrNoIndirect = (1 << 14) +}tPacketOffloadRequest; + +// sw offload +tTcpIpPacketParsingResult ParaNdis_CheckSumVerify(PVOID buffer, ULONG size, ULONG flags, LPCSTR caller); +tTcpIpPacketParsingResult ParaNdis_ReviewIPPacket(PVOID buffer, ULONG size, LPCSTR caller); + +void ParaNdis_PadPacketReceived(PVOID pDataBuffer, PULONG pLength); + +#endif diff --git a/drivers/network/dd/netkvm/Common/quverp.h b/drivers/network/dd/netkvm/Common/quverp.h new file mode 100644 index 00000000000..968305a928e --- /dev/null +++ b/drivers/network/dd/netkvm/Common/quverp.h @@ -0,0 
+1,45 @@ +/* + * This file contains version resource related definitions + * + * Copyright (c) 2008-2017 Red Hat, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +#include +#include "ntverp.h" + +#include "vendor.ver" + +#undef __BUILDMACHINE__ + +#undef VER_FILEDESCRIPTION_STR +#undef VER_INTERNALNAME_STR + +#define VER_LANGNEUTRAL + +#define VENDOR_VIRTIO_PRODUCT VENDOR_PRODUCT_PREFIX "VirtIO Ethernet Adapter" +#define VER_FILEDESCRIPTION_STR VENDOR_DESC_PREFIX "NDIS Miniport Driver" VENDOR_DESC_POSTFIX +#define VER_INTERNALNAME_STR "netkvm.sys" + +#include "common.ver" diff --git a/drivers/network/dd/netkvm/Common/rhel.ver b/drivers/network/dd/netkvm/Common/rhel.ver new file mode 100644 index 00000000000..f0f80fe9e63 --- /dev/null +++ b/drivers/network/dd/netkvm/Common/rhel.ver @@ -0,0 +1,64 @@ +/* + * This file contains rhel vendor specific + * resource (version) definitions for all drivers + * + * Copyright (c) 2017 Parallels IP Holdings GmbH + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * These defines are only for Visual Studio built-in rc editor + */ +#ifndef _NT_TARGET_MAJ + #define _NT_TARGET_MAJ 1 + #define _RHEL_RELEASE_VERSION_ 20 + #define _BUILD_MAJOR_VERSION_ 300 + #define _BUILD_MINOR_VERSION_ 5800 +#endif + +/* + * rhel versioning + */ +#define VENDOR_VIRTIO_1 _NT_TARGET_MAJ +#define VENDOR_VIRTIO_2 _RHEL_RELEASE_VERSION_ +#define VENDOR_VIRTIO_3 _BUILD_MAJOR_VERSION_ +#define VENDOR_VIRTIO_4 _BUILD_MINOR_VERSION_ + +/* + * rhel strings + */ +#define VENDOR_VIRTIO_COPYRIGHT "Copyright (C) " STRINGIFY(RHEL_COPYRIGHT_YEARS) " Red Hat, Inc." +#define VENDOR_VIRTIO_COMPANY "Red Hat,\040Inc." +#define VENDOR_PREFIX "Red Hat\040" +#define VENDOR_PRODUCT_PREFIX VENDOR_PREFIX +#define QEMU_PRODUCT_PREFIX "QEMU\040" +#define VENDOR_DESC_PREFIX VENDOR_PREFIX +#define VENDOR_DESC_POSTFIX "" + +/* + * remaining macro should be defined in project .rc file + * + * VENDOR_VIRTIO_PRODUCT, VER_FILEDESCRIPTION_STR, VER_INTERNALNAME_STR + */ diff --git a/drivers/network/dd/netkvm/Common/sw-offload.c b/drivers/network/dd/netkvm/Common/sw-offload.c new file mode 100644 index 00000000000..c41a1b9122a --- /dev/null +++ b/drivers/network/dd/netkvm/Common/sw-offload.c @@ -0,0 +1,619 @@ +/* + * This file contains SW Implementation of checksum computation for IP,TCP,UDP + * + * Copyright (c) 2008-2017 Red Hat, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +#include "ndis56common.h" + +#ifdef WPP_EVENT_TRACING +#include "sw-offload.tmh" +#endif +#include + +// till IP header size is 8 bit +#define MAX_SUPPORTED_IPV6_HEADERS (256 - 4) + +typedef ULONG IPV6_ADDRESS[4]; + +// IPv6 Header RFC 2460 (40 bytes) +typedef struct _tagIPv6Header { + UCHAR ip6_ver_tc; // traffic class(low nibble), version (high nibble) + UCHAR ip6_tc_fl; // traffic class(high nibble), flow label + USHORT ip6_fl; // flow label, the rest + USHORT ip6_payload_len; // length of following headers and payload + UCHAR ip6_next_header; // next header type + UCHAR ip6_hoplimit; // hop limit + IPV6_ADDRESS ip6_src_address; // + IPV6_ADDRESS ip6_dst_address; // +} IPv6Header; + +typedef union +{ + IPv6Header v6; + IPv4Header v4; +} IPHeader; + +// IPv6 Header RFC 2460 (n*8 bytes) +typedef struct _tagIPv6ExtHeader { + UCHAR ip6ext_next_header; // next header type + UCHAR ip6ext_hdr_len; // length of this header in 8 bytes unit, not including first 8 bytes + USHORT options; // +} IPv6ExtHeader; + +// IP Pseudo Header RFC 768 +typedef struct _tagIPv4PseudoHeader { + ULONG ipph_src; // Source address + ULONG ipph_dest; // Destination address + UCHAR ipph_zero; // 0 + UCHAR ipph_protocol; // TCP/UDP + USHORT ipph_length; // TCP/UDP length +}tIPv4PseudoHeader; + +// IPv6 Pseudo Header RFC 2460 +typedef struct _tagIPv6PseudoHeader { + IPV6_ADDRESS ipph_src; // Source address + IPV6_ADDRESS ipph_dest; // Destination address + ULONG ipph_length; // TCP/UDP length + UCHAR z1; // 0 + UCHAR z2; // 0 + UCHAR z3; // 0 + UCHAR ipph_protocol; // TCP/UDP +}tIPv6PseudoHeader; + + +#define PROTOCOL_TCP 6 +#define PROTOCOL_UDP 17 + + +#define IP_HEADER_LENGTH(pHeader) (((pHeader)->ip_verlen & 0x0F) << 2) +#define TCP_HEADER_LENGTH(pHeader) ((pHeader->tcp_flags & 0xF0) >> 2) + + + +static __inline USHORT CheckSumCalculator(ULONG val, PVOID buffer, ULONG len) +{ + PUSHORT pus = (PUSHORT)buffer; + ULONG count = len >> 1; + while (count--) val += *pus++; + if (len & 1) val += (USHORT)*(PUCHAR)pus; + val = (((val >> 16) | (val << 16)) + val) >> 16; + return (USHORT)~val; +} + + +/****************************************** + IP header checksum calculator +*******************************************/ +static __inline VOID CalculateIpChecksum(IPv4Header *pIpHeader) +{ + pIpHeader->ip_xsum = 0; + pIpHeader->ip_xsum = CheckSumCalculator(0, pIpHeader, IP_HEADER_LENGTH(pIpHeader)); +} + +static __inline tTcpIpPacketParsingResult +ProcessTCPHeader(tTcpIpPacketParsingResult _res, PVOID pIpHeader, ULONG len, USHORT ipHeaderSize) +{ + ULONG tcpipDataAt; + tTcpIpPacketParsingResult res = _res; + tcpipDataAt = ipHeaderSize + sizeof(TCPHeader); + res.xxpStatus = ppresXxpIncomplete; + res.TcpUdp = ppresIsTCP; + + if (len >= 
tcpipDataAt) + { + TCPHeader *pTcpHeader = (TCPHeader *)RtlOffsetToPointer(pIpHeader, ipHeaderSize); + res.xxpStatus = ppresXxpKnown; + tcpipDataAt = ipHeaderSize + TCP_HEADER_LENGTH(pTcpHeader); + res.XxpIpHeaderSize = tcpipDataAt; + } + else + { + DPrintf(2, ("tcp: %d < min headers %d", len, tcpipDataAt)); + } + return res; +} + +static __inline tTcpIpPacketParsingResult +ProcessUDPHeader(tTcpIpPacketParsingResult _res, PVOID pIpHeader, ULONG len, USHORT ipHeaderSize) +{ + tTcpIpPacketParsingResult res = _res; + ULONG udpDataStart = ipHeaderSize + sizeof(UDPHeader); + res.xxpStatus = ppresXxpIncomplete; + res.TcpUdp = ppresIsUDP; + res.XxpIpHeaderSize = udpDataStart; + if (len >= udpDataStart) + { + UDPHeader *pUdpHeader = (UDPHeader *)RtlOffsetToPointer(pIpHeader, ipHeaderSize); + USHORT datagramLength = swap_short(pUdpHeader->udp_length); + res.xxpStatus = ppresXxpKnown; + // may be full or not, but the datagram length is known + DPrintf(2, ("udp: len %d, datagramLength %d", len, datagramLength)); + } + return res; +} + +static __inline tTcpIpPacketParsingResult +QualifyIpPacket(IPHeader *pIpHeader, ULONG len) +{ + tTcpIpPacketParsingResult res; + UCHAR ver_len = pIpHeader->v4.ip_verlen; + UCHAR ip_version = (ver_len & 0xF0) >> 4; + USHORT ipHeaderSize = 0; + USHORT fullLength = 0; + res.value = 0; + + if (ip_version == 4) + { + ipHeaderSize = (ver_len & 0xF) << 2; + fullLength = swap_short(pIpHeader->v4.ip_length); + DPrintf(3, ("ip_version %d, ipHeaderSize %d, protocol %d, iplen %d", + ip_version, ipHeaderSize, pIpHeader->v4.ip_protocol, fullLength)); + res.ipStatus = (ipHeaderSize >= sizeof(IPv4Header)) ? ppresIPV4 : ppresNotIP; + if (len < ipHeaderSize) res.ipCheckSum = ppresIPTooShort; + if (fullLength) {} + else + { + DPrintf(2, ("ip v.%d, iplen %d", ip_version, fullLength)); + } + } + else if (ip_version == 6) + { + UCHAR nextHeader = pIpHeader->v6.ip6_next_header; + BOOLEAN bParsingDone = FALSE; + ipHeaderSize = sizeof(pIpHeader->v6); + res.ipStatus = ppresIPV6; + res.ipCheckSum = ppresCSOK; + fullLength = swap_short(pIpHeader->v6.ip6_payload_len); + fullLength += ipHeaderSize; + while (nextHeader != 59) + { + IPv6ExtHeader *pExt; + switch (nextHeader) + { + case PROTOCOL_TCP: + bParsingDone = TRUE; + res.xxpStatus = ppresXxpKnown; + res.TcpUdp = ppresIsTCP; + res.xxpFull = len >= fullLength ? 1 : 0; + res = ProcessTCPHeader(res, pIpHeader, len, ipHeaderSize); + break; + case PROTOCOL_UDP: + bParsingDone = TRUE; + res.xxpStatus = ppresXxpKnown; + res.TcpUdp = ppresIsUDP; + res.xxpFull = len >= fullLength ? 1 : 0; + res = ProcessUDPHeader(res, pIpHeader, len, ipHeaderSize); + break; + //existing extended headers + case 0: + __fallthrough; + case 60: + __fallthrough; + case 43: + __fallthrough; + case 44: + __fallthrough; + case 51: + __fallthrough; + case 50: + __fallthrough; + case 135: + if (len >= ((ULONG)ipHeaderSize + 8)) + { + pExt = (IPv6ExtHeader *)((PUCHAR)pIpHeader + ipHeaderSize); + nextHeader = pExt->ip6ext_next_header; + ipHeaderSize += 8; + ipHeaderSize += pExt->ip6ext_hdr_len * 8; + } + else + { + DPrintf(0, ("[%s] ERROR: Break in the middle of ext. 
headers(len %d, hdr > %d)", __FUNCTION__, len, ipHeaderSize)); + res.ipStatus = ppresNotIP; + bParsingDone = TRUE; + } + break; + //any other protocol + default: + res.xxpStatus = ppresXxpOther; + bParsingDone = TRUE; + break; + } + if (bParsingDone) + break; + } + if (ipHeaderSize <= MAX_SUPPORTED_IPV6_HEADERS) + { + DPrintf(3, ("ip_version %d, ipHeaderSize %d, protocol %d, iplen %d", + ip_version, ipHeaderSize, nextHeader, fullLength)); + res.ipHeaderSize = ipHeaderSize; + } + else + { + DPrintf(0, ("[%s] ERROR: IP chain is too large (%d)", __FUNCTION__, ipHeaderSize)); + res.ipStatus = ppresNotIP; + } + } + + if (res.ipStatus == ppresIPV4) + { + res.ipHeaderSize = ipHeaderSize; + res.xxpFull = len >= fullLength ? 1 : 0; + // bit "more fragments" or fragment offset mean the packet is fragmented + res.IsFragment = (pIpHeader->v4.ip_offset & ~0xC0) != 0; + switch (pIpHeader->v4.ip_protocol) + { + case PROTOCOL_TCP: + { + res = ProcessTCPHeader(res, pIpHeader, len, ipHeaderSize); + } + break; + case PROTOCOL_UDP: + { + res = ProcessUDPHeader(res, pIpHeader, len, ipHeaderSize); + } + break; + default: + res.xxpStatus = ppresXxpOther; + break; + } + } + return res; +} + +static __inline USHORT GetXxpHeaderAndPayloadLen(IPHeader *pIpHeader, tTcpIpPacketParsingResult res) +{ + if (res.ipStatus == ppresIPV4) + { + USHORT headerLength = IP_HEADER_LENGTH(&pIpHeader->v4); + USHORT len = swap_short(pIpHeader->v4.ip_length); + return len - headerLength; + } + if (res.ipStatus == ppresIPV6) + { + USHORT fullLength = swap_short(pIpHeader->v6.ip6_payload_len); + return fullLength + sizeof(pIpHeader->v6) - (USHORT)res.ipHeaderSize; + } + return 0; +} + +static __inline USHORT CalculateIpv4PseudoHeaderChecksum(IPv4Header *pIpHeader, USHORT headerAndPayloadLen) +{ + tIPv4PseudoHeader ipph; + USHORT checksum; + ipph.ipph_src = pIpHeader->ip_src; + ipph.ipph_dest = pIpHeader->ip_dest; + ipph.ipph_zero = 0; + ipph.ipph_protocol = pIpHeader->ip_protocol; + ipph.ipph_length = swap_short(headerAndPayloadLen); + checksum = CheckSumCalculator(0, &ipph, sizeof(ipph)); + return ~checksum; +} + + +static __inline USHORT CalculateIpv6PseudoHeaderChecksum(IPv6Header *pIpHeader, USHORT headerAndPayloadLen) +{ + tIPv6PseudoHeader ipph; + USHORT checksum; + ipph.ipph_src[0] = pIpHeader->ip6_src_address[0]; + ipph.ipph_src[1] = pIpHeader->ip6_src_address[1]; + ipph.ipph_src[2] = pIpHeader->ip6_src_address[2]; + ipph.ipph_src[3] = pIpHeader->ip6_src_address[3]; + ipph.ipph_dest[0] = pIpHeader->ip6_dst_address[0]; + ipph.ipph_dest[1] = pIpHeader->ip6_dst_address[1]; + ipph.ipph_dest[2] = pIpHeader->ip6_dst_address[2]; + ipph.ipph_dest[3] = pIpHeader->ip6_dst_address[3]; + ipph.z1 = ipph.z2 = ipph.z3 = 0; + ipph.ipph_protocol = pIpHeader->ip6_next_header; + ipph.ipph_length = swap_short(headerAndPayloadLen); + checksum = CheckSumCalculator(0, &ipph, sizeof(ipph)); + return ~checksum; +} + +static __inline USHORT CalculateIpPseudoHeaderChecksum(IPHeader *pIpHeader, + tTcpIpPacketParsingResult res, + USHORT headerAndPayloadLen) +{ + if (res.ipStatus == ppresIPV4) + return CalculateIpv4PseudoHeaderChecksum(&pIpHeader->v4, headerAndPayloadLen); + if (res.ipStatus == ppresIPV6) + return CalculateIpv6PseudoHeaderChecksum(&pIpHeader->v6, headerAndPayloadLen); + return 0; +} + +static __inline BOOLEAN +CompareNetCheckSumOnEndSystem(USHORT computedChecksum, USHORT arrivedChecksum) +{ + //According to RFC 1624 sec. 
3 + //Checksum verification mechanism should treat 0xFFFF + //checksum value from received packet as 0x0000 + if(arrivedChecksum == 0xFFFF) + arrivedChecksum = 0; + + return computedChecksum == arrivedChecksum; +} + +/****************************************** + Calculates IP header checksum calculator + it can be already calculated + the header must be complete! +*******************************************/ +static __inline tTcpIpPacketParsingResult +VerifyIpChecksum( + IPv4Header *pIpHeader, + tTcpIpPacketParsingResult known, + BOOLEAN bFix) +{ + tTcpIpPacketParsingResult res = known; + if (res.ipCheckSum != ppresIPTooShort) + { + USHORT saved = pIpHeader->ip_xsum; + CalculateIpChecksum(pIpHeader); + res.ipCheckSum = CompareNetCheckSumOnEndSystem(pIpHeader->ip_xsum, saved) ? ppresCSOK : ppresCSBad; + if (!bFix) + pIpHeader->ip_xsum = saved; + else + res.fixedIpCS = res.ipCheckSum == ppresCSBad; + } + return res; +} + +/********************************************* +Calculates UDP checksum, assuming the checksum field +is initialized with pseudoheader checksum +**********************************************/ +static VOID CalculateUdpChecksumGivenPseudoCS(UDPHeader *pUdpHeader, ULONG udpLength) +{ + pUdpHeader->udp_xsum = CheckSumCalculator(0, pUdpHeader, udpLength); +} + +/********************************************* +Calculates TCP checksum, assuming the checksum field +is initialized with pseudoheader checksum +**********************************************/ +static __inline VOID CalculateTcpChecksumGivenPseudoCS(TCPHeader *pTcpHeader, ULONG tcpLength) +{ + pTcpHeader->tcp_xsum = CheckSumCalculator(0, pTcpHeader, tcpLength); +} + +/************************************************ +Checks (and fix if required) the TCP checksum +sets flags in result structure according to verification +TcpPseudoOK if valid pseudo CS was found +TcpOK if valid TCP checksum was found +************************************************/ +static __inline tTcpIpPacketParsingResult +VerifyTcpChecksum( IPHeader *pIpHeader, ULONG len, tTcpIpPacketParsingResult known, ULONG whatToFix) +{ + USHORT phcs; + tTcpIpPacketParsingResult res = known; + TCPHeader *pTcpHeader = (TCPHeader *)RtlOffsetToPointer(pIpHeader, res.ipHeaderSize); + USHORT saved = pTcpHeader->tcp_xsum; + USHORT xxpHeaderAndPayloadLen = GetXxpHeaderAndPayloadLen(pIpHeader, res); + if (len >= res.ipHeaderSize) + { + phcs = CalculateIpPseudoHeaderChecksum(pIpHeader, res, xxpHeaderAndPayloadLen); + res.xxpCheckSum = CompareNetCheckSumOnEndSystem(phcs, saved) ? 
ppresPCSOK : ppresCSBad; + if (res.xxpCheckSum != ppresPCSOK || whatToFix) + { + if (whatToFix & pcrFixPHChecksum) + { + if (len >= (ULONG)(res.ipHeaderSize + sizeof(*pTcpHeader))) + { + pTcpHeader->tcp_xsum = phcs; + res.fixedXxpCS = res.xxpCheckSum != ppresPCSOK; + } + else + res.xxpStatus = ppresXxpIncomplete; + } + else if (res.xxpFull) + { + //USHORT ipFullLength = swap_short(pIpHeader->v4.ip_length); + pTcpHeader->tcp_xsum = phcs; + CalculateTcpChecksumGivenPseudoCS(pTcpHeader, xxpHeaderAndPayloadLen); + if (CompareNetCheckSumOnEndSystem(pTcpHeader->tcp_xsum, saved)) + res.xxpCheckSum = ppresCSOK; + + if (!(whatToFix & pcrFixXxpChecksum)) + pTcpHeader->tcp_xsum = saved; + else + res.fixedXxpCS = + res.xxpCheckSum == ppresCSBad || res.xxpCheckSum == ppresPCSOK; + } + else if (whatToFix) + { + res.xxpStatus = ppresXxpIncomplete; + } + } + else if (res.xxpFull) + { + // we have correct PHCS and we do not need to fix anything + // there is a very small chance that it is also good TCP CS + // in such rare case we give a priority to TCP CS + CalculateTcpChecksumGivenPseudoCS(pTcpHeader, xxpHeaderAndPayloadLen); + if (CompareNetCheckSumOnEndSystem(pTcpHeader->tcp_xsum, saved)) + res.xxpCheckSum = ppresCSOK; + pTcpHeader->tcp_xsum = saved; + } + } + else + res.ipCheckSum = ppresIPTooShort; + return res; +} + +/************************************************ +Checks (and fix if required) the UDP checksum +sets flags in result structure according to verification +UdpPseudoOK if valid pseudo CS was found +UdpOK if valid UDP checksum was found +************************************************/ +static __inline tTcpIpPacketParsingResult +VerifyUdpChecksum( IPHeader *pIpHeader, ULONG len, tTcpIpPacketParsingResult known, ULONG whatToFix) +{ + USHORT phcs; + tTcpIpPacketParsingResult res = known; + UDPHeader *pUdpHeader = (UDPHeader *)RtlOffsetToPointer(pIpHeader, res.ipHeaderSize); + USHORT saved = pUdpHeader->udp_xsum; + USHORT xxpHeaderAndPayloadLen = GetXxpHeaderAndPayloadLen(pIpHeader, res); + if (len >= res.ipHeaderSize) + { + phcs = CalculateIpPseudoHeaderChecksum(pIpHeader, res, xxpHeaderAndPayloadLen); + res.xxpCheckSum = CompareNetCheckSumOnEndSystem(phcs, saved) ? 
ppresPCSOK : ppresCSBad; + if (whatToFix & pcrFixPHChecksum) + { + if (len >= (ULONG)(res.ipHeaderSize + sizeof(UDPHeader))) + { + pUdpHeader->udp_xsum = phcs; + res.fixedXxpCS = res.xxpCheckSum != ppresPCSOK; + } + else + res.xxpStatus = ppresXxpIncomplete; + } + else if (res.xxpCheckSum != ppresPCSOK || (whatToFix & pcrFixXxpChecksum)) + { + if (res.xxpFull) + { + pUdpHeader->udp_xsum = phcs; + CalculateUdpChecksumGivenPseudoCS(pUdpHeader, xxpHeaderAndPayloadLen); + if (CompareNetCheckSumOnEndSystem(pUdpHeader->udp_xsum, saved)) + res.xxpCheckSum = ppresCSOK; + + if (!(whatToFix & pcrFixXxpChecksum)) + pUdpHeader->udp_xsum = saved; + else + res.fixedXxpCS = + res.xxpCheckSum == ppresCSBad || res.xxpCheckSum == ppresPCSOK; + } + else + res.xxpCheckSum = ppresXxpIncomplete; + } + else if (res.xxpFull) + { + // we have correct PHCS and we do not need to fix anything + // there is a very small chance that it is also good UDP CS + // in such rare case we give a priority to UDP CS + CalculateUdpChecksumGivenPseudoCS(pUdpHeader, xxpHeaderAndPayloadLen); + if (CompareNetCheckSumOnEndSystem(pUdpHeader->udp_xsum, saved)) + res.xxpCheckSum = ppresCSOK; + pUdpHeader->udp_xsum = saved; + } + } + else + res.ipCheckSum = ppresIPTooShort; + + return res; +} + +static LPCSTR __inline GetPacketCase(tTcpIpPacketParsingResult res) +{ + static const char *const IPCaseName[4] = { "not tested", "Non-IP", "IPv4", "IPv6" }; + if (res.xxpStatus == ppresXxpKnown) return res.TcpUdp == ppresIsTCP ? + (res.ipStatus == ppresIPV4 ? "TCPv4" : "TCPv6") : + (res.ipStatus == ppresIPV4 ? "UDPv4" : "UDPv6"); + if (res.xxpStatus == ppresXxpIncomplete) return res.TcpUdp == ppresIsTCP ? "Incomplete TCP" : "Incomplete UDP"; + if (res.xxpStatus == ppresXxpOther) return "IP"; + return IPCaseName[res.ipStatus]; +} + +static LPCSTR __inline GetIPCSCase(tTcpIpPacketParsingResult res) +{ + static const char *const CSCaseName[4] = { "not tested", "(too short)", "OK", "Bad" }; + return CSCaseName[res.ipCheckSum]; +} + +static LPCSTR __inline GetXxpCSCase(tTcpIpPacketParsingResult res) +{ + static const char *const CSCaseName[4] = { "-", "PCS", "CS", "Bad" }; + return CSCaseName[res.xxpCheckSum]; +} + +static __inline VOID PrintOutParsingResult( + tTcpIpPacketParsingResult res, + int level, + LPCSTR procname) +{ + DPrintf(level, ("[%s] %s packet IPCS %s%s, checksum %s%s", procname, + GetPacketCase(res), + GetIPCSCase(res), + res.fixedIpCS ? "(fixed)" : "", + GetXxpCSCase(res), + res.fixedXxpCS ? 
"(fixed)" : "")); +} + +tTcpIpPacketParsingResult ParaNdis_CheckSumVerify(PVOID buffer, ULONG size, ULONG flags, LPCSTR caller) +{ + tTcpIpPacketParsingResult res = QualifyIpPacket(buffer, size); + if (res.ipStatus == ppresIPV4) + { + if (flags & pcrIpChecksum) + res = VerifyIpChecksum(buffer, res, (flags & pcrFixIPChecksum) != 0); + if(res.xxpStatus == ppresXxpKnown) + { + if (res.TcpUdp == ppresIsTCP) /* TCP */ + { + if(flags & pcrTcpV4Checksum) + { + res = VerifyTcpChecksum(buffer, size, res, flags & (pcrFixPHChecksum | pcrFixTcpV4Checksum)); + } + } + else /* UDP */ + { + if (flags & pcrUdpV4Checksum) + { + res = VerifyUdpChecksum(buffer, size, res, flags & (pcrFixPHChecksum | pcrFixUdpV4Checksum)); + } + } + } + } + else if (res.ipStatus == ppresIPV6) + { + if(res.xxpStatus == ppresXxpKnown) + { + if (res.TcpUdp == ppresIsTCP) /* TCP */ + { + if(flags & pcrTcpV6Checksum) + { + res = VerifyTcpChecksum(buffer, size, res, flags & (pcrFixPHChecksum | pcrFixTcpV6Checksum)); + } + } + else /* UDP */ + { + if (flags & pcrUdpV6Checksum) + { + res = VerifyUdpChecksum(buffer, size, res, flags & (pcrFixPHChecksum | pcrFixUdpV6Checksum)); + } + } + } + } + PrintOutParsingResult(res, 1, caller); + return res; +} + +tTcpIpPacketParsingResult ParaNdis_ReviewIPPacket(PVOID buffer, ULONG size, LPCSTR caller) +{ + tTcpIpPacketParsingResult res = QualifyIpPacket(buffer, size); + PrintOutParsingResult(res, 1, caller); + return res; +} diff --git a/drivers/network/dd/netkvm/Common/vendor.ver b/drivers/network/dd/netkvm/Common/vendor.ver new file mode 100644 index 00000000000..daae09d6679 --- /dev/null +++ b/drivers/network/dd/netkvm/Common/vendor.ver @@ -0,0 +1,98 @@ +/* + * This file contains resource (version) definitions for all drivers + * that are independent from vendor. + * + * Copyright (c) 2017 Parallels IP Holdings GmbH + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +/* + * These defines are only for Visual Studio built-in rc editor + * + * VER_OS <= $(TargetOS) for description postfix + * VER_ARCH <= $(VerArch) for description postfix + * VENDOR_VER <= $(_VENDOR_).ver (rhel by default) + */ +#ifndef VER_OS + #define VER_OS Win?? +#endif +#ifndef VER_ARCH + #define VER_ARCH x?? +#endif +#ifndef VENDOR_VER + #define VENDOR_VER rhel.ver + //#define VENDOR_VER vz.ver +#endif + +/* + * AUTO: these defines are defined in common.ver + * VER_PRODUCTVERSION <= VER_PRODUCTMAJORVERSION,VER_PRODUCTMINORVERSION,VER_PRODUCTBUILD,VER_PRODUCTBUILD_QFE + * VER_FILEVERSION <= VER_PRODUCTVERSION + * VER_FILEVERSION_STR <= VER_PRODUCTVERSION_STR + * VER_ORIGINALFILENAME_STR <= VER_INTERNALNAME_STR + */ + +/* + * COMMON: these defines are strictly required + */ +#define VER_LANGNEUTRAL +#define VER_FILETYPE VFT_DRV +#define VER_FILESUBTYPE VFT2_DRV_SYSTEM +#define VER_FILEDESCRIPTION_STR "File Description required" +#define VER_INTERNALNAME_STR "File Name required" + +/* + * STRINGIFY + */ +#define STRINGIFY_AUX(X) #X +#define STRINGIFY(X) STRINGIFY_AUX(X) + +/* + * VENDOR SPECIFIC + */ +#include STRINGIFY(VENDOR_VER) + +/* + * Applying vendor specific + */ +#undef VER_PRODUCTBUILD +#undef VER_PRODUCTBUILD_QFE +#undef VER_PRODUCTMAJORVERSION +#undef VER_PRODUCTMINORVERSION + +#define VER_PRODUCTMAJORVERSION VENDOR_VIRTIO_1 +#define VER_PRODUCTMINORVERSION VENDOR_VIRTIO_2 +#define VER_PRODUCTBUILD VENDOR_VIRTIO_3 +#define VER_PRODUCTBUILD_QFE VENDOR_VIRTIO_4 + +#undef VER_LEGALTRADEMARKS_STR +#undef VER_LEGALCOPYRIGHT_STR +#undef VER_COMPANYNAME_STR +#undef VER_PRODUCTNAME_STR + +#define VER_LEGALTRADEMARKS_STR "" +#define VER_LEGALCOPYRIGHT_STR VENDOR_VIRTIO_COPYRIGHT +#define VER_COMPANYNAME_STR VENDOR_VIRTIO_COMPANY +#define VER_PRODUCTNAME_STR VENDOR_VIRTIO_PRODUCT diff --git a/drivers/network/dd/netkvm/virtio/LICENSE b/drivers/network/dd/netkvm/virtio/LICENSE new file mode 100644 index 00000000000..15b448659cc --- /dev/null +++ b/drivers/network/dd/netkvm/virtio/LICENSE @@ -0,0 +1,30 @@ +Copyright 2009-2017 Red Hat, Inc. and/or its affiliates. +Copyright 2016 Google, Inc. +Copyright 2007 IBM Corporation + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/drivers/network/dd/netkvm/virtio/VirtIO.h b/drivers/network/dd/netkvm/virtio/VirtIO.h new file mode 100644 index 00000000000..030e918d744 --- /dev/null +++ b/drivers/network/dd/netkvm/virtio/VirtIO.h @@ -0,0 +1,128 @@ +#ifndef _LINUX_VIRTIO_H +#define _LINUX_VIRTIO_H + +#include "virtio_ring.h" + +#define scatterlist VirtIOBufferDescriptor + +struct VirtIOBufferDescriptor { + PHYSICAL_ADDRESS physAddr; + ULONG length; +}; + +typedef int (*proc_virtqueue_add_buf)( + struct virtqueue *vq, + struct scatterlist sg[], + unsigned int out_num, + unsigned int in_num, + void *opaque, + void *va_indirect, + ULONGLONG phys_indirect); + +typedef bool(*proc_virtqueue_kick_prepare)(struct virtqueue *vq); + +typedef void(*proc_virtqueue_kick_always)(struct virtqueue *vq); + +typedef void * (*proc_virtqueue_get_buf)(struct virtqueue *vq, unsigned int *len); + +typedef void(*proc_virtqueue_disable_cb)(struct virtqueue *vq); + +typedef bool(*proc_virtqueue_enable_cb)(struct virtqueue *vq); + +typedef bool(*proc_virtqueue_enable_cb_delayed)(struct virtqueue *vq); + +typedef void * (*proc_virtqueue_detach_unused_buf)(struct virtqueue *vq); + +typedef BOOLEAN(*proc_virtqueue_is_interrupt_enabled)(struct virtqueue *vq); + +typedef BOOLEAN(*proc_virtqueue_has_buf)(struct virtqueue *vq); + +typedef void(*proc_virtqueue_shutdown)(struct virtqueue *vq); + +/* Represents one virtqueue; only data pointed to by the vring structure is exposed to the host */ +struct virtqueue { + VirtIODevice *vdev; + unsigned int index; + void (*notification_cb)(struct virtqueue *vq); + void *notification_addr; + void *avail_va; + void *used_va; + proc_virtqueue_add_buf add_buf; + proc_virtqueue_kick_prepare kick_prepare; + proc_virtqueue_kick_always kick_always; + proc_virtqueue_get_buf get_buf; + proc_virtqueue_disable_cb disable_cb; + proc_virtqueue_enable_cb enable_cb; + proc_virtqueue_enable_cb_delayed enable_cb_delayed; + proc_virtqueue_detach_unused_buf detach_unused_buf; + proc_virtqueue_is_interrupt_enabled is_interrupt_enabled; + proc_virtqueue_has_buf has_buf; + proc_virtqueue_shutdown shutdown; +}; + +static inline int virtqueue_add_buf( + struct virtqueue *vq, + struct scatterlist sg[], + unsigned int out_num, + unsigned int in_num, + void *opaque, + void *va_indirect, + ULONGLONG phys_indirect) +{ + return vq->add_buf(vq, sg, out_num, in_num, opaque, va_indirect, phys_indirect); +} + +static inline bool virtqueue_kick_prepare(struct virtqueue *vq) +{ + return vq->kick_prepare(vq); +} + +static inline void virtqueue_kick_always(struct virtqueue *vq) +{ + vq->kick_always(vq); +} + +static inline void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len) +{ + return vq->get_buf(vq, len); +} + +static inline void virtqueue_disable_cb(struct virtqueue *vq) +{ + vq->disable_cb(vq); +} + +static inline bool virtqueue_enable_cb(struct virtqueue *vq) +{ + return vq->enable_cb(vq); +} + +static inline bool virtqueue_enable_cb_delayed(struct virtqueue *vq) +{ + return vq->enable_cb_delayed(vq); +} + 
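+/* Detaches and returns a buffer that was added to the queue but never
+ * consumed by the device, or NULL when none remain (used during teardown). */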
+static inline void *virtqueue_detach_unused_buf(struct virtqueue *vq) +{ + return vq->detach_unused_buf(vq); +} + +static inline BOOLEAN virtqueue_is_interrupt_enabled(struct virtqueue *vq) +{ + return vq->is_interrupt_enabled(vq); +} + +static inline BOOLEAN virtqueue_has_buf(struct virtqueue *vq) +{ + return vq->has_buf(vq); +} + +static inline void virtqueue_shutdown(struct virtqueue *vq) +{ + vq->shutdown(vq); +} + +void virtqueue_notify(struct virtqueue *vq); +void virtqueue_kick(struct virtqueue *vq); + +#endif /* _LINUX_VIRTIO_H */ diff --git a/drivers/network/dd/netkvm/virtio/VirtIOPCICommon.c b/drivers/network/dd/netkvm/virtio/VirtIOPCICommon.c new file mode 100644 index 00000000000..1b20ce87dbc --- /dev/null +++ b/drivers/network/dd/netkvm/virtio/VirtIOPCICommon.c @@ -0,0 +1,411 @@ +/* + * Virtio PCI driver - common functionality for all device versions + * + * Copyright IBM Corp. 2007 + * Copyright Red Hat, Inc. 2014 + * + * Authors: + * Anthony Liguori + * Rusty Russell + * Michael S. Tsirkin + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include "osdep.h" +#include "virtio_pci.h" +#include "VirtIO.h" +#include "kdebugprint.h" +#include + +#include "virtio_pci_common.h" + +NTSTATUS virtio_device_initialize(VirtIODevice *vdev, + const VirtIOSystemOps *pSystemOps, + PVOID DeviceContext, + bool msix_used) +{ + NTSTATUS status; + + RtlZeroMemory(vdev, sizeof(VirtIODevice)); + vdev->DeviceContext = DeviceContext; + vdev->system = pSystemOps; + vdev->msix_used = msix_used; + vdev->info = vdev->inline_info; + vdev->maxQueues = ARRAYSIZE(vdev->inline_info); + + status = vio_modern_initialize(vdev); + if (status == STATUS_DEVICE_NOT_CONNECTED) { + /* fall back to legacy virtio device */ + status = vio_legacy_initialize(vdev); + } + if (NT_SUCCESS(status)) { + /* Always start by resetting the device */ + virtio_device_reset(vdev); + + /* Acknowledge that we've seen the device. 
*/ + virtio_add_status(vdev, VIRTIO_CONFIG_S_ACKNOWLEDGE); + + /* If we are here, we must have found a driver for the device */ + virtio_add_status(vdev, VIRTIO_CONFIG_S_DRIVER); + } + + return status; +} + +void virtio_device_shutdown(VirtIODevice *vdev) +{ + if (vdev->info && + vdev->info != vdev->inline_info) { + mem_free_nonpaged_block(vdev, vdev->info); + vdev->info = NULL; + } +} + +u8 virtio_get_status(VirtIODevice *vdev) +{ + return vdev->device->get_status(vdev); +} + +void virtio_set_status(VirtIODevice *vdev, u8 status) +{ + vdev->device->set_status(vdev, status); +} + +void virtio_add_status(VirtIODevice *vdev, u8 status) +{ + vdev->device->set_status(vdev, (u8)(vdev->device->get_status(vdev) | status)); +} + +void virtio_device_reset(VirtIODevice *vdev) +{ + vdev->device->reset(vdev); +} + +void virtio_device_ready(VirtIODevice *vdev) +{ + unsigned status = vdev->device->get_status(vdev); + + ASSERT(!(status & VIRTIO_CONFIG_S_DRIVER_OK)); + vdev->device->set_status(vdev, (u8)(status | VIRTIO_CONFIG_S_DRIVER_OK)); +} + +u64 virtio_get_features(VirtIODevice *vdev) +{ + return vdev->device->get_features(vdev); +} + +NTSTATUS virtio_set_features(VirtIODevice *vdev, u64 features) +{ + unsigned char dev_status; + NTSTATUS status; + + vdev->event_suppression_enabled = virtio_is_feature_enabled(features, VIRTIO_RING_F_EVENT_IDX); + vdev->packed_ring = virtio_is_feature_enabled(features, VIRTIO_F_RING_PACKED); + + status = vdev->device->set_features(vdev, features); + if (!NT_SUCCESS(status)) { + return status; + } + + if (!virtio_is_feature_enabled(features, VIRTIO_F_VERSION_1)) { + return status; + } + + virtio_add_status(vdev, VIRTIO_CONFIG_S_FEATURES_OK); + dev_status = vdev->device->get_status(vdev); + if (!(dev_status & VIRTIO_CONFIG_S_FEATURES_OK)) { + DPrintf(0, "virtio: device refuses features: %x\n", dev_status); + status = STATUS_INVALID_PARAMETER; + } + return status; +} + +/* Read @count fields, @bytes each. */ +static void virtio_cread_many(VirtIODevice *vdev, + unsigned int offset, + void *buf, size_t count, size_t bytes) +{ + u32 old, gen = vdev->device->get_config_generation ? + vdev->device->get_config_generation(vdev) : 0; + size_t i; + + do { + old = gen; + + for (i = 0; i < count; i++) { + vdev->device->get_config(vdev, (unsigned)(offset + bytes * i), + (char *)buf + i * bytes, (unsigned)bytes); + } + + gen = vdev->device->get_config_generation ? + vdev->device->get_config_generation(vdev) : 0; + } while (gen != old); +} + +void virtio_get_config(VirtIODevice *vdev, unsigned offset, + void *buf, unsigned len) +{ + switch (len) { + case 1: + case 2: + case 4: + vdev->device->get_config(vdev, offset, buf, len); + break; + case 8: + virtio_cread_many(vdev, offset, buf, 2, sizeof(u32)); + break; + default: + virtio_cread_many(vdev, offset, buf, len, 1); + break; + } +} + +/* Write @count fields, @bytes each. 
*/ +static void virtio_cwrite_many(VirtIODevice *vdev, + unsigned int offset, + void *buf, size_t count, size_t bytes) +{ + size_t i; + for (i = 0; i < count; i++) { + vdev->device->set_config(vdev, (unsigned)(offset + bytes * i), + (char *)buf + i * bytes, (unsigned)bytes); + } +} + +void virtio_set_config(VirtIODevice *vdev, unsigned offset, + void *buf, unsigned len) +{ + switch (len) { + case 1: + case 2: + case 4: + vdev->device->set_config(vdev, offset, buf, len); + break; + case 8: + virtio_cwrite_many(vdev, offset, buf, 2, sizeof(u32)); + break; + default: + virtio_cwrite_many(vdev, offset, buf, len, 1); + break; + } +} + +NTSTATUS virtio_query_queue_allocation(VirtIODevice *vdev, + unsigned index, + unsigned short *pNumEntries, + unsigned long *pRingSize, + unsigned long *pHeapSize) +{ + return vdev->device->query_queue_alloc(vdev, index, pNumEntries, pRingSize, pHeapSize); +} + +NTSTATUS virtio_reserve_queue_memory(VirtIODevice *vdev, unsigned nvqs) +{ + if (nvqs > vdev->maxQueues) { + /* allocate new space for queue infos */ + void *new_info = mem_alloc_nonpaged_block(vdev, nvqs * virtio_get_queue_descriptor_size()); + if (!new_info) { + return STATUS_INSUFFICIENT_RESOURCES; + } + + if (vdev->info && vdev->info != vdev->inline_info) { + mem_free_nonpaged_block(vdev, vdev->info); + } + vdev->info = new_info; + vdev->maxQueues = nvqs; + } + return STATUS_SUCCESS; +} + +static NTSTATUS vp_setup_vq(struct virtqueue **queue, + VirtIODevice *vdev, unsigned index, + u16 msix_vec) +{ + VirtIOQueueInfo *info = &vdev->info[index]; + + NTSTATUS status = vdev->device->setup_queue(queue, vdev, info, index, msix_vec); + if (NT_SUCCESS(status)) { + info->vq = *queue; + } + + return status; +} + +NTSTATUS virtio_find_queue(VirtIODevice *vdev, unsigned index, + struct virtqueue **vq) +{ + u16 msix_vec = vdev_get_msix_vector(vdev, index); + return vp_setup_vq( + vq, + vdev, + index, + msix_vec); +} + +NTSTATUS virtio_find_queues(VirtIODevice *vdev, + unsigned nvqs, + struct virtqueue *vqs[]) +{ + unsigned i; + NTSTATUS status; + u16 msix_vec; + + status = virtio_reserve_queue_memory(vdev, nvqs); + if (!NT_SUCCESS(status)) { + return status; + } + + /* set up the device config interrupt */ + msix_vec = vdev_get_msix_vector(vdev, -1); + + if (msix_vec != VIRTIO_MSI_NO_VECTOR) { + msix_vec = vdev->device->set_config_vector(vdev, msix_vec); + /* Verify we had enough resources to assign the vector */ + if (msix_vec == VIRTIO_MSI_NO_VECTOR) { + status = STATUS_DEVICE_BUSY; + goto error_find; + } + } + + /* set up queue interrupts */ + for (i = 0; i < nvqs; i++) { + msix_vec = vdev_get_msix_vector(vdev, i); + status = vp_setup_vq( + &vqs[i], + vdev, + i, + msix_vec); + if (!NT_SUCCESS(status)) { + goto error_find; + } + } + return STATUS_SUCCESS; + +error_find: + virtio_delete_queues(vdev); + return status; +} + +void virtio_delete_queue(struct virtqueue *vq) +{ + VirtIODevice *vdev = vq->vdev; + unsigned i = vq->index; + + vdev->device->delete_queue(&vdev->info[i]); + vdev->info[i].vq = NULL; +} + +void virtio_delete_queues(VirtIODevice *vdev) +{ + struct virtqueue *vq; + unsigned i; + + if (vdev->info == NULL) + return; + + for (i = 0; i < vdev->maxQueues; i++) { + vq = vdev->info[i].vq; + if (vq != NULL) { + vdev->device->delete_queue(&vdev->info[i]); + vdev->info[i].vq = NULL; + } + } +} + +u32 virtio_get_queue_size(struct virtqueue *vq) +{ + return vq->vdev->info[vq->index].num; +} + +u16 virtio_set_config_vector(VirtIODevice *vdev, u16 vector) +{ + return vdev->device->set_config_vector(vdev, 
vector); +} + +u16 virtio_set_queue_vector(struct virtqueue *vq, u16 vector) +{ + return vq->vdev->device->set_queue_vector(vq, vector); +} + +u8 virtio_read_isr_status(VirtIODevice *vdev) +{ + return ioread8(vdev, vdev->isr); +} + +int virtio_get_bar_index(PPCI_COMMON_HEADER pPCIHeader, PHYSICAL_ADDRESS BasePA) +{ + int iBar, i; + + /* no point in supporting PCI and CardBus bridges */ + ASSERT((pPCIHeader->HeaderType & ~PCI_MULTIFUNCTION) == PCI_DEVICE_TYPE); + + for (i = 0; i < PCI_TYPE0_ADDRESSES; i++) { + PHYSICAL_ADDRESS BAR; + BAR.LowPart = pPCIHeader->u.type0.BaseAddresses[i]; + + iBar = i; + if (BAR.LowPart & PCI_ADDRESS_IO_SPACE) { + /* I/O space */ + BAR.LowPart &= PCI_ADDRESS_IO_ADDRESS_MASK; + BAR.HighPart = 0; + } else if ((BAR.LowPart & PCI_ADDRESS_MEMORY_TYPE_MASK) == PCI_TYPE_64BIT) { + /* memory space 64-bit */ + BAR.LowPart &= PCI_ADDRESS_MEMORY_ADDRESS_MASK; + BAR.HighPart = pPCIHeader->u.type0.BaseAddresses[++i]; + } else { + /* memory space 32-bit */ + BAR.LowPart &= PCI_ADDRESS_MEMORY_ADDRESS_MASK; + BAR.HighPart = 0; + } + + if (BAR.QuadPart == BasePA.QuadPart) { + return iBar; + } + } + return -1; +} + +/* The notify function used when creating a virt queue, common to both modern + * and legacy (the difference is in how vq->notification_addr is set up). + */ +void vp_notify(struct virtqueue *vq) +{ + /* we write the queue's selector into the notification register to + * signal the other end */ + iowrite16(vq->vdev, (unsigned short)vq->index, vq->notification_addr); + DPrintf(6, "virtio: vp_notify vq->index = %x\n", vq->index); +} + +void virtqueue_notify(struct virtqueue *vq) +{ + vq->notification_cb(vq); +} + +void virtqueue_kick(struct virtqueue *vq) +{ + if (virtqueue_kick_prepare(vq)) { + virtqueue_notify(vq); + } +} diff --git a/drivers/network/dd/netkvm/virtio/VirtIOPCILegacy.c b/drivers/network/dd/netkvm/virtio/VirtIOPCILegacy.c new file mode 100644 index 00000000000..bb1398518d7 --- /dev/null +++ b/drivers/network/dd/netkvm/virtio/VirtIOPCILegacy.c @@ -0,0 +1,283 @@ +/* + * Virtio PCI driver - legacy (virtio 0.9) device support + * + * Copyright IBM Corp. 2007 + * + * Authors: + * Anthony Liguori + * Windows porting - Yan Vugenfirer + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +#include "osdep.h" +#include "virtio_pci.h" +#include "VirtIO.h" +#include "kdebugprint.h" +#include "virtio_ring.h" +#include "virtio_pci_common.h" +#include "windows/virtio_ring_allocation.h" + +#ifdef WPP_EVENT_TRACING +#include "VirtIOPCILegacy.tmh" +#endif + +///////////////////////////////////////////////////////////////////////////////////// +// +// vio_legacy_dump_registers - Dump HW registers of the device +// +///////////////////////////////////////////////////////////////////////////////////// +void vio_legacy_dump_registers(VirtIODevice *vdev) +{ + DPrintf(5, "%s\n", __FUNCTION__); + + DPrintf(0, "[VIRTIO_PCI_HOST_FEATURES] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_HOST_FEATURES)); + DPrintf(0, "[VIRTIO_PCI_GUEST_FEATURES] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_GUEST_FEATURES)); + DPrintf(0, "[VIRTIO_PCI_QUEUE_PFN] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_QUEUE_PFN)); + DPrintf(0, "[VIRTIO_PCI_QUEUE_NUM] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_QUEUE_NUM)); + DPrintf(0, "[VIRTIO_PCI_QUEUE_SEL] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_QUEUE_SEL)); + DPrintf(0, "[VIRTIO_PCI_QUEUE_NOTIFY] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_QUEUE_NOTIFY)); + DPrintf(0, "[VIRTIO_PCI_STATUS] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_STATUS)); + DPrintf(0, "[VIRTIO_PCI_ISR] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_ISR)); +} + +static void vio_legacy_get_config(VirtIODevice * vdev, + unsigned offset, + void *buf, + unsigned len) +{ + ULONG_PTR ioaddr = vdev->addr + VIRTIO_PCI_CONFIG(vdev->msix_used) + offset; + u8 *ptr = buf; + unsigned i; + + DPrintf(5, "%s\n", __FUNCTION__); + + for (i = 0; i < len; i++) { + ptr[i] = ioread8(vdev, ioaddr + i); + } +} + +static void vio_legacy_set_config(VirtIODevice *vdev, + unsigned offset, + const void *buf, + unsigned len) +{ + ULONG_PTR ioaddr = vdev->addr + VIRTIO_PCI_CONFIG(vdev->msix_used) + offset; + const u8 *ptr = buf; + unsigned i; + + DPrintf(5, "%s\n", __FUNCTION__); + + for (i = 0; i < len; i++) { + iowrite8(vdev, ptr[i], ioaddr + i); + } +} + +static u8 vio_legacy_get_status(VirtIODevice *vdev) +{ + DPrintf(6, "%s\n", __FUNCTION__); + return ioread8(vdev, vdev->addr + VIRTIO_PCI_STATUS); +} + +static void vio_legacy_set_status(VirtIODevice *vdev, u8 status) +{ + DPrintf(6, "%s>>> %x\n", __FUNCTION__, status); + iowrite8(vdev, status, vdev->addr + VIRTIO_PCI_STATUS); +} + +static void vio_legacy_reset(VirtIODevice *vdev) +{ + /* 0 status means a reset. 
*/ + iowrite8(vdev, 0, vdev->addr + VIRTIO_PCI_STATUS); +} + +static u64 vio_legacy_get_features(VirtIODevice *vdev) +{ + return ioread32(vdev, vdev->addr + VIRTIO_PCI_HOST_FEATURES); +} + +static NTSTATUS vio_legacy_set_features(VirtIODevice *vdev, u64 features) +{ + /* Give virtio_ring a chance to accept features. */ + vring_transport_features(vdev, &features); + + /* Make sure we don't have any features > 32 bits! */ + ASSERT((u32)features == features); + iowrite32(vdev, (u32)features, vdev->addr + VIRTIO_PCI_GUEST_FEATURES); + + return STATUS_SUCCESS; +} + +static u16 vio_legacy_set_config_vector(VirtIODevice *vdev, u16 vector) +{ + /* Setup the vector used for configuration events */ + iowrite16(vdev, vector, vdev->addr + VIRTIO_MSI_CONFIG_VECTOR); + /* Verify we had enough resources to assign the vector */ + /* Will also flush the write out to device */ + return ioread16(vdev, vdev->addr + VIRTIO_MSI_CONFIG_VECTOR); +} + +static u16 vio_legacy_set_queue_vector(struct virtqueue *vq, u16 vector) +{ + VirtIODevice *vdev = vq->vdev; + + iowrite16(vdev, (u16)vq->index, vdev->addr + VIRTIO_PCI_QUEUE_SEL); + iowrite16(vdev, vector, vdev->addr + VIRTIO_MSI_QUEUE_VECTOR); + return ioread16(vdev, vdev->addr + VIRTIO_MSI_QUEUE_VECTOR); +} + +static NTSTATUS vio_legacy_query_vq_alloc(VirtIODevice *vdev, + unsigned index, + unsigned short *pNumEntries, + unsigned long *pRingSize, + unsigned long *pHeapSize) +{ + unsigned long ring_size, data_size; + u16 num; + + /* Select the queue we're interested in */ + iowrite16(vdev, (u16)index, vdev->addr + VIRTIO_PCI_QUEUE_SEL); + + /* Check if queue is either not available or already active. */ + num = ioread16(vdev, vdev->addr + VIRTIO_PCI_QUEUE_NUM); + if (!num || ioread32(vdev, vdev->addr + VIRTIO_PCI_QUEUE_PFN)) { + return STATUS_NOT_FOUND; + } + + ring_size = ROUND_TO_PAGES(vring_size(num, VIRTIO_PCI_VRING_ALIGN, false)); + data_size = ROUND_TO_PAGES(vring_control_block_size(num, false)); + + *pNumEntries = num; + *pRingSize = ring_size + data_size; + *pHeapSize = 0; + + return STATUS_SUCCESS; +} + +static NTSTATUS vio_legacy_setup_vq(struct virtqueue **queue, + VirtIODevice *vdev, + VirtIOQueueInfo *info, + unsigned index, + u16 msix_vec) +{ + struct virtqueue *vq; + unsigned long ring_size, heap_size; + NTSTATUS status; + + /* Select the queue and query allocation parameters */ + status = vio_legacy_query_vq_alloc(vdev, index, &info->num, &ring_size, &heap_size); + if (!NT_SUCCESS(status)) { + return status; + } + + info->queue = mem_alloc_contiguous_pages(vdev, ring_size); + if (info->queue == NULL) { + return STATUS_INSUFFICIENT_RESOURCES; + } + + /* activate the queue */ + iowrite32(vdev, (u32)(mem_get_physical_address(vdev, info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT), + vdev->addr + VIRTIO_PCI_QUEUE_PFN); + + /* create the vring */ + vq = vring_new_virtqueue_split(index, info->num, + VIRTIO_PCI_VRING_ALIGN, vdev, + info->queue, vp_notify, + (u8 *)info->queue + ROUND_TO_PAGES(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN, false))); + if (!vq) { + status = STATUS_INSUFFICIENT_RESOURCES; + goto err_activate_queue; + } + + vq->notification_addr = (void *)(vdev->addr + VIRTIO_PCI_QUEUE_NOTIFY); + + if (msix_vec != VIRTIO_MSI_NO_VECTOR) { + msix_vec = vdev->device->set_queue_vector(vq, msix_vec); + if (msix_vec == VIRTIO_MSI_NO_VECTOR) { + status = STATUS_DEVICE_BUSY; + goto err_assign; + } + } + + *queue = vq; + return STATUS_SUCCESS; + +err_assign: +err_activate_queue: + iowrite32(vdev, 0, vdev->addr + VIRTIO_PCI_QUEUE_PFN); + 
mem_free_contiguous_pages(vdev, info->queue); + return status; +} + +static void vio_legacy_del_vq(VirtIOQueueInfo *info) +{ + struct virtqueue *vq = info->vq; + VirtIODevice *vdev = vq->vdev; + + iowrite16(vdev, (u16)vq->index, vdev->addr + VIRTIO_PCI_QUEUE_SEL); + + if (vdev->msix_used) { + iowrite16(vdev, VIRTIO_MSI_NO_VECTOR, + vdev->addr + VIRTIO_MSI_QUEUE_VECTOR); + /* Flush the write out to device */ + ioread8(vdev, vdev->addr + VIRTIO_PCI_ISR); + } + + /* Select and deactivate the queue */ + iowrite32(vdev, 0, vdev->addr + VIRTIO_PCI_QUEUE_PFN); + + mem_free_contiguous_pages(vdev, info->queue); +} + +static const struct virtio_device_ops virtio_pci_device_ops = { + /* .get_config = */ vio_legacy_get_config, + /* .set_config = */ vio_legacy_set_config, + /* .get_config_generation = */ NULL, + /* .get_status = */ vio_legacy_get_status, + /* .set_status = */ vio_legacy_set_status, + /* .reset = */ vio_legacy_reset, + /* .get_features = */ vio_legacy_get_features, + /* .set_features = */ vio_legacy_set_features, + /* .set_config_vector = */ vio_legacy_set_config_vector, + /* .set_queue_vector = */ vio_legacy_set_queue_vector, + /* .query_queue_alloc = */ vio_legacy_query_vq_alloc, + /* .setup_queue = */ vio_legacy_setup_vq, + /* .delete_queue = */ vio_legacy_del_vq, +}; + +/* Legacy device initialization */ +NTSTATUS vio_legacy_initialize(VirtIODevice *vdev) +{ + size_t length = pci_get_resource_len(vdev, 0); + vdev->addr = (ULONG_PTR)pci_map_address_range(vdev, 0, 0, length); + + if (!vdev->addr) { + return STATUS_INSUFFICIENT_RESOURCES; + } + + vdev->isr = (u8 *)vdev->addr + VIRTIO_PCI_ISR; + + vdev->device = &virtio_pci_device_ops; + + return STATUS_SUCCESS; +} diff --git a/drivers/network/dd/netkvm/virtio/VirtIOPCIModern.c b/drivers/network/dd/netkvm/virtio/VirtIOPCIModern.c new file mode 100644 index 00000000000..8a44a2b3465 --- /dev/null +++ b/drivers/network/dd/netkvm/virtio/VirtIOPCIModern.c @@ -0,0 +1,597 @@ +/* + * Virtio PCI driver - modern (virtio 1.0) device support + * + * Copyright IBM Corp. 2007 + * Copyright Red Hat, Inc. 2014 + * + * Authors: + * Anthony Liguori + * Rusty Russell + * Michael S. Tsirkin + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +#include "osdep.h" +#define VIRTIO_PCI_NO_LEGACY +#include "virtio_pci.h" +#include "VirtIO.h" +#include "kdebugprint.h" +#include "virtio_ring.h" +#include "virtio_pci_common.h" +#include "windows/virtio_ring_allocation.h" +#include + +#ifdef WPP_EVENT_TRACING +#include "VirtIOPCIModern.tmh" +#endif + +static void *vio_modern_map_capability(VirtIODevice *vdev, int cap_offset, + size_t minlen, u32 alignment, + u32 start, u32 size, size_t *len) +{ + u8 bar; + u32 bar_offset, bar_length; + void *addr; + + pci_read_config_byte(vdev, cap_offset + offsetof(struct virtio_pci_cap, bar), &bar); + pci_read_config_dword(vdev, cap_offset + offsetof(struct virtio_pci_cap, offset), &bar_offset); + pci_read_config_dword(vdev, cap_offset + offsetof(struct virtio_pci_cap, length), &bar_length); + + if (start + minlen > bar_length) { + DPrintf(0, "bar %i cap is not large enough to map %zu bytes at offset %u\n", bar, minlen, start); + return NULL; + } + + bar_length -= start; + bar_offset += start; + + if (bar_offset & (alignment - 1)) { + DPrintf(0, "bar %i offset %u not aligned to %u\n", bar, bar_offset, alignment); + return NULL; + } + + if (bar_length > size) { + bar_length = size; + } + + if (len) { + *len = bar_length; + } + + if (bar_offset + minlen > pci_get_resource_len(vdev, bar)) { + DPrintf(0, "bar %i is not large enough to map %zu bytes at offset %u\n", bar, minlen, bar_offset); + return NULL; + } + + addr = pci_map_address_range(vdev, bar, bar_offset, bar_length); + if (!addr) { + DPrintf(0, "unable to map %u bytes at bar %i offset %u\n", bar_length, bar, bar_offset); + } + return addr; +} + +static void *vio_modern_map_simple_capability(VirtIODevice *vdev, int cap_offset, size_t length, u32 alignment) +{ + return vio_modern_map_capability( + vdev, + cap_offset, + length, // minlen + alignment, + 0, // offset + (u32)length, // size is equal to minlen + NULL); // not interested in the full length +} + +static void vio_modern_get_config(VirtIODevice *vdev, unsigned offset, + void *buf, unsigned len) +{ + if (!vdev->config) { + ASSERT(!"Device has no config to read"); + return; + } + if (offset + len > vdev->config_len) { + ASSERT(!"Can't read beyond the config length"); + return; + } + + switch (len) { + case 1: + *(u8 *)buf = ioread8(vdev, vdev->config + offset); + break; + case 2: + *(u16 *)buf = ioread16(vdev, vdev->config + offset); + break; + case 4: + *(u32 *)buf = ioread32(vdev, vdev->config + offset); + break; + default: + ASSERT(!"Only 1, 2, 4 byte config reads are supported"); + } +} + +static void vio_modern_set_config(VirtIODevice *vdev, unsigned offset, + const void *buf, unsigned len) +{ + if (!vdev->config) { + ASSERT(!"Device has no config to write"); + return; + } + if (offset + len > 
vdev->config_len) { + ASSERT(!"Can't write beyond the config length"); + return; + } + + switch (len) { + case 1: + iowrite8(vdev, *(u8 *)buf, vdev->config + offset); + break; + case 2: + iowrite16(vdev, *(u16 *)buf, vdev->config + offset); + break; + case 4: + iowrite32(vdev, *(u32 *)buf, vdev->config + offset); + break; + default: + ASSERT(!"Only 1, 2, 4 byte config writes are supported"); + } +} + +static u32 vio_modern_get_generation(VirtIODevice *vdev) +{ + return ioread8(vdev, &vdev->common->config_generation); +} + +static u8 vio_modern_get_status(VirtIODevice *vdev) +{ + return ioread8(vdev, &vdev->common->device_status); +} + +static void vio_modern_set_status(VirtIODevice *vdev, u8 status) +{ + /* We should never be setting status to 0. */ + ASSERT(status != 0); + iowrite8(vdev, status, &vdev->common->device_status); +} + +static void vio_modern_reset(VirtIODevice *vdev) +{ + /* 0 status means a reset. */ + iowrite8(vdev, 0, &vdev->common->device_status); + /* After writing 0 to device_status, the driver MUST wait for a read of + * device_status to return 0 before reinitializing the device. + * This will flush out the status write, and flush in device writes, + * including MSI-X interrupts, if any. + */ + while (ioread8(vdev, &vdev->common->device_status)) { + vdev_sleep(vdev, 1); + } +} + +static u64 vio_modern_get_features(VirtIODevice *vdev) +{ + u64 features; + + iowrite32(vdev, 0, &vdev->common->device_feature_select); + features = ioread32(vdev, &vdev->common->device_feature); + iowrite32(vdev, 1, &vdev->common->device_feature_select); + features |= ((u64)ioread32(vdev, &vdev->common->device_feature) << 32); + + return features; +} + +static NTSTATUS vio_modern_set_features(VirtIODevice *vdev, u64 features) +{ + /* Give virtio_ring a chance to accept features. */ + vring_transport_features(vdev, &features); + + if (!virtio_is_feature_enabled(features, VIRTIO_F_VERSION_1)) { + DPrintf(0, "virtio: device uses modern interface but does not have VIRTIO_F_VERSION_1\n", 0); + return STATUS_INVALID_PARAMETER; + } + + iowrite32(vdev, 0, &vdev->common->guest_feature_select); + iowrite32(vdev, (u32)features, &vdev->common->guest_feature); + iowrite32(vdev, 1, &vdev->common->guest_feature_select); + iowrite32(vdev, features >> 32, &vdev->common->guest_feature); + + return STATUS_SUCCESS; +} + +static u16 vio_modern_set_config_vector(VirtIODevice *vdev, u16 vector) +{ + /* Setup the vector used for configuration events */ + iowrite16(vdev, vector, &vdev->common->msix_config); + /* Verify we had enough resources to assign the vector */ + /* Will also flush the write out to device */ + return ioread16(vdev, &vdev->common->msix_config); +} + +static u16 vio_modern_set_queue_vector(struct virtqueue *vq, u16 vector) +{ + VirtIODevice *vdev = vq->vdev; + volatile struct virtio_pci_common_cfg *cfg = vdev->common; + + iowrite16(vdev, (u16)vq->index, &cfg->queue_select); + iowrite16(vdev, vector, &cfg->queue_msix_vector); + return ioread16(vdev, &cfg->queue_msix_vector); +} + +static size_t vring_pci_size(u16 num, bool packed) +{ + /* We only need a cacheline separation. 
*/ + return (size_t)ROUND_TO_PAGES(vring_size(num, SMP_CACHE_BYTES, packed)); +} + +static NTSTATUS vio_modern_query_vq_alloc(VirtIODevice *vdev, + unsigned index, + unsigned short *pNumEntries, + unsigned long *pRingSize, + unsigned long *pHeapSize) +{ + volatile struct virtio_pci_common_cfg *cfg = vdev->common; + u16 num; + + if (index >= ioread16(vdev, &cfg->num_queues)) { + return STATUS_NOT_FOUND; + } + + /* Select the queue we're interested in */ + iowrite16(vdev, (u16)index, &cfg->queue_select); + + /* Check if queue is either not available or already active. */ + num = ioread16(vdev, &cfg->queue_size); + /* QEMU has a bug where queues don't revert to inactive on device + * reset. Skip checking the queue_enable field until it is fixed. + */ + if (!num /*|| ioread16(vdev, &cfg->queue_enable)*/) { + return STATUS_NOT_FOUND; + } + + if (num & (num - 1)) { + DPrintf(0, "%p: bad queue size %u", vdev, num); + return STATUS_INVALID_PARAMETER; + } + + *pNumEntries = num; + *pRingSize = (unsigned long)vring_pci_size(num, vdev->packed_ring); + *pHeapSize = vring_control_block_size(num, vdev->packed_ring); + + return STATUS_SUCCESS; +} + +static NTSTATUS vio_modern_setup_vq(struct virtqueue **queue, + VirtIODevice *vdev, + VirtIOQueueInfo *info, + unsigned index, + u16 msix_vec) +{ + volatile struct virtio_pci_common_cfg *cfg = vdev->common; + struct virtqueue *vq; + void *vq_addr; + u16 off; + unsigned long ring_size, heap_size; + NTSTATUS status; + + /* select the queue and query allocation parameters */ + status = vio_modern_query_vq_alloc(vdev, index, &info->num, &ring_size, &heap_size); + if (!NT_SUCCESS(status)) { + return status; + } + + /* get offset of notification word for this vq */ + off = ioread16(vdev, &cfg->queue_notify_off); + + /* try to allocate contiguous pages, scale down on failure */ + while (!(info->queue = mem_alloc_contiguous_pages(vdev, vring_pci_size(info->num, vdev->packed_ring)))) { + if (info->num > 0) { + info->num /= 2; + } else { + return STATUS_INSUFFICIENT_RESOURCES; + } + } + + vq_addr = mem_alloc_nonpaged_block(vdev, heap_size); + if (vq_addr == NULL) { + return STATUS_INSUFFICIENT_RESOURCES; + } + + /* create the vring */ + if (vdev->packed_ring) { + vq = vring_new_virtqueue_packed(index, info->num, + SMP_CACHE_BYTES, vdev, + info->queue, vp_notify, vq_addr); + } else { + vq = vring_new_virtqueue_split(index, info->num, + SMP_CACHE_BYTES, vdev, + info->queue, vp_notify, vq_addr); + } + + if (!vq) { + status = STATUS_INSUFFICIENT_RESOURCES; + goto err_new_queue; + } + + /* activate the queue */ + iowrite16(vdev, info->num, &cfg->queue_size); + iowrite64_twopart(vdev, mem_get_physical_address(vdev, info->queue), + &cfg->queue_desc_lo, &cfg->queue_desc_hi); + iowrite64_twopart(vdev, mem_get_physical_address(vdev, vq->avail_va), + &cfg->queue_avail_lo, &cfg->queue_avail_hi); + iowrite64_twopart(vdev, mem_get_physical_address(vdev, vq->used_va), + &cfg->queue_used_lo, &cfg->queue_used_hi); + + if (vdev->notify_base) { + /* offset should not wrap */ + if ((u64)off * vdev->notify_offset_multiplier + 2 + > vdev->notify_len) { + DPrintf(0, + "%p: bad notification offset %u (x %u) " + "for queue %u > %zd", + vdev, + off, vdev->notify_offset_multiplier, + index, vdev->notify_len); + status = STATUS_INVALID_PARAMETER; + goto err_map_notify; + } + vq->notification_addr = (void *)(vdev->notify_base + + off * vdev->notify_offset_multiplier); + } else { + vq->notification_addr = vio_modern_map_capability(vdev, + vdev->notify_map_cap, 2, 2, + off * 
vdev->notify_offset_multiplier, 2, + NULL); + } + + if (!vq->notification_addr) { + status = STATUS_INSUFFICIENT_RESOURCES; + goto err_map_notify; + } + + if (msix_vec != VIRTIO_MSI_NO_VECTOR) { + msix_vec = vdev->device->set_queue_vector(vq, msix_vec); + if (msix_vec == VIRTIO_MSI_NO_VECTOR) { + status = STATUS_DEVICE_BUSY; + goto err_assign_vector; + } + } + + /* enable the queue */ + iowrite16(vdev, 1, &vdev->common->queue_enable); + + *queue = vq; + return STATUS_SUCCESS; + +err_assign_vector: +err_map_notify: + virtqueue_shutdown(vq); +err_new_queue: + mem_free_nonpaged_block(vdev, vq_addr); + mem_free_contiguous_pages(vdev, info->queue); + return status; +} + +static void vio_modern_del_vq(VirtIOQueueInfo *info) +{ + struct virtqueue *vq = info->vq; + VirtIODevice *vdev = vq->vdev; + + iowrite16(vdev, (u16)vq->index, &vdev->common->queue_select); + + if (vdev->msix_used) { + iowrite16(vdev, VIRTIO_MSI_NO_VECTOR, &vdev->common->queue_msix_vector); + /* Flush the write out to device */ + ioread16(vdev, &vdev->common->queue_msix_vector); + } + + virtqueue_shutdown(vq); + + mem_free_nonpaged_block(vdev, vq); + mem_free_contiguous_pages(vdev, info->queue); +} + +static const struct virtio_device_ops virtio_pci_device_ops = { + /* .get_config = */ vio_modern_get_config, + /* .set_config = */ vio_modern_set_config, + /* .get_config_generation = */ vio_modern_get_generation, + /* .get_status = */ vio_modern_get_status, + /* .set_status = */ vio_modern_set_status, + /* .reset = */ vio_modern_reset, + /* .get_features = */ vio_modern_get_features, + /* .set_features = */ vio_modern_set_features, + /* .set_config_vector = */ vio_modern_set_config_vector, + /* .set_queue_vector = */ vio_modern_set_queue_vector, + /* .query_queue_alloc = */ vio_modern_query_vq_alloc, + /* .setup_queue = */ vio_modern_setup_vq, + /* .delete_queue = */ vio_modern_del_vq, +}; + +static u8 find_next_pci_vendor_capability(VirtIODevice *vdev, u8 offset) +{ + u8 id = 0; + int iterations = 48; + + if (pci_read_config_byte(vdev, offset, &offset) != 0) { + return 0; + } + + while (iterations-- && offset >= 0x40) { + offset &= ~3; + if (pci_read_config_byte(vdev, offset + offsetof(PCI_CAPABILITIES_HEADER, + CapabilityID), &id) != 0) { + break; + } + if (id == 0xFF) { + break; + } + if (id == PCI_CAPABILITY_ID_VENDOR_SPECIFIC) { + return offset; + } + if (pci_read_config_byte(vdev, offset + offsetof(PCI_CAPABILITIES_HEADER, + Next), &offset) != 0) { + break; + } + } + return 0; +} + +static u8 find_first_pci_vendor_capability(VirtIODevice *vdev) +{ + u8 hdr_type, offset; + u16 status; + + if (pci_read_config_byte(vdev, offsetof(PCI_COMMON_HEADER, HeaderType), &hdr_type) != 0) { + return 0; + } + if (pci_read_config_word(vdev, offsetof(PCI_COMMON_HEADER, Status), &status) != 0) { + return 0; + } + if ((status & PCI_STATUS_CAPABILITIES_LIST) == 0) { + return 0; + } + + switch (hdr_type & ~PCI_MULTIFUNCTION) { + case PCI_BRIDGE_TYPE: + offset = offsetof(PCI_COMMON_HEADER, u.type1.CapabilitiesPtr); + break; + case PCI_CARDBUS_BRIDGE_TYPE: + offset = offsetof(PCI_COMMON_HEADER, u.type2.CapabilitiesPtr); + break; + default: + offset = offsetof(PCI_COMMON_HEADER, u.type0.CapabilitiesPtr); + break; + } + + if (offset != 0) { + offset = find_next_pci_vendor_capability(vdev, offset); + } + return offset; +} + +/* Populate Offsets with virtio vendor capability offsets within the PCI config space */ +static void find_pci_vendor_capabilities(VirtIODevice *vdev, int *Offsets, size_t nOffsets) +{ + u8 offset = 
find_first_pci_vendor_capability(vdev); + while (offset > 0) { + u8 cfg_type, bar; + pci_read_config_byte(vdev, offset + offsetof(struct virtio_pci_cap, cfg_type), &cfg_type); + pci_read_config_byte(vdev, offset + offsetof(struct virtio_pci_cap, bar), &bar); + + if (bar < PCI_TYPE0_ADDRESSES && + cfg_type < nOffsets && + pci_get_resource_len(vdev, bar) > 0) { + Offsets[cfg_type] = offset; + } + + offset = find_next_pci_vendor_capability(vdev, offset + offsetof(PCI_CAPABILITIES_HEADER, Next)); + } +} + +/* Modern device initialization */ +NTSTATUS vio_modern_initialize(VirtIODevice *vdev) +{ + int capabilities[VIRTIO_PCI_CAP_PCI_CFG]; + + u32 notify_length; + u32 notify_offset; + + RtlZeroMemory(capabilities, sizeof(capabilities)); + find_pci_vendor_capabilities(vdev, capabilities, VIRTIO_PCI_CAP_PCI_CFG); + + /* Check for a common config, if not found use legacy mode */ + if (!capabilities[VIRTIO_PCI_CAP_COMMON_CFG]) { + DPrintf(0, "%s(%p): device not found\n", __FUNCTION__, vdev); + return STATUS_DEVICE_NOT_CONNECTED; + } + + /* Check isr and notify caps, if not found fail */ + if (!capabilities[VIRTIO_PCI_CAP_ISR_CFG] || !capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG]) { + DPrintf(0, "%s(%p): missing capabilities %i/%i/%i\n", + __FUNCTION__, vdev, + capabilities[VIRTIO_PCI_CAP_COMMON_CFG], + capabilities[VIRTIO_PCI_CAP_ISR_CFG], + capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG]); + return STATUS_INVALID_PARAMETER; + } + + /* Map bars according to the capabilities */ + vdev->common = vio_modern_map_simple_capability(vdev, + capabilities[VIRTIO_PCI_CAP_COMMON_CFG], + sizeof(struct virtio_pci_common_cfg), 4); + if (!vdev->common) { + return STATUS_INVALID_PARAMETER; + } + + vdev->isr = vio_modern_map_simple_capability(vdev, + capabilities[VIRTIO_PCI_CAP_ISR_CFG], + sizeof(u8), 1); + if (!vdev->isr) { + return STATUS_INVALID_PARAMETER; + } + + /* Read notify_off_multiplier from config space. */ + pci_read_config_dword(vdev, + capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG] + offsetof(struct virtio_pci_notify_cap, + notify_off_multiplier), + &vdev->notify_offset_multiplier); + + /* Read notify length and offset from config space. */ + pci_read_config_dword(vdev, + capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG] + offsetof(struct virtio_pci_notify_cap, + cap.length), + ¬ify_length); + pci_read_config_dword(vdev, + capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG] + offsetof(struct virtio_pci_notify_cap, + cap.offset), + ¬ify_offset); + + /* Map the notify capability if it's small enough. + * Otherwise, map each VQ individually later. 
+ */ + if (notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) { + vdev->notify_base = vio_modern_map_capability(vdev, + capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG], 2, 2, + 0, notify_length, + &vdev->notify_len); + if (!vdev->notify_base) { + return STATUS_INVALID_PARAMETER; + } + } else { + vdev->notify_map_cap = capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG]; + } + + /* Map the device config capability, the PAGE_SIZE size is a guess */ + if (capabilities[VIRTIO_PCI_CAP_DEVICE_CFG]) { + vdev->config = vio_modern_map_capability(vdev, + capabilities[VIRTIO_PCI_CAP_DEVICE_CFG], 0, 4, + 0, PAGE_SIZE, + &vdev->config_len); + if (!vdev->config) { + return STATUS_INVALID_PARAMETER; + } + } + + vdev->device = &virtio_pci_device_ops; + + return STATUS_SUCCESS; +} diff --git a/drivers/network/dd/netkvm/virtio/VirtIORing-Packed.c b/drivers/network/dd/netkvm/virtio/VirtIORing-Packed.c new file mode 100644 index 00000000000..dda34e0fdae --- /dev/null +++ b/drivers/network/dd/netkvm/virtio/VirtIORing-Packed.c @@ -0,0 +1,651 @@ +/* + * Packed virtio ring manipulation routines + * + * Copyright 2019 Red Hat, Inc. + * + * Authors: + * Yuri Benditovich + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include "osdep.h" +#include "virtio_pci.h" +#include "VirtIO.h" +#include "kdebugprint.h" +#include "virtio_ring.h" +#include "windows/virtio_ring_allocation.h" + +#include + +struct vring_packed_desc_event { + /* Descriptor Ring Change Event Offset/Wrap Counter. */ + __le16 off_wrap; + /* Descriptor Ring Change Event Flags. */ + __le16 flags; +}; + +struct vring_packed_desc { + /* Buffer Address. */ + __virtio64 addr; + /* Buffer Length. */ + __le32 len; + /* Buffer ID. */ + __le16 id; + /* The flags depending on descriptor type. */ + __le16 flags; +}; + +#include + +#define BUG_ON(condition) { if (condition) { KeBugCheck(0xE0E1E2E3); }} +#define BAD_RING(vq, fmt, ...) DPrintf(0, "%s: queue %d: " fmt, __FUNCTION__, vq->vq.index, __VA_ARGS__); BUG_ON(true) + +/* This marks a buffer as continuing via the next field. 
*/ +#define VRING_DESC_F_NEXT 1 +/* This marks a buffer as write-only (otherwise read-only). */ +#define VRING_DESC_F_WRITE 2 +/* This means the buffer contains a list of buffer descriptors. */ +#define VRING_DESC_F_INDIRECT 4 + +/* + * Mark a descriptor as available or used in packed ring. + * Notice: they are defined as shifts instead of shifted values. + */ +#define VRING_PACKED_DESC_F_AVAIL 7 +#define VRING_PACKED_DESC_F_USED 15 + +/* Enable events in packed ring. */ +#define VRING_PACKED_EVENT_FLAG_ENABLE 0x0 +/* Disable events in packed ring. */ +#define VRING_PACKED_EVENT_FLAG_DISABLE 0x1 + +/* + * Enable events for a specific descriptor in packed ring. + * (as specified by Descriptor Ring Change Event Offset/Wrap Counter). + * Only valid if VIRTIO_RING_F_EVENT_IDX has been negotiated. + */ +#define VRING_PACKED_EVENT_FLAG_DESC 0x2 + /* + * Wrap counter bit shift in event suppression structure + * of packed ring. + */ +#define VRING_PACKED_EVENT_F_WRAP_CTR 15 + +/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */ +/* Assuming a given event_idx value from the other side, if + * we have just incremented index from old to new_idx, + * should we trigger an event? + */ +static inline bool vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old) +{ + /* Note: Xen has similar logic for notification hold-off + * in include/xen/interface/io/ring.h with req_event and req_prod + * corresponding to event_idx + 1 and new_idx respectively. + * Note also that req_event and req_prod in Xen start at 1, + * event indexes in virtio start at 0. */ + return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old); +} + +struct vring_desc_state_packed { + void *data; /* Data for callback. */ + u16 num; /* Descriptor list length. */ + u16 next; /* The next desc state in a list. */ + u16 last; /* The last desc state in a list. */ +}; + +struct virtqueue_packed { + struct virtqueue vq; + /* Number we've added since last sync. */ + unsigned int num_added; + /* Head of free buffer list. */ + unsigned int free_head; + /* Number of free descriptors */ + unsigned int num_free; + /* Last used index we've seen. */ + u16 last_used_idx; + /* Avail used flags. */ + u16 avail_used_flags; + struct + { + /* Driver ring wrap counter. */ + bool avail_wrap_counter; + /* Device ring wrap counter. */ + bool used_wrap_counter; + /* Index of the next avail descriptor. */ + u16 next_avail_idx; + /* + * Last written value to driver->flags in + * guest byte order. + */ + u16 event_flags_shadow; + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + /* Per-descriptor state. 
*/ + struct vring_desc_state_packed *desc_state; + } packed; + struct vring_desc_state_packed desc_states[]; +}; + +#define packedvq(vq) ((struct virtqueue_packed *)vq) + +unsigned int vring_control_block_size_packed(u16 qsize) +{ + return sizeof(struct virtqueue_packed) + sizeof(struct vring_desc_state_packed) * qsize; +} + +unsigned long vring_size_packed(unsigned int num, unsigned long align) +{ + /* array of descriptors */ + unsigned long res = num * sizeof(struct vring_packed_desc); + /* driver and device event */ + res += 2 * sizeof(struct vring_packed_desc_event); + return res; +} + +static int virtqueue_add_buf_packed( + struct virtqueue *_vq, /* the queue */ + struct scatterlist sg[], /* sg array of length out + in */ + unsigned int out, /* number of driver->device buffer descriptors in sg */ + unsigned int in, /* number of device->driver buffer descriptors in sg */ + void *opaque, /* later returned from virtqueue_get_buf */ + void *va_indirect, /* VA of the indirect page or NULL */ + ULONGLONG phys_indirect) /* PA of the indirect page or 0 */ +{ + struct virtqueue_packed *vq = packedvq(_vq); + unsigned int descs_used; + struct vring_packed_desc *desc; + u16 head, id, i; + + descs_used = out + in; + head = vq->packed.next_avail_idx; + id = (u16)vq->free_head; + + BUG_ON(descs_used == 0); + BUG_ON(id >= vq->packed.vring.num); + + if (va_indirect && vq->num_free > 0) { + desc = va_indirect; + for (i = 0; i < descs_used; i++) { + desc[i].flags = i < out ? 0 : VRING_DESC_F_WRITE; + desc[i].addr = sg[i].physAddr.QuadPart; + desc[i].len = sg[i].length; + } + vq->packed.vring.desc[head].addr = phys_indirect; + vq->packed.vring.desc[head].len = descs_used * sizeof(struct vring_packed_desc); + vq->packed.vring.desc[head].id = id; + + KeMemoryBarrier(); + vq->packed.vring.desc[head].flags = VRING_DESC_F_INDIRECT | vq->avail_used_flags; + + DPrintf(5, "Added buffer head %i to Q%d\n", head, vq->vq.index); + head++; + if (head >= vq->packed.vring.num) { + head = 0; + vq->packed.avail_wrap_counter ^= 1; + vq->avail_used_flags ^= + 1 << VRING_PACKED_DESC_F_AVAIL | + 1 << VRING_PACKED_DESC_F_USED; + } + vq->packed.next_avail_idx = head; + /* We're using some buffers from the free list. */ + vq->num_free -= 1; + vq->num_added += 1; + + vq->free_head = vq->packed.desc_state[id].next; + + /* Store token and indirect buffer state. */ + vq->packed.desc_state[id].num = 1; + vq->packed.desc_state[id].data = opaque; + vq->packed.desc_state[id].last = id; + + } else { + unsigned int n; + u16 curr, prev, head_flags; + if (vq->num_free < descs_used) { + DPrintf(6, "Can't add buffer to Q%d\n", vq->vq.index); + return -ENOSPC; + } + desc = vq->packed.vring.desc; + i = head; + curr = id; + for (n = 0; n < descs_used; n++) { + u16 flags = vq->avail_used_flags; + flags |= n < out ? 0 : VRING_DESC_F_WRITE; + if (n != descs_used - 1) { + flags |= VRING_DESC_F_NEXT; + } + desc[i].addr = sg[n].physAddr.QuadPart; + desc[i].len = sg[n].length; + desc[i].id = id; + if (n == 0) { + head_flags = flags; + } + else { + desc[i].flags = flags; + } + + prev = curr; + curr = vq->packed.desc_state[curr].next; + + if (++i >= vq->packed.vring.num) { + i = 0; + vq->avail_used_flags ^= + 1 << VRING_PACKED_DESC_F_AVAIL | + 1 << VRING_PACKED_DESC_F_USED; + } + } + + if (i < head) + vq->packed.avail_wrap_counter ^= 1; + + /* We're using some buffers from the free list. */ + vq->num_free -= descs_used; + + /* Update free pointer */ + vq->packed.next_avail_idx = i; + vq->free_head = curr; + + /* Store token. 
*/ + vq->packed.desc_state[id].num = (u16)descs_used; + vq->packed.desc_state[id].data = opaque; + vq->packed.desc_state[id].last = prev; + + /* + * A driver MUST NOT make the first descriptor in the list + * available before all subsequent descriptors comprising + * the list are made available. + */ + KeMemoryBarrier(); + vq->packed.vring.desc[head].flags = head_flags; + vq->num_added += descs_used; + + DPrintf(5, "Added buffer head @%i+%d to Q%d\n", head, descs_used, vq->vq.index); + } + + return 0; +} + +static void detach_buf_packed(struct virtqueue_packed *vq, unsigned int id) +{ + struct vring_desc_state_packed *state = &vq->packed.desc_state[id]; + + /* Clear data ptr. */ + state->data = NULL; + + vq->packed.desc_state[state->last].next = (u16)vq->free_head; + vq->free_head = id; + vq->num_free += state->num; +} + +static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq) +{ + struct virtqueue_packed *vq = packedvq(_vq); + unsigned int i; + void *buf; + + for (i = 0; i < vq->packed.vring.num; i++) { + if (!vq->packed.desc_state[i].data) + continue; + /* detach_buf clears data, so grab it now. */ + buf = vq->packed.desc_state[i].data; + detach_buf_packed(vq, i); + return buf; + } + /* That should have freed everything. */ + BUG_ON(vq->num_free != vq->packed.vring.num); + + return NULL; +} + +static void virtqueue_disable_cb_packed(struct virtqueue *_vq) +{ + struct virtqueue_packed *vq = packedvq(_vq); + + if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) { + vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; + vq->packed.vring.driver->flags = vq->packed.event_flags_shadow; + } +} + +static inline bool is_used_desc_packed(const struct virtqueue_packed *vq, + u16 idx, bool used_wrap_counter) +{ + bool avail, used; + u16 flags; + + flags = vq->packed.vring.desc[idx].flags; + avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL)); + used = !!(flags & (1 << VRING_PACKED_DESC_F_USED)); + + return avail == used && used == used_wrap_counter; +} + +static inline bool virtqueue_poll_packed(struct virtqueue_packed *vq, u16 off_wrap) +{ + bool wrap_counter; + u16 used_idx; + KeMemoryBarrier(); + + wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR; + used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR); + + return is_used_desc_packed(vq, used_idx, wrap_counter); + +} + +static inline unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue_packed *vq) +{ + bool event_suppression_enabled = vq->vq.vdev->event_suppression_enabled; + /* + * We optimistically turn back on interrupts, then check if there was + * more to do. + */ + + if (event_suppression_enabled) { + vq->packed.vring.driver->off_wrap = + vq->last_used_idx | + (vq->packed.used_wrap_counter << + VRING_PACKED_EVENT_F_WRAP_CTR); + /* + * We need to update event offset and event wrap + * counter first before updating event flags. + */ + KeMemoryBarrier(); + } + + if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { + vq->packed.event_flags_shadow = event_suppression_enabled ? 
+ VRING_PACKED_EVENT_FLAG_DESC : + VRING_PACKED_EVENT_FLAG_ENABLE; + vq->packed.vring.driver->flags = vq->packed.event_flags_shadow; + } + + return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter << + VRING_PACKED_EVENT_F_WRAP_CTR); +} + +static bool virtqueue_enable_cb_packed(struct virtqueue *_vq) +{ + struct virtqueue_packed *vq = packedvq(_vq); + unsigned last_used_idx = virtqueue_enable_cb_prepare_packed(vq); + + return !virtqueue_poll_packed(vq, (u16)last_used_idx); +} + +static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq) +{ + struct virtqueue_packed *vq = packedvq(_vq); + bool event_suppression_enabled = vq->vq.vdev->event_suppression_enabled; + u16 used_idx, wrap_counter; + u16 bufs; + + /* + * We optimistically turn back on interrupts, then check if there was + * more to do. + */ + + if (event_suppression_enabled) { + /* TODO: tune this threshold */ + bufs = (vq->packed.vring.num - vq->num_free) * 3 / 4; + wrap_counter = vq->packed.used_wrap_counter; + + used_idx = vq->last_used_idx + bufs; + if (used_idx >= vq->packed.vring.num) { + used_idx -= (u16)vq->packed.vring.num; + wrap_counter ^= 1; + } + + vq->packed.vring.driver->off_wrap = used_idx | + (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR); + + /* + * We need to update event offset and event wrap + * counter first before updating event flags. + */ + KeMemoryBarrier(); + } + + if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { + vq->packed.event_flags_shadow = event_suppression_enabled ? + VRING_PACKED_EVENT_FLAG_DESC : + VRING_PACKED_EVENT_FLAG_ENABLE; + vq->packed.vring.driver->flags = vq->packed.event_flags_shadow; + } + + /* + * We need to update event suppression structure first + * before re-checking for more used buffers. + */ + KeMemoryBarrier(); + + if (is_used_desc_packed(vq, + vq->last_used_idx, + vq->packed.used_wrap_counter)) { + return false; + } + + return true; +} + +static BOOLEAN virtqueue_is_interrupt_enabled_packed(struct virtqueue *_vq) +{ + struct virtqueue_packed *vq = packedvq(_vq); + return vq->packed.event_flags_shadow & VRING_PACKED_EVENT_FLAG_DISABLE; +} + +static void virtqueue_shutdown_packed(struct virtqueue *_vq) +{ + struct virtqueue_packed *vq = packedvq(_vq); + unsigned int num = vq->packed.vring.num; + void *pages = vq->packed.vring.desc; + unsigned int vring_align = _vq->vdev->addr ? PAGE_SIZE : SMP_CACHE_BYTES; + + RtlZeroMemory(pages, vring_size_packed(num, vring_align)); + vring_new_virtqueue_packed( + _vq->index, + num, + vring_align, + _vq->vdev, + pages, + _vq->notification_cb, + _vq); +} + +static inline bool more_used_packed(const struct virtqueue_packed *vq) +{ + return is_used_desc_packed(vq, vq->last_used_idx, + vq->packed.used_wrap_counter); +} + +static void *virtqueue_get_buf_packed( + struct virtqueue *_vq, /* the queue */ + unsigned int *len) /* number of bytes returned by the device */ +{ + struct virtqueue_packed *vq = packedvq(_vq); + u16 last_used, id; + void *ret; + + if (!more_used_packed(vq)) { + DPrintf(6, "%s: No more buffers in queue\n", __FUNCTION__); + return NULL; + } + + /* Only get used elements after they have been exposed by host. 
*/ + KeMemoryBarrier(); + + last_used = vq->last_used_idx; + id = vq->packed.vring.desc[last_used].id; + *len = vq->packed.vring.desc[last_used].len; + + if (id >= vq->packed.vring.num) { + BAD_RING(vq, "id %u out of range\n", id); + return NULL; + } + if (!vq->packed.desc_state[id].data) { + BAD_RING(vq, "id %u is not a head!\n", id); + return NULL; + } + + /* detach_buf_packed clears data, so grab it now. */ + ret = vq->packed.desc_state[id].data; + detach_buf_packed(vq, id); + + vq->last_used_idx += vq->packed.desc_state[id].num; + if (vq->last_used_idx >= vq->packed.vring.num) { + vq->last_used_idx -= (u16)vq->packed.vring.num; + vq->packed.used_wrap_counter ^= 1; + } + + /* + * If we expect an interrupt for the next entry, tell host + * by writing event index and flush out the write before + * the read in the next get_buf call. + */ + if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC) { + vq->packed.vring.driver->off_wrap = vq->last_used_idx | + ((u16)vq->packed.used_wrap_counter << + VRING_PACKED_EVENT_F_WRAP_CTR); + KeMemoryBarrier(); + } + + return ret; +} + +static BOOLEAN virtqueue_has_buf_packed(struct virtqueue *_vq) +{ + struct virtqueue_packed *vq = packedvq(_vq); + return more_used_packed(vq); +} + +static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq) +{ + struct virtqueue_packed *vq = packedvq(_vq); + u16 new, old, off_wrap, flags, wrap_counter, event_idx; + bool needs_kick; + union { + struct { + __le16 off_wrap; + __le16 flags; + }; + u32 value32; + } snapshot; + + /* + * We need to expose the new flags value before checking notification + * suppressions. + */ + KeMemoryBarrier(); + + old = vq->packed.next_avail_idx - vq->num_added; + new = vq->packed.next_avail_idx; + vq->num_added = 0; + + snapshot.value32 = *(u32 *)vq->packed.vring.device; + flags = snapshot.flags; + + if (flags != VRING_PACKED_EVENT_FLAG_DESC) { + needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE); + goto out; + } + + off_wrap = snapshot.off_wrap; + + wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR; + event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR); + if (wrap_counter != vq->packed.avail_wrap_counter) + event_idx -= (u16)vq->packed.vring.num; + + needs_kick = vring_need_event(event_idx, new, old); +out: + return needs_kick; +} + +static void virtqueue_kick_always_packed(struct virtqueue *_vq) +{ + struct virtqueue_packed *vq = packedvq(_vq); + KeMemoryBarrier(); + vq->num_added = 0; + virtqueue_notify(_vq); +} + +/* Initializes a new virtqueue using already allocated memory */ +struct virtqueue *vring_new_virtqueue_packed( + unsigned int index, /* virtqueue index */ + unsigned int num, /* virtqueue size (always a power of 2) */ + unsigned int vring_align, /* vring alignment requirement */ + VirtIODevice *vdev, /* the virtio device owning the queue */ + void *pages, /* vring memory */ + void(*notify)(struct virtqueue *), /* notification callback */ + void *control) /* virtqueue memory */ +{ + struct virtqueue_packed *vq = packedvq(control); + unsigned int i; + + vq->vq.vdev = vdev; + vq->vq.notification_cb = notify; + vq->vq.index = index; + + vq->vq.avail_va = (u8 *)pages + num * sizeof(struct vring_packed_desc); + vq->vq.used_va = (u8 *)vq->vq.avail_va + sizeof(struct vring_packed_desc_event); + + /* initialize the ring */ + vq->packed.vring.num = num; + vq->packed.vring.desc = pages; + vq->packed.vring.driver = vq->vq.avail_va; + vq->packed.vring.device = vq->vq.used_va; + + vq->num_free = num; + vq->free_head = 0; + vq->num_added = 0; + 
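+ /*
+  * Per the virtio 1.1 packed ring layout, both wrap counters start at 1.
+  * The driver marks a descriptor available by setting its F_AVAIL flag bit
+  * equal to avail_wrap_counter (and F_USED to the inverse); the device marks
+  * it used by making both bits equal to its own wrap counter, which is what
+  * is_used_desc_packed() checks.
+  */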
vq->packed.avail_wrap_counter = 1; + vq->packed.used_wrap_counter = 1; + vq->last_used_idx = 0; + vq->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL; + vq->packed.next_avail_idx = 0; + vq->packed.event_flags_shadow = 0; + vq->packed.desc_state = vq->desc_states; + + RtlZeroMemory(vq->packed.desc_state, num * sizeof(*vq->packed.desc_state)); + for (i = 0; i < num - 1; i++) { + vq->packed.desc_state[i].next = i + 1; + } + + vq->vq.add_buf = virtqueue_add_buf_packed; + vq->vq.detach_unused_buf = virtqueue_detach_unused_buf_packed; + vq->vq.disable_cb = virtqueue_disable_cb_packed; + vq->vq.enable_cb = virtqueue_enable_cb_packed; + vq->vq.enable_cb_delayed = virtqueue_enable_cb_delayed_packed; + vq->vq.get_buf = virtqueue_get_buf_packed; + vq->vq.has_buf = virtqueue_has_buf_packed; + vq->vq.is_interrupt_enabled = virtqueue_is_interrupt_enabled_packed; + vq->vq.kick_always = virtqueue_kick_always_packed; + vq->vq.kick_prepare = virtqueue_kick_prepare_packed; + vq->vq.shutdown = virtqueue_shutdown_packed; + return &vq->vq; +} diff --git a/drivers/network/dd/netkvm/virtio/VirtIORing.c b/drivers/network/dd/netkvm/virtio/VirtIORing.c new file mode 100644 index 00000000000..7a14aa24ce5 --- /dev/null +++ b/drivers/network/dd/netkvm/virtio/VirtIORing.c @@ -0,0 +1,562 @@ +/* + * Virtio ring manipulation routines + * + * Copyright 2017 Red Hat, Inc. + * + * Authors: + * Ladi Prosek + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +#include "osdep.h" +#include "virtio_pci.h" +#include "VirtIO.h" +#include "kdebugprint.h" +#include "virtio_ring.h" +#include "windows/virtio_ring_allocation.h" + +#define DESC_INDEX(num, i) ((i) & ((num) - 1)) + + /* This marks a buffer as continuing via the next field. */ +#define VIRTQ_DESC_F_NEXT 1 +/* This marks a buffer as write-only (otherwise read-only). */ +#define VIRTQ_DESC_F_WRITE 2 +/* This means the buffer contains a list of buffer descriptors. */ +#define VIRTQ_DESC_F_INDIRECT 4 + +/* The Host uses this in used->flags to advise the Guest: don't kick me when +* you add a buffer. 
It's unreliable, so it's simply an optimization. Guest +* will still kick if it's out of buffers. */ +#define VIRTQ_USED_F_NO_NOTIFY 1 +/* The Guest uses this in avail->flags to advise the Host: don't interrupt me +* when you consume a buffer. It's unreliable, so it's simply an +* optimization. */ +#define VIRTQ_AVAIL_F_NO_INTERRUPT 1 + +#pragma warning (push) +#pragma warning (disable:4200) + +#include + +/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ +struct vring_desc { + /* Address (guest-physical). */ + __virtio64 addr; + /* Length. */ + __virtio32 len; + /* The flags as indicated above. */ + __virtio16 flags; + /* We chain unused descriptors via this, too */ + __virtio16 next; +}; + +struct vring_avail { + __virtio16 flags; + __virtio16 idx; + __virtio16 ring[]; +}; + +/* u32 is used here for ids for padding reasons. */ +struct vring_used_elem { + /* Index of start of used descriptor chain. */ + __virtio32 id; + /* Total length of the descriptor chain which was used (written to) */ + __virtio32 len; +}; + +struct vring_used { + __virtio16 flags; + __virtio16 idx; + struct vring_used_elem ring[]; +}; + +#include + +/* Alignment requirements for vring elements. +* When using pre-virtio 1.0 layout, these fall out naturally. +*/ +#define VRING_AVAIL_ALIGN_SIZE 2 +#define VRING_USED_ALIGN_SIZE 4 +#define VRING_DESC_ALIGN_SIZE 16 + +/* The standard layout for the ring is a continuous chunk of memory which looks +* like this. We assume num is a power of 2. +* +* struct vring +* { +* // The actual descriptors (16 bytes each) +* struct vring_desc desc[num]; +* +* // A ring of available descriptor heads with free-running index. +* __virtio16 avail_flags; +* __virtio16 avail_idx; +* __virtio16 available[num]; +* __virtio16 used_event_idx; +* +* // Padding to the next align boundary. +* char pad[]; +* +* // A ring of used descriptor heads with free-running index. +* __virtio16 used_flags; +* __virtio16 used_idx; +* struct vring_used_elem used[num]; +* __virtio16 avail_event_idx; +* }; +*/ +/* We publish the used event index at the end of the available ring, and vice +* versa. They are at the end for backwards compatibility. */ + +struct vring { + unsigned int num; + + struct vring_desc *desc; + + struct vring_avail *avail; + + struct vring_used *used; +}; + +#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num]) +#define vring_avail_event(vr) (*(__virtio16 *)&(vr)->used->ring[(vr)->num]) + +static inline void vring_init(struct vring *vr, unsigned int num, void *p, + unsigned long align) +{ + vr->num = num; + vr->desc = (struct vring_desc *)p; + vr->avail = (struct vring_avail *)((__u8 *)p + num * sizeof(struct vring_desc)); + vr->used = (struct vring_used *)(((ULONG_PTR)&vr->avail->ring[num] + sizeof(__virtio16) + + align - 1) & ~((ULONG_PTR)align - 1)); +} + +static inline unsigned vring_size_split(unsigned int num, unsigned long align) +{ +#pragma warning (push) +#pragma warning (disable:4319) + return ((sizeof(struct vring_desc) * num + sizeof(__virtio16) * (3 + num) + + align - 1) & ~(align - 1)) + + sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num; +#pragma warning(pop) +} + +/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */ +/* Assuming a given event_idx value from the other side, if +* we have just incremented index from old to new_idx, +* should we trigger an event? 
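+* For example, with event_idx = 5, old = 4 and new_idx = 7 the test below evaluates
+* (u16)(7 - 5 - 1) = 1 < (u16)(7 - 4) = 3, so we notify: the index the other side
+* asked to be woken at has just been passed. With event_idx = 9 instead, the left
+* side wraps to 65533 and no notification is sent.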
*/ +static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old) +{ + /* Note: Xen has similar logic for notification hold-off + * in include/xen/interface/io/ring.h with req_event and req_prod + * corresponding to event_idx + 1 and new_idx respectively. + * Note also that req_event and req_prod in Xen start at 1, + * event indexes in virtio start at 0. */ + return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old); +} + +struct virtqueue_split { + struct virtqueue vq; + struct vring vring; + struct { + u16 flags; + u16 idx; + } master_vring_avail; + unsigned int num_unused; + unsigned int num_added_since_kick; + u16 first_unused; + u16 last_used; + void *opaque[]; +}; + +#define splitvq(vq) ((struct virtqueue_split *)vq) + +#pragma warning (pop) + + /* Returns the index of the first unused descriptor */ +static inline u16 get_unused_desc(struct virtqueue_split *vq) +{ + u16 idx = vq->first_unused; + ASSERT(vq->num_unused > 0); + + vq->first_unused = vq->vring.desc[idx].next; + vq->num_unused--; + return idx; +} + +/* Marks the descriptor chain starting at index idx as unused */ +static inline void put_unused_desc_chain(struct virtqueue_split *vq, u16 idx) +{ + u16 start = idx; + + vq->opaque[idx] = NULL; + while (vq->vring.desc[idx].flags & VIRTQ_DESC_F_NEXT) { + idx = vq->vring.desc[idx].next; + vq->num_unused++; + } + + vq->vring.desc[idx].flags = VIRTQ_DESC_F_NEXT; + vq->vring.desc[idx].next = vq->first_unused; + vq->num_unused++; + + vq->first_unused = start; +} + +/* Adds a buffer to a virtqueue, returns 0 on success, negative number on error */ +static int virtqueue_add_buf_split( + struct virtqueue *_vq, /* the queue */ + struct scatterlist sg[], /* sg array of length out + in */ + unsigned int out, /* number of driver->device buffer descriptors in sg */ + unsigned int in, /* number of device->driver buffer descriptors in sg */ + void *opaque, /* later returned from virtqueue_get_buf */ + void *va_indirect, /* VA of the indirect page or NULL */ + ULONGLONG phys_indirect) /* PA of the indirect page or 0 */ +{ + struct virtqueue_split *vq = splitvq(_vq); + struct vring *vring = &vq->vring; + unsigned int i; + u16 idx; + + if (va_indirect && (out + in) > 1 && vq->num_unused > 0) { + /* Use one indirect descriptor */ + struct vring_desc *desc = (struct vring_desc *)va_indirect; + + for (i = 0; i < out + in; i++) { + desc[i].flags = (i < out ? 
0 : VIRTQ_DESC_F_WRITE); + desc[i].flags |= VIRTQ_DESC_F_NEXT; + desc[i].addr = sg[i].physAddr.QuadPart; + desc[i].len = sg[i].length; + desc[i].next = (u16)i + 1; + } + desc[i - 1].flags &= ~VIRTQ_DESC_F_NEXT; + + idx = get_unused_desc(vq); + vq->vring.desc[idx].flags = VIRTQ_DESC_F_INDIRECT; + vq->vring.desc[idx].addr = phys_indirect; + vq->vring.desc[idx].len = i * sizeof(struct vring_desc); + + vq->opaque[idx] = opaque; + } else { + u16 last_idx; + + /* Use out + in regular descriptors */ + if (out + in > vq->num_unused) { + return -ENOSPC; + } + + /* First descriptor */ + idx = last_idx = get_unused_desc(vq); + vq->opaque[idx] = opaque; + + vring->desc[idx].addr = sg[0].physAddr.QuadPart; + vring->desc[idx].len = sg[0].length; + vring->desc[idx].flags = VIRTQ_DESC_F_NEXT; + if (out == 0) { + vring->desc[idx].flags |= VIRTQ_DESC_F_WRITE; + } + vring->desc[idx].next = vq->first_unused; + + /* The rest of descriptors */ + for (i = 1; i < out + in; i++) { + last_idx = get_unused_desc(vq); + + vring->desc[last_idx].addr = sg[i].physAddr.QuadPart; + vring->desc[last_idx].len = sg[i].length; + vring->desc[last_idx].flags = VIRTQ_DESC_F_NEXT; + if (i >= out) { + vring->desc[last_idx].flags |= VIRTQ_DESC_F_WRITE; + } + vring->desc[last_idx].next = vq->first_unused; + } + vring->desc[last_idx].flags &= ~VIRTQ_DESC_F_NEXT; + } + + /* Write the first descriptor into the available ring */ + vring->avail->ring[DESC_INDEX(vring->num, vq->master_vring_avail.idx)] = idx; + KeMemoryBarrier(); + vring->avail->idx = ++vq->master_vring_avail.idx; + vq->num_added_since_kick++; + + return 0; +} + +/* Gets the opaque pointer associated with a returned buffer, or NULL if no buffer is available */ +static void *virtqueue_get_buf_split( + struct virtqueue *_vq, /* the queue */ + unsigned int *len) /* number of bytes returned by the device */ +{ + struct virtqueue_split *vq = splitvq(_vq); + void *opaque; + u16 idx; + + if (vq->last_used == (int)vq->vring.used->idx) { + /* No descriptor index in the used ring */ + return NULL; + } + KeMemoryBarrier(); + + idx = DESC_INDEX(vq->vring.num, vq->last_used); + *len = vq->vring.used->ring[idx].len; + + /* Get the first used descriptor */ + idx = (u16)vq->vring.used->ring[idx].id; + opaque = vq->opaque[idx]; + + /* Put all descriptors back to the free list */ + put_unused_desc_chain(vq, idx); + + vq->last_used++; + if (_vq->vdev->event_suppression_enabled && virtqueue_is_interrupt_enabled(_vq)) { + vring_used_event(&vq->vring) = vq->last_used; + KeMemoryBarrier(); + } + + ASSERT(opaque != NULL); + return opaque; +} + +/* Returns true if at least one returned buffer is available, false otherwise */ +static BOOLEAN virtqueue_has_buf_split(struct virtqueue *_vq) +{ + struct virtqueue_split *vq = splitvq(_vq); + return (vq->last_used != vq->vring.used->idx); +} + +/* Returns true if the device should be notified, false otherwise */ +static bool virtqueue_kick_prepare_split(struct virtqueue *_vq) +{ + struct virtqueue_split *vq = splitvq(_vq); + bool wrap_around; + u16 old, new; + KeMemoryBarrier(); + + wrap_around = (vq->num_added_since_kick >= (1 << 16)); + + old = (u16)(vq->master_vring_avail.idx - vq->num_added_since_kick); + new = vq->master_vring_avail.idx; + vq->num_added_since_kick = 0; + + if (_vq->vdev->event_suppression_enabled) { + return wrap_around || (bool)vring_need_event(vring_avail_event(&vq->vring), new, old); + } else { + return !(vq->vring.used->flags & VIRTQ_USED_F_NO_NOTIFY); + } +} + +/* Notifies the device even if it's not necessary according to the 
event suppression logic */ +static void virtqueue_kick_always_split(struct virtqueue *_vq) +{ + struct virtqueue_split *vq = splitvq(_vq); + KeMemoryBarrier(); + vq->num_added_since_kick = 0; + virtqueue_notify(_vq); +} + +/* Enables interrupts on a virtqueue and returns false if the queue has at least one returned + * buffer available to be fetched by virtqueue_get_buf, true otherwise */ +static bool virtqueue_enable_cb_split(struct virtqueue *_vq) +{ + struct virtqueue_split *vq = splitvq(_vq); + if (!virtqueue_is_interrupt_enabled(_vq)) { + vq->master_vring_avail.flags &= ~VIRTQ_AVAIL_F_NO_INTERRUPT; + if (!_vq->vdev->event_suppression_enabled) + { + vq->vring.avail->flags = vq->master_vring_avail.flags; + } + } + + vring_used_event(&vq->vring) = vq->last_used; + KeMemoryBarrier(); + return (vq->last_used == vq->vring.used->idx); +} + +/* Enables interrupts on a virtqueue after ~3/4 of the currently pushed buffers have been + * returned, returns false if this condition currently holds, false otherwise */ +static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq) +{ + struct virtqueue_split *vq = splitvq(_vq); + u16 bufs; + + if (!virtqueue_is_interrupt_enabled(_vq)) { + vq->master_vring_avail.flags &= ~VIRTQ_AVAIL_F_NO_INTERRUPT; + if (!_vq->vdev->event_suppression_enabled) + { + vq->vring.avail->flags = vq->master_vring_avail.flags; + } + } + + /* Note that 3/4 is an arbitrary threshold */ + bufs = (u16)(vq->master_vring_avail.idx - vq->last_used) * 3 / 4; + vring_used_event(&vq->vring) = vq->last_used + bufs; + KeMemoryBarrier(); + return ((vq->vring.used->idx - vq->last_used) <= bufs); +} + +/* Disables interrupts on a virtqueue */ +static void virtqueue_disable_cb_split(struct virtqueue *_vq) +{ + struct virtqueue_split *vq = splitvq(_vq); + if (virtqueue_is_interrupt_enabled(_vq)) { + vq->master_vring_avail.flags |= VIRTQ_AVAIL_F_NO_INTERRUPT; + if (!_vq->vdev->event_suppression_enabled) + { + vq->vring.avail->flags = vq->master_vring_avail.flags; + } + } +} + +/* Returns true if interrupts are enabled on a virtqueue, false otherwise */ +static BOOLEAN virtqueue_is_interrupt_enabled_split(struct virtqueue *_vq) +{ + struct virtqueue_split *vq = splitvq(_vq); + return !(vq->master_vring_avail.flags & VIRTQ_AVAIL_F_NO_INTERRUPT); +} + +/* Re-initializes an already initialized virtqueue */ +static void virtqueue_shutdown_split(struct virtqueue *_vq) +{ + struct virtqueue_split *vq = splitvq(_vq); + unsigned int num = vq->vring.num; + void *pages = vq->vring.desc; + unsigned int vring_align = _vq->vdev->addr ? 
PAGE_SIZE : SMP_CACHE_BYTES; + + RtlZeroMemory(pages, vring_size_split(num, vring_align)); + (void)vring_new_virtqueue_split( + _vq->index, + vq->vring.num, + vring_align, + _vq->vdev, + pages, + _vq->notification_cb, + vq); +} + +/* Gets the opaque pointer associated with a not-yet-returned buffer, or NULL if no buffer is available + * to aid drivers with cleaning up all data on virtqueue shutdown */ +static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq) +{ + struct virtqueue_split *vq = splitvq(_vq); + u16 idx; + void *opaque = NULL; + + for (idx = 0; idx < (u16)vq->vring.num; idx++) { + opaque = vq->opaque[idx]; + if (opaque) { + put_unused_desc_chain(vq, idx); + vq->vring.avail->idx = --vq->master_vring_avail.idx; + break; + } + } + return opaque; +} + +/* Returns the size of the virtqueue structure including + * additional size for per-descriptor data */ +unsigned int vring_control_block_size(u16 qsize, bool packed) +{ + unsigned int res; + if (packed) { + return vring_control_block_size_packed(qsize); + } + res = sizeof(struct virtqueue_split); + res += sizeof(void *) * qsize; + return res; +} + +/* Initializes a new virtqueue using already allocated memory */ +struct virtqueue *vring_new_virtqueue_split( + unsigned int index, /* virtqueue index */ + unsigned int num, /* virtqueue size (always a power of 2) */ + unsigned int vring_align, /* vring alignment requirement */ + VirtIODevice *vdev, /* the virtio device owning the queue */ + void *pages, /* vring memory */ + void(*notify)(struct virtqueue *), /* notification callback */ + void *control) /* virtqueue memory */ +{ + struct virtqueue_split *vq = splitvq(control); + u16 i; + + if (DESC_INDEX(num, num) != 0) { + DPrintf(0, "Virtqueue length %u is not a power of 2\n", num); + return NULL; + } + + RtlZeroMemory(vq, sizeof(*vq) + num * sizeof(void *)); + + vring_init(&vq->vring, num, pages, vring_align); + vq->vq.vdev = vdev; + vq->vq.notification_cb = notify; + vq->vq.index = index; + + /* Build a linked list of unused descriptors */ + vq->num_unused = num; + vq->first_unused = 0; + for (i = 0; i < num - 1; i++) { + vq->vring.desc[i].flags = VIRTQ_DESC_F_NEXT; + vq->vring.desc[i].next = i + 1; + } + vq->vq.avail_va = vq->vring.avail; + vq->vq.used_va = vq->vring.used; + vq->vq.add_buf = virtqueue_add_buf_split; + vq->vq.detach_unused_buf = virtqueue_detach_unused_buf_split; + vq->vq.disable_cb = virtqueue_disable_cb_split; + vq->vq.enable_cb = virtqueue_enable_cb_split; + vq->vq.enable_cb_delayed = virtqueue_enable_cb_delayed_split; + vq->vq.get_buf = virtqueue_get_buf_split; + vq->vq.has_buf = virtqueue_has_buf_split; + vq->vq.is_interrupt_enabled = virtqueue_is_interrupt_enabled_split; + vq->vq.kick_always = virtqueue_kick_always_split; + vq->vq.kick_prepare = virtqueue_kick_prepare_split; + vq->vq.shutdown = virtqueue_shutdown_split; + return &vq->vq; +} + +/* Negotiates virtio transport features */ +void vring_transport_features( + VirtIODevice *vdev, + u64 *features) /* points to device features on entry and driver accepted features on return */ +{ + unsigned int i; + + for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) { + if (i != VIRTIO_RING_F_INDIRECT_DESC && + i != VIRTIO_RING_F_EVENT_IDX && + i != VIRTIO_F_VERSION_1) { + virtio_feature_disable(*features, i); + } + } +} + +/* Returns the max number of scatter-gather elements that fit in an indirect pages */ +u32 virtio_get_indirect_page_capacity() +{ + return PAGE_SIZE / sizeof(struct vring_desc); +} + +unsigned long vring_size(unsigned 
int num, unsigned long align, bool packed) +{ + if (packed) { + return vring_size_packed(num, align); + } else { + return vring_size_split(num, align); + } +} diff --git a/drivers/network/dd/netkvm/virtio/kdebugprint.h b/drivers/network/dd/netkvm/virtio/kdebugprint.h new file mode 100644 index 00000000000..35d2b1aa2ba --- /dev/null +++ b/drivers/network/dd/netkvm/virtio/kdebugprint.h @@ -0,0 +1,11 @@ +#pragma once + +extern int virtioDebugLevel; +extern int bDebugPrint; +typedef void (*tDebugPrintFunc)(const char *format, ...); +extern tDebugPrintFunc VirtioDebugPrintProc; + +#define DPrintf(Level, MSG, ...) if ((!bDebugPrint) || Level > virtioDebugLevel) {} else VirtioDebugPrintProc(MSG, __VA_ARGS__) + +#define DEBUG_ENTRY(level) DPrintf(level, "[%s]=>\n", __FUNCTION__) +#define DEBUG_EXIT_STATUS(level, status) DPrintf((status == NDIS_STATUS_SUCCESS ? level : 0), "[%s]<=0x%X\n", __FUNCTION__, (status)) diff --git a/drivers/network/dd/netkvm/virtio/linux/types.h b/drivers/network/dd/netkvm/virtio/linux/types.h new file mode 100644 index 00000000000..eb1891aa016 --- /dev/null +++ b/drivers/network/dd/netkvm/virtio/linux/types.h @@ -0,0 +1,19 @@ +#ifndef _LINUX_TYPES_H +#define _LINUX_TYPES_H + +#define __bitwise__ +// #define __attribute__(x) + +#define u8 unsigned char +#define u16 unsigned short +#define u32 unsigned long +#define u64 ULONGLONG + +#define __u8 unsigned char +#define __u16 unsigned short +#define __le16 unsigned short +#define __u32 unsigned long +#define __le32 unsigned long +#define __u64 ULONGLONG + +#endif /* _LINUX_TYPES_H */ diff --git a/drivers/network/dd/netkvm/virtio/linux/virtio_config.h b/drivers/network/dd/netkvm/virtio/linux/virtio_config.h new file mode 100644 index 00000000000..bd459199fc6 --- /dev/null +++ b/drivers/network/dd/netkvm/virtio/linux/virtio_config.h @@ -0,0 +1,73 @@ +#ifndef _UAPI_LINUX_VIRTIO_CONFIG_H +#define _UAPI_LINUX_VIRTIO_CONFIG_H +/* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so + * anyone can use the definitions to implement compatible drivers/servers. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
*/ + +/* Virtio devices use a standardized configuration space to define their + * features and pass configuration information, but each implementation can + * store and access that space differently. */ + +/* Status byte for guest to report progress, and synchronize features. */ +/* We have seen device and processed generic fields (VIRTIO_CONFIG_F_VIRTIO) */ +#define VIRTIO_CONFIG_S_ACKNOWLEDGE 1 +/* We have found a driver for the device. */ +#define VIRTIO_CONFIG_S_DRIVER 2 +/* Driver has used its parts of the config, and is happy */ +#define VIRTIO_CONFIG_S_DRIVER_OK 4 +/* Driver has finished configuring features */ +#define VIRTIO_CONFIG_S_FEATURES_OK 8 +/* Device entered invalid state, driver SHOULD reset it */ +#define VIRTIO_CONFIG_S_NEEDS_RESET 0x40 +/* We've given up on this device. */ +#define VIRTIO_CONFIG_S_FAILED 0x80 +/* virtio library features bits */ + + +/* Some virtio feature bits (currently bits 28 through 31) are reserved for the + * transport being used (eg. virtio_ring), the rest are per-device feature + * bits. */ +#define VIRTIO_TRANSPORT_F_START 28 +#define VIRTIO_TRANSPORT_F_END 32 + +/* Do we get callbacks when the ring is completely used, even if we've + * suppressed them? */ +#define VIRTIO_F_NOTIFY_ON_EMPTY 24 + +/* Can the device handle any descriptor layout? */ +#define VIRTIO_F_ANY_LAYOUT 27 + +/* v1.0 compliant. */ +#define VIRTIO_F_VERSION_1 32 + +#define VIRTIO_F_IOMMU_PLATFORM 33 + +/* This feature indicates support for the packed virtqueue layout. */ +#define VIRTIO_F_RING_PACKED 34 + +// if this number is not equal to desc size, queue creation fails +#define SIZE_OF_SINGLE_INDIRECT_DESC 16 + +#endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */ diff --git a/drivers/network/dd/netkvm/virtio/linux/virtio_types.h b/drivers/network/dd/netkvm/virtio/linux/virtio_types.h new file mode 100644 index 00000000000..fcda3e10a23 --- /dev/null +++ b/drivers/network/dd/netkvm/virtio/linux/virtio_types.h @@ -0,0 +1,47 @@ +#ifndef _UAPI_LINUX_VIRTIO_TYPES_H +#define _UAPI_LINUX_VIRTIO_TYPES_H +/* Type definitions for virtio implementations. +* +* This header is BSD licensed so anyone can use the definitions to implement +* compatible drivers/servers. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* 1. Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* 2. Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in the +* documentation and/or other materials provided with the distribution. +* 3. Neither the name of IBM nor the names of its contributors +* may be used to endorse or promote products derived from this software +* without specific prior written permission. +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND +* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +* ARE DISCLAIMED. 
IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE +* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +* SUCH DAMAGE. +* +* Copyright (C) 2014 Red Hat, Inc. +* Author: Michael S. Tsirkin +*/ + +#include "linux/types.h" + +/* +* __virtio{16,32,64} have the following meaning: +* - __u{16,32,64} for virtio devices in legacy mode, accessed in native endian +* - __le{16,32,64} for standard-compliant virtio devices +*/ + +typedef __u16 __bitwise__ __virtio16; +typedef __u32 __bitwise__ __virtio32; +typedef __u64 __bitwise__ __virtio64; + +#endif /* _UAPI_LINUX_VIRTIO_TYPES_H */ diff --git a/drivers/network/dd/netkvm/virtio/osdep.h b/drivers/network/dd/netkvm/virtio/osdep.h new file mode 100644 index 00000000000..b1c98145060 --- /dev/null +++ b/drivers/network/dd/netkvm/virtio/osdep.h @@ -0,0 +1,39 @@ +////////////////////////////////////////////////////////////////////////////////////////// +// Copyright (c) 2007 Qumranet All Rights Reserved +// +// Module Name: +// osdep.h +// +// Abstract: +// Windows OS dependent definitions of data types +// +// Author: +// Yan Vugenfirer - February 2007. +// +////////////////////////////////////////////////////////////////////////////////////////// + +#pragma once + +#include + +#ifdef __REACTOS__ +#ifdef __GNUC__ +#undef FORCEINLINE +#define FORCEINLINE __attribute__((__always_inline__)) +#endif +#endif + +#ifndef __REACTOS__ +#define ENOSPC 1 +#endif + +#if !defined(__cplusplus) && !defined(bool) +// Important note: in MSFT C++ bool length is 1 bytes +// C++ does not define length of bool +// inconsistent definition of 'bool' may create compatibility problems +#define bool u8 +#define false FALSE +#define true TRUE +#endif + +#define SMP_CACHE_BYTES 64 diff --git a/drivers/network/dd/netkvm/virtio/virtio_pci.h b/drivers/network/dd/netkvm/virtio/virtio_pci.h new file mode 100644 index 00000000000..d931f49a224 --- /dev/null +++ b/drivers/network/dd/netkvm/virtio/virtio_pci.h @@ -0,0 +1,392 @@ +/* +* Virtio PCI driver +* +* This module allows virtio devices to be used over a virtual PCI device. +* This can be used with QEMU based VMMs like KVM or Xen. +* +* Copyright IBM Corp. 2007 +* +* Authors: +* Anthony Liguori +* +* This header is BSD licensed so anyone can use the definitions to implement +* compatible drivers/servers. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* 1. Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* 2. Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in the +* documentation and/or other materials provided with the distribution. +* 3. Neither the name of IBM nor the names of its contributors +* may be used to endorse or promote products derived from this software +* without specific prior written permission. 
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND +* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +* ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE +* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +* SUCH DAMAGE. +*/ + +#ifndef _LINUX_VIRTIO_PCI_H +#define _LINUX_VIRTIO_PCI_H + +#include "linux/types.h" +#include "linux/virtio_config.h" + +#ifndef VIRTIO_PCI_NO_LEGACY + +/* A 32-bit r/o bitmask of the features supported by the host */ +#define VIRTIO_PCI_HOST_FEATURES 0 + +/* A 32-bit r/w bitmask of features activated by the guest */ +#define VIRTIO_PCI_GUEST_FEATURES 4 + +/* A 32-bit r/w PFN for the currently selected queue */ +#define VIRTIO_PCI_QUEUE_PFN 8 + +/* A 16-bit r/o queue size for the currently selected queue */ +#define VIRTIO_PCI_QUEUE_NUM 12 + +/* A 16-bit r/w queue selector */ +#define VIRTIO_PCI_QUEUE_SEL 14 + +/* A 16-bit r/w queue notifier */ +#define VIRTIO_PCI_QUEUE_NOTIFY 16 + +/* An 8-bit device status register. */ +#define VIRTIO_PCI_STATUS 18 + +/* An 8-bit r/o interrupt status register. Reading the value will return the +* current contents of the ISR and will also clear it. This is effectively +* a read-and-acknowledge. */ +#define VIRTIO_PCI_ISR 19 + +/* MSI-X registers: only enabled if MSI-X is enabled. */ +/* A 16-bit vector for configuration changes. */ +#define VIRTIO_MSI_CONFIG_VECTOR 20 +/* A 16-bit vector for selected queue notifications. */ +#define VIRTIO_MSI_QUEUE_VECTOR 22 + +/* The remaining space is defined by each driver as the per-driver +* configuration space */ +#define VIRTIO_PCI_CONFIG_OFF(msix_enabled) ((msix_enabled) ? 24 : 20) +/* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */ +#define VIRTIO_PCI_CONFIG(msix_enabled) VIRTIO_PCI_CONFIG_OFF(msix_enabled) + +/* How many bits to shift physical queue address written to QUEUE_PFN. +* 12 is historical, and due to x86 page size. */ +#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12 + +/* The alignment to use between consumer and producer parts of vring. +* x86 pagesize again. */ +#define VIRTIO_PCI_VRING_ALIGN 4096 + +#endif /* VIRTIO_PCI_NO_LEGACY */ + +/* The bit of the ISR which indicates a device configuration change. */ +#define VIRTIO_PCI_ISR_CONFIG 0x2 +/* Vector value used to disable MSI for queue */ +#define VIRTIO_MSI_NO_VECTOR 0xffff + +/* IDs for different capabilities. Must all exist. */ + +/* Common configuration */ +#define VIRTIO_PCI_CAP_COMMON_CFG 1 +/* Notifications */ +#define VIRTIO_PCI_CAP_NOTIFY_CFG 2 +/* ISR access */ +#define VIRTIO_PCI_CAP_ISR_CFG 3 +/* Device specific configuration */ +#define VIRTIO_PCI_CAP_DEVICE_CFG 4 +/* PCI configuration access */ +#define VIRTIO_PCI_CAP_PCI_CFG 5 + +/* This is the PCI capability header: */ +struct virtio_pci_cap { + __u8 cap_vndr; /* Generic PCI field: PCI_CAPABILITY_ID_VENDOR_SPECIFIC */ + __u8 cap_next; /* Generic PCI field: next ptr. */ + __u8 cap_len; /* Generic PCI field: capability length */ + __u8 cfg_type; /* Identifies the structure. */ + __u8 bar; /* Where to find it. 
*/ + __u8 padding[3]; /* Pad to full dword. */ + __le32 offset; /* Offset within bar. */ + __le32 length; /* Length of the structure, in bytes. */ +}; + +struct virtio_pci_notify_cap { + struct virtio_pci_cap cap; + __le32 notify_off_multiplier; /* Multiplier for queue_notify_off. */ +}; + +/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */ +struct virtio_pci_common_cfg { + /* About the whole device. */ + __le32 device_feature_select; /* read-write */ + __le32 device_feature; /* read-only */ + __le32 guest_feature_select; /* read-write */ + __le32 guest_feature; /* read-write */ + __le16 msix_config; /* read-write */ + __le16 num_queues; /* read-only */ + __u8 device_status; /* read-write */ + __u8 config_generation; /* read-only */ + + /* About a specific virtqueue. */ + __le16 queue_select; /* read-write */ + __le16 queue_size; /* read-write, power of 2. */ + __le16 queue_msix_vector; /* read-write */ + __le16 queue_enable; /* read-write */ + __le16 queue_notify_off; /* read-only */ + __le32 queue_desc_lo; /* read-write */ + __le32 queue_desc_hi; /* read-write */ + __le32 queue_avail_lo; /* read-write */ + __le32 queue_avail_hi; /* read-write */ + __le32 queue_used_lo; /* read-write */ + __le32 queue_used_hi; /* read-write */ +}; + +#define MAX_QUEUES_PER_DEVICE_DEFAULT 8 + +typedef struct virtio_queue_info +{ + /* the actual virtqueue */ + struct virtqueue *vq; + /* the number of entries in the queue */ + u16 num; + /* the virtual address of the ring queue */ + void *queue; +} VirtIOQueueInfo; + +typedef struct virtio_system_ops { + // device register access + u8 (*vdev_read_byte)(ULONG_PTR ulRegister); + u16 (*vdev_read_word)(ULONG_PTR ulRegister); + u32 (*vdev_read_dword)(ULONG_PTR ulRegister); + void (*vdev_write_byte)(ULONG_PTR ulRegister, u8 bValue); + void (*vdev_write_word)(ULONG_PTR ulRegister, u16 wValue); + void (*vdev_write_dword)(ULONG_PTR ulRegister, u32 ulValue); + + // memory management + void *(*mem_alloc_contiguous_pages)(void *context, size_t size); + void (*mem_free_contiguous_pages)(void *context, void *virt); + ULONGLONG (*mem_get_physical_address)(void *context, void *virt); + void *(*mem_alloc_nonpaged_block)(void *context, size_t size); + void (*mem_free_nonpaged_block)(void *context, void *addr); + + // PCI config space access + int (*pci_read_config_byte)(void *context, int where, u8 *bVal); + int (*pci_read_config_word)(void *context, int where, u16 *wVal); + int (*pci_read_config_dword)(void *context, int where, u32 *dwVal); + + // PCI resource handling + size_t (*pci_get_resource_len)(void *context, int bar); + void *(*pci_map_address_range)(void *context, int bar, size_t offset, size_t maxlen); + + // misc + u16 (*vdev_get_msix_vector)(void *context, int queue); + void (*vdev_sleep)(void *context, unsigned int msecs); +} VirtIOSystemOps; + +struct virtio_device; +typedef struct virtio_device VirtIODevice; + +struct virtio_device_ops +{ + // read/write device config and read config generation counter + void (*get_config)(VirtIODevice *vdev, unsigned offset, void *buf, unsigned len); + void (*set_config)(VirtIODevice *vdev, unsigned offset, const void *buf, unsigned len); + u32 (*get_config_generation)(VirtIODevice *vdev); + + // read/write device status byte and reset the device + u8 (*get_status)(VirtIODevice *vdev); + void (*set_status)(VirtIODevice *vdev, u8 status); + void (*reset)(VirtIODevice *vdev); + + // get/set device feature bits + u64 (*get_features)(VirtIODevice *vdev); + NTSTATUS (*set_features)(VirtIODevice *vdev, u64 features); + + // set 
config/queue MSI interrupt vector, returns the new vector + u16 (*set_config_vector)(VirtIODevice *vdev, u16 vector); + u16 (*set_queue_vector)(struct virtqueue *vq, u16 vector); + + // query virtual queue size and memory requirements + NTSTATUS (*query_queue_alloc)(VirtIODevice *vdev, + unsigned index, unsigned short *pNumEntries, + unsigned long *pRingSize, + unsigned long *pHeapSize); + + // allocate and initialize a queue + NTSTATUS (*setup_queue)(struct virtqueue **queue, + VirtIODevice *vdev, VirtIOQueueInfo *info, + unsigned idx, u16 msix_vec); + + // tear down and deallocate a queue + void (*delete_queue)(VirtIOQueueInfo *info); +}; + +struct virtio_device +{ + // the I/O port BAR of the PCI device (legacy virtio devices only) + ULONG_PTR addr; + + // true if the device uses MSI interrupts + bool msix_used; + + // true if the VIRTIO_RING_F_EVENT_IDX feature flag has been negotiated + bool event_suppression_enabled; + + // true if the VIRTIO_F_RING_PACKED feature flag has been negotiated + bool packed_ring; + + // internal device operations, implemented separately for legacy and modern + const struct virtio_device_ops *device; + + // external callbacks implemented separately by different driver model drivers + const struct virtio_system_ops *system; + + // opaque context value passed as first argument to virtio_system_ops callbacks + void *DeviceContext; + + // the ISR status field, reading causes the device to de-assert an interrupt + volatile u8 *isr; + + // modern virtio device capabilities and related state + volatile struct virtio_pci_common_cfg *common; + volatile unsigned char *config; + volatile unsigned char *notify_base; + int notify_map_cap; + u32 notify_offset_multiplier; + + size_t config_len; + size_t notify_len; + + // maximum number of virtqueues that fit in the memory block pointed to by info + ULONG maxQueues; + + // points to inline_info if not more than MAX_QUEUES_PER_DEVICE_DEFAULT queues + // are used, or to an external allocation otherwise + VirtIOQueueInfo *info; + VirtIOQueueInfo inline_info[MAX_QUEUES_PER_DEVICE_DEFAULT]; +}; + +/* Driver API: device init and shutdown + * DeviceContext is a driver defined opaque value which will be passed to driver + * supplied callbacks described in pSystemOps. pSystemOps must be non-NULL and all + * its fields must be non-NULL. msix_used is true if and only if the device is + * configured with MSI support. + */ +NTSTATUS virtio_device_initialize(VirtIODevice *vdev, + const VirtIOSystemOps *pSystemOps, + void *DeviceContext, + bool msix_used); +void virtio_device_shutdown(VirtIODevice *vdev); + +/* Driver API: device status manipulation + * virtio_set_status should not be called by new drivers. Device status should only + * be getting its bits set with virtio_add_status and reset all back to 0 with + * virtio_device_reset. virtio_device_ready is a special version of virtio_add_status + * which adds the VIRTIO_CONFIG_S_DRIVER_OK status bit. + */ +u8 virtio_get_status(VirtIODevice *vdev); +void virtio_set_status(VirtIODevice *vdev, u8 status); +void virtio_add_status(VirtIODevice *vdev, u8 status); + +void virtio_device_reset(VirtIODevice *vdev); +void virtio_device_ready(VirtIODevice *vdev); + +/* Driver API: device feature bitmap manipulation + * Features passed to virtio_set_features should be a subset of features offered by + * the device as returned from virtio_get_features. virtio_set_features sets the + * VIRTIO_CONFIG_S_FEATURES_OK status bit if it is supported by the device. 
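+ * A typical negotiation sequence might look like the following sketch (the
+ * local variable names are illustrative only):
+ *   u64 features = virtio_get_features(vdev);   // bits offered by the device
+ *   vring_transport_features(vdev, &features);  // mask transport bits the ring code does not support
+ *   status = virtio_set_features(vdev, features);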
+ */ +#define virtio_is_feature_enabled(FeaturesList, Feature) (!!((FeaturesList) & (1ULL << (Feature)))) +#define virtio_feature_enable(FeaturesList, Feature) ((FeaturesList) |= (1ULL << (Feature))) +#define virtio_feature_disable(FeaturesList, Feature) ((FeaturesList) &= ~(1ULL << (Feature))) + +u64 virtio_get_features(VirtIODevice *dev); +NTSTATUS virtio_set_features(VirtIODevice *vdev, u64 features); + +/* Driver API: device configuration access + * Both virtio_get_config and virtio_set_config support arbitrary values of the len + * parameter. Config items of length 1, 2, and 4 are read/written using one access, + * length 8 is broken down to two 4 bytes accesses, and any other length is read or + * written byte by byte. + */ +void virtio_get_config(VirtIODevice *vdev, unsigned offset, + void *buf, unsigned len); +void virtio_set_config(VirtIODevice *vdev, unsigned offset, + void *buf, unsigned len); + +/* Driver API: virtqueue setup + * virtio_reserve_queue_memory makes VirtioLib reserve memory for its virtqueue + * bookkeeping. Drivers should call this function if they intend to set up queues + * one by one with virtio_find_queue. virtio_find_queues (plural) internally takes + * care of the reservation and virtio_reserve_queue_memory need not be called. + * Note that in addition to queue interrupt vectors, virtio_find_queues also sets + * up the device config vector as a convenience. + * Drivers should treat the returned struct virtqueue pointers as opaque handles. + */ +NTSTATUS virtio_query_queue_allocation(VirtIODevice *vdev, unsigned index, + unsigned short *pNumEntries, + unsigned long *pRingSize, + unsigned long *pHeapSize); + +NTSTATUS virtio_reserve_queue_memory(VirtIODevice *vdev, unsigned nvqs); + +NTSTATUS virtio_find_queue(VirtIODevice *vdev, unsigned index, + struct virtqueue **vq); +NTSTATUS virtio_find_queues(VirtIODevice *vdev, unsigned nvqs, + struct virtqueue *vqs[]); + +/* Driver API: virtqueue shutdown + * The device must be reset and re-initialized to re-setup queues after they have + * been deleted. + */ +void virtio_delete_queue(struct virtqueue *vq); +void virtio_delete_queues(VirtIODevice *vdev); + +/* Driver API: virtqueue query and manipulation + * virtio_get_queue_descriptor_size + * is useful in situations where the driver has to prepare for the memory allocation + * performed by virtio_reserve_queue_memory beforehand. + */ + +u32 virtio_get_queue_size(struct virtqueue *vq); +unsigned long virtio_get_indirect_page_capacity(); + +static ULONG FORCEINLINE virtio_get_queue_descriptor_size() +{ + return sizeof(VirtIOQueueInfo); +} + +/* Driver API: interrupt handling + * virtio_set_config_vector and virtio_set_queue_vector set the MSI vector used for + * device configuration interrupt and queue interrupt, respectively. The driver may + * choose to either return the vector from the vdev_get_msix_vector callback (called + * as part of queue setup) or call these functions later. Note that setting the vector + * may fail which is indicated by the return value of VIRTIO_MSI_NO_VECTOR. + * virtio_read_isr_status returns the value of the ISR status register, note that it + * is not idempotent, calling the function makes the device de-assert the interrupt. + */ +u16 virtio_set_config_vector(VirtIODevice *vdev, u16 vector); +u16 virtio_set_queue_vector(struct virtqueue *vq, u16 vector); + +u8 virtio_read_isr_status(VirtIODevice *vdev); + +/* Driver API: miscellaneous helpers + * virtio_get_bar_index returns the corresponding BAR index given its physical address. 
+ * This tends to be useful to all drivers since Windows doesn't provide reliable BAR + * indices as part of resource enumeration. The function returns -1 on failure. + */ +int virtio_get_bar_index(PPCI_COMMON_HEADER pPCIHeader, PHYSICAL_ADDRESS BasePA); + +#endif diff --git a/drivers/network/dd/netkvm/virtio/virtio_pci_common.h b/drivers/network/dd/netkvm/virtio/virtio_pci_common.h new file mode 100644 index 00000000000..16720835ed2 --- /dev/null +++ b/drivers/network/dd/netkvm/virtio/virtio_pci_common.h @@ -0,0 +1,88 @@ +#ifndef _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H +#define _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H +/* + * Virtio PCI driver - APIs for common functionality for all device versions + * + * Copyright IBM Corp. 2007 + * Copyright Red Hat, Inc. 2014 + * + * Authors: + * Anthony Liguori + * Rusty Russell + * Michael S. Tsirkin + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#define ioread8(vdev, addr) \ + vdev->system->vdev_read_byte((ULONG_PTR)(addr)) +#define ioread16(vdev, addr) \ + vdev->system->vdev_read_word((ULONG_PTR)(addr)) +#define ioread32(vdev, addr) \ + vdev->system->vdev_read_dword((ULONG_PTR)(addr)) +#define iowrite8(vdev, val, addr) \ + vdev->system->vdev_write_byte((ULONG_PTR)(addr), val) +#define iowrite16(vdev, val, addr) \ + vdev->system->vdev_write_word((ULONG_PTR)(addr), val) +#define iowrite32(vdev, val, addr) \ + vdev->system->vdev_write_dword((ULONG_PTR)(addr), val) +#define iowrite64_twopart(vdev, val, lo_addr, hi_addr) \ + vdev->system->vdev_write_dword((ULONG_PTR)(lo_addr), (u32)(val)); \ + vdev->system->vdev_write_dword((ULONG_PTR)(hi_addr), (val) >> 32) + +#define mem_alloc_contiguous_pages(vdev, size) \ + vdev->system->mem_alloc_contiguous_pages(vdev->DeviceContext, size) +#define mem_free_contiguous_pages(vdev, virt) \ + vdev->system->mem_free_contiguous_pages(vdev->DeviceContext, virt) +#define mem_get_physical_address(vdev, virt) \ + vdev->system->mem_get_physical_address(vdev->DeviceContext, virt) +#define mem_alloc_nonpaged_block(vdev, size) \ + vdev->system->mem_alloc_nonpaged_block(vdev->DeviceContext, size) +#define mem_free_nonpaged_block(vdev, addr) \ + vdev->system->mem_free_nonpaged_block(vdev->DeviceContext, addr) + +#define pci_read_config_byte(vdev, where, bVal) \ + vdev->system->pci_read_config_byte(vdev->DeviceContext, where, bVal) +#define pci_read_config_word(vdev, where, wVal) \ + vdev->system->pci_read_config_word(vdev->DeviceContext, where, wVal) +#define pci_read_config_dword(vdev, where, dwVal) \ + vdev->system->pci_read_config_dword(vdev->DeviceContext, where, dwVal) + +#define pci_get_resource_len(vdev, bar) \ + vdev->system->pci_get_resource_len(vdev->DeviceContext, bar) +#define pci_map_address_range(vdev, bar, offset, maxlen) \ + vdev->system->pci_map_address_range(vdev->DeviceContext, bar, offset, maxlen) + +#define vdev_get_msix_vector(vdev, queue) \ + vdev->system->vdev_get_msix_vector(vdev->DeviceContext, queue) +#define vdev_sleep(vdev, msecs) \ + vdev->system->vdev_sleep(vdev->DeviceContext, msecs) + +/* the notify function used when creating a virt queue */ +void vp_notify(struct virtqueue *vq); + +NTSTATUS vio_legacy_initialize(VirtIODevice *vdev); +NTSTATUS vio_modern_initialize(VirtIODevice *vdev); + +#endif \ No newline at end of file diff --git a/drivers/network/dd/netkvm/virtio/virtio_ring.h b/drivers/network/dd/netkvm/virtio/virtio_ring.h new file mode 100644 index 00000000000..e710da5f16b --- /dev/null +++ b/drivers/network/dd/netkvm/virtio/virtio_ring.h @@ -0,0 +1,50 @@ +#ifndef _UAPI_LINUX_VIRTIO_RING_H +#define _UAPI_LINUX_VIRTIO_RING_H +/* An interface for efficient virtio implementation, currently for use by KVM +* and lguest, but hopefully others soon. Do NOT change this since it will +* break existing servers and clients. +* +* This header is BSD licensed so anyone can use the definitions to implement +* compatible drivers/servers. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* 1. Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* 2. Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in the +* documentation and/or other materials provided with the distribution. +* 3. 
Neither the name of IBM nor the names of its contributors +* may be used to endorse or promote products derived from this software +* without specific prior written permission. +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND +* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +* ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE +* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +* SUCH DAMAGE. +* +* Copyright Rusty Russell IBM Corporation 2007. */ + +#include "linux/types.h" +#include "linux/virtio_types.h" + +/* We support indirect buffer descriptors */ +#define VIRTIO_RING_F_INDIRECT_DESC 28 + +/* The Guest publishes the used index for which it expects an interrupt +* at the end of the avail ring. Host should ignore the avail->flags field. */ +/* The Host publishes the avail index for which it expects a kick +* at the end of the used ring. Guest should ignore the used->flags field. */ +#define VIRTIO_RING_F_EVENT_IDX 29 + +void vring_transport_features(VirtIODevice *vdev, u64 *features); +unsigned long vring_size(unsigned int num, unsigned long align, bool packed); + +#endif /* _UAPI_LINUX_VIRTIO_RING_H */ diff --git a/drivers/network/dd/netkvm/virtio/windows/virtio_ring_allocation.h b/drivers/network/dd/netkvm/virtio/windows/virtio_ring_allocation.h new file mode 100644 index 00000000000..39fe1e7b6a6 --- /dev/null +++ b/drivers/network/dd/netkvm/virtio/windows/virtio_ring_allocation.h @@ -0,0 +1,24 @@ +#ifndef _VIRTIO_RING_ALLOCATION_H +#define _VIRTIO_RING_ALLOCATION_H + +struct virtqueue *vring_new_virtqueue_split(unsigned int index, + unsigned int num, + unsigned int vring_align, + VirtIODevice *vdev, + void *pages, + void (*notify)(struct virtqueue *), + void *control); + +struct virtqueue *vring_new_virtqueue_packed(unsigned int index, + unsigned int num, + unsigned int vring_align, + VirtIODevice *vdev, + void *pages, + void (*notify)(struct virtqueue *), + void *control); + +unsigned int vring_control_block_size(u16 qsize, bool packed); +unsigned int vring_control_block_size_packed(u16 qsize); +unsigned long vring_size_packed(unsigned int num, unsigned long align); + +#endif /* _VIRTIO_RING_ALLOCATION_H */ diff --git a/drivers/network/dd/netkvm/wxp/ParaNdis5-Driver.c b/drivers/network/dd/netkvm/wxp/ParaNdis5-Driver.c new file mode 100644 index 00000000000..c975c7785c2 --- /dev/null +++ b/drivers/network/dd/netkvm/wxp/ParaNdis5-Driver.c @@ -0,0 +1,452 @@ +/* + * This file contains driver-related part of NDIS5.X adapter driver. + * + * Copyright (c) 2008-2017 Red Hat, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +#include "ParaNdis5.h" + +//#define NO_XP_POWER_MANAGEMENT + +#ifdef WPP_EVENT_TRACING +#include "ParaNdis5-Driver.tmh" +#endif + +static NDIS_HANDLE DriverHandle; +static ULONG gID = 0; + +/****************************************************** +Unload handler, only responsibility is cleanup WPP +*******************************************************/ +static VOID NTAPI ParaVirtualNICUnload(IN PDRIVER_OBJECT pDriverObject) +{ + DEBUG_ENTRY(0); + ParaNdis_DebugCleanup(pDriverObject); +} + +/************************************************************* +Required NDIS function +Responsible to put the adapter to known (initial) hardware state + +Do not call any NDIS functions +*************************************************************/ +static VOID NTAPI ParaNdis5_Shutdown(IN NDIS_HANDLE MiniportAdapterContext) +{ + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)MiniportAdapterContext; + ParaNdis_OnShutdown(pContext); +} + +/****************************************************** +Required NDIS procedure +Allocates and initializes adapter context +Finally sets send and receive to Enabled state and reports connect +Returns: +NDIS_STATUS SUCCESS or some error code +*******************************************************/ +static NDIS_STATUS NTAPI ParaNdis5_Initialize(OUT PNDIS_STATUS OpenErrorStatus, + OUT PUINT SelectedMediumIndex, + IN PNDIS_MEDIUM MediumArray, + IN UINT MediumArraySize, + IN NDIS_HANDLE MiniportAdapterHandle, + IN NDIS_HANDLE WrapperConfigurationContext) +{ + NDIS_STATUS status = NDIS_STATUS_UNSUPPORTED_MEDIA; + PARANDIS_ADAPTER *pContext = NULL; + UINT i; + for(i = 0; i < MediumArraySize; ++i) + { + if(MediumArray[i] == NdisMedium802_3) + { + *SelectedMediumIndex = i; + status = NDIS_STATUS_SUCCESS; + break; + } + } + + if (status == NDIS_STATUS_SUCCESS) + { + pContext = + (PARANDIS_ADAPTER *)ParaNdis_AllocateMemory(NULL, sizeof(PARANDIS_ADAPTER)); + if (!pContext) + { + status = NDIS_STATUS_RESOURCES; + } + } + + if (status == NDIS_STATUS_SUCCESS) + { + PVOID pResourceList = &status; + UINT uSize = 0; + NdisZeroMemory(pContext, sizeof(PARANDIS_ADAPTER)); + pContext->ulUniqueID = NdisInterlockedIncrement(&gID); + pContext->DriverHandle = DriverHandle; + pContext->MiniportHandle = MiniportAdapterHandle; + pContext->WrapperConfigurationHandle = 
WrapperConfigurationContext; + NdisMQueryAdapterResources(&status, WrapperConfigurationContext, pResourceList, &uSize); + if (uSize > 0) + pResourceList = ParaNdis_AllocateMemory(MiniportAdapterHandle, uSize); + else + pResourceList = NULL; + if (!pResourceList) + status = uSize > 0 ? NDIS_STATUS_RESOURCES : NDIS_STATUS_FAILURE; + else + { + ULONG attributes; + attributes = NDIS_ATTRIBUTE_DESERIALIZE | NDIS_ATTRIBUTE_BUS_MASTER; + // in XP SP2, if this flag is NOT set, the NDIS halts miniport + // upon transition to S1..S4. + // it seems that XP SP3 ignores it and always sends SET_POWER to D3 +#ifndef NO_XP_POWER_MANAGEMENT + attributes |= NDIS_ATTRIBUTE_NO_HALT_ON_SUSPEND; +#endif + NdisMSetAttributesEx( + MiniportAdapterHandle, + pContext, + 0, + attributes, + NdisInterfacePci); + NdisMQueryAdapterResources(&status, WrapperConfigurationContext, pResourceList, &uSize); + status = ParaNdis_InitializeContext(pContext, (PNDIS_RESOURCE_LIST)pResourceList); + NdisFreeMemory(pResourceList, 0, 0); + } + } + + if (status == NDIS_STATUS_SUCCESS) + { + status = ParaNdis_FinishInitialization(pContext); + if (status == NDIS_STATUS_SUCCESS) + { + ParaNdis_DebugRegisterMiniport(pContext, TRUE); + ParaNdis_IndicateConnect(pContext, FALSE, TRUE); + ParaNdis5_StopSend(pContext, FALSE, NULL); + ParaNdis5_StopReceive(pContext, FALSE, NULL); + if (!pContext->ulMilliesToConnect) + { + ParaNdis_ReportLinkStatus(pContext, FALSE); + } + else + { + NdisSetTimer(&pContext->ConnectTimer, pContext->ulMilliesToConnect); + } + } + else + { + ParaNdis_CleanupContext(pContext); + } + } + + if (status != NDIS_STATUS_SUCCESS && pContext) + { + NdisFreeMemory(pContext, 0, 0); + } + + DEBUG_EXIT_STATUS(0, status); + return status; +} + + +/************************************************************* +Callback of delayed receive pause procedure upon reset request +*************************************************************/ +static void OnReceiveStoppedOnReset(VOID *p) +{ + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)p; + DEBUG_ENTRY(0); + NdisSetEvent(&pContext->ResetEvent); +} + +/************************************************************* +Callback of delayed send pause procedure upon reset request +*************************************************************/ +static void OnSendStoppedOnReset(VOID *p) +{ + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)p; + DEBUG_ENTRY(0); + NdisSetEvent(&pContext->ResetEvent); +} + +VOID ParaNdis_Suspend(PARANDIS_ADAPTER *pContext) +{ + DEBUG_ENTRY(0); + NdisResetEvent(&pContext->ResetEvent); + if (NDIS_STATUS_PENDING != ParaNdis5_StopSend(pContext, TRUE, OnSendStoppedOnReset)) + { + NdisSetEvent(&pContext->ResetEvent); + } + NdisWaitEvent(&pContext->ResetEvent, 0); + NdisResetEvent(&pContext->ResetEvent); + if (NDIS_STATUS_PENDING != ParaNdis5_StopReceive(pContext, TRUE, OnReceiveStoppedOnReset)) + { + NdisSetEvent(&pContext->ResetEvent); + } + NdisWaitEvent(&pContext->ResetEvent, 0); + NdisResetEvent(&pContext->ResetEvent); + DEBUG_EXIT_STATUS(0, 0); +} + +VOID ParaNdis_Resume(PARANDIS_ADAPTER *pContext) +{ + ParaNdis5_StopSend(pContext, FALSE, NULL); + ParaNdis5_StopReceive(pContext, FALSE, NULL); + DEBUG_EXIT_STATUS(0, 0); +} + +static void NTAPI OnResetWorkItem(NDIS_WORK_ITEM * pWorkItem, PVOID Context) +{ + tGeneralWorkItem *pwi = (tGeneralWorkItem *)pWorkItem; + PARANDIS_ADAPTER *pContext = pwi->pContext; + DEBUG_ENTRY(0); + + pContext->bResetInProgress = TRUE; + ParaNdis_IndicateConnect(pContext, FALSE, FALSE); + ParaNdis_Suspend(pContext); + 
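    /* at this point ParaNdis_Suspend has stopped and drained both the send and
       the receive path (waiting on ResetEvent for each of them), so they can be
       restarted before the pending reset is completed below */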
ParaNdis_Resume(pContext); + pContext->bResetInProgress = FALSE; + ParaNdis_ReportLinkStatus(pContext, FALSE); + + NdisFreeMemory(pwi, 0, 0); + ParaNdis_DebugHistory(pContext, hopSysReset, NULL, 0, NDIS_STATUS_SUCCESS, 0); + NdisMResetComplete(pContext->MiniportHandle, NDIS_STATUS_SUCCESS, TRUE); +} + + +/************************************************************* +Required NDIS procedure +Called when some procedure (like OID handler) returns PENDING and +does not complete or when CheckForHang return TRUE +*************************************************************/ +static NDIS_STATUS NTAPI ParaNdis5_Reset( + OUT PBOOLEAN AddressingReset, + IN NDIS_HANDLE MiniportAdapterContext) +{ + NDIS_STATUS status; + tGeneralWorkItem *pwi; + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)MiniportAdapterContext; + DEBUG_ENTRY(0); + ParaNdis_DebugHistory(pContext, hopSysReset, NULL, 1, 0, 0); + status = NDIS_STATUS_FAILURE; + pwi = ParaNdis_AllocateMemory(pContext, sizeof(tGeneralWorkItem)); + if (pwi) + { + pwi->pContext = pContext; + NdisInitializeWorkItem(&pwi->wi, OnResetWorkItem, pwi); + if (NdisScheduleWorkItem(&pwi->wi) == NDIS_STATUS_SUCCESS) + { + status = NDIS_STATUS_PENDING; + } + else + { + NdisFreeMemory(pwi, 0, 0); + } + } + if (status != NDIS_STATUS_PENDING) + { + ParaNdis_DebugHistory(pContext, hopSysReset, NULL, 0, status, 0); + } + return status; +} + +/************************************************************* +Callback of delayed receive pause procedure +*************************************************************/ +static VOID OnReceiveStopped(VOID *p) +{ + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)p; + DEBUG_ENTRY(0); + NdisSetEvent(&pContext->HaltEvent); +} + +/************************************************************* +Callback of delayed send pause procedure +*************************************************************/ +static VOID OnSendStopped(VOID *p) +{ + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)p; + DEBUG_ENTRY(0); + NdisSetEvent(&pContext->HaltEvent); +} + +static void WaitHaltEvent(PARANDIS_ADAPTER *pContext, const char *Reason) +{ + UINT ms = 5000; + if (!NdisWaitEvent(&pContext->HaltEvent, 1)) + { + while (!NdisWaitEvent(&pContext->HaltEvent, ms)) + { + DPrintf(0, ("[%s]", __FUNCTION__)); + } + } +} + +/************************************************************* +Required NDIS procedure +Stops TX and RX path and finished the function of adapter +*************************************************************/ +static VOID NTAPI ParaNdis5_Halt( + IN NDIS_HANDLE MiniportAdapterContext) +{ + NDIS_STATUS status = NDIS_STATUS_SUCCESS; + BOOLEAN bUnused; + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)MiniportAdapterContext; + DEBUG_ENTRY(0); + + ParaNdis_DebugHistory(pContext, hopHalt, NULL, 1, 0, 0); + + NdisCancelTimer(&pContext->ConnectTimer, &bUnused); + NdisResetEvent(&pContext->HaltEvent); + if (NDIS_STATUS_PENDING != ParaNdis5_StopSend(pContext, TRUE, OnSendStopped)) + NdisSetEvent(&pContext->HaltEvent); + WaitHaltEvent(pContext, "Send"); + NdisResetEvent(&pContext->HaltEvent); + if (NDIS_STATUS_PENDING != ParaNdis5_StopReceive(pContext, TRUE, OnReceiveStopped)) + NdisSetEvent(&pContext->HaltEvent); + WaitHaltEvent(pContext, "Receive"); + ParaNdis_CleanupContext(pContext); + NdisCancelTimer(&pContext->DPCPostProcessTimer, &bUnused); + ParaNdis_DebugHistory(pContext, hopHalt, NULL, 0, 0, 0); + ParaNdis_DebugRegisterMiniport(pContext, FALSE); + NdisFreeMemory(pContext, 0, 0); + DEBUG_EXIT_STATUS(0, status); +} + + 
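/*
 * The reset work item and the halt handler above use the same quiesce pattern:
 * request an asynchronous stop of the send or receive path with a completion
 * callback, signal an NDIS event themselves if the stop already completed
 * synchronously, then block on that event. Below is a minimal standalone
 * sketch of that pattern only; QUIESCE_CONTEXT, StopMyPath, OnMyPathStopped
 * and QuiesceMyPath are illustrative names and are not part of this driver.
 */
#include <ndis.h>

typedef struct _QUIESCE_CONTEXT
{
    NDIS_EVENT StopEvent;               /* initialized once with NdisInitializeEvent */
} QUIESCE_CONTEXT;

/* stand-in for ParaNdis5_StopSend/ParaNdis5_StopReceive: the real routines
   return NDIS_STATUS_PENDING when they will invoke the callback later;
   this stub always completes synchronously */
static NDIS_STATUS StopMyPath(QUIESCE_CONTEXT *ctx, BOOLEAN bStop, VOID (*OnDone)(VOID *))
{
    UNREFERENCED_PARAMETER(ctx);
    UNREFERENCED_PARAMETER(bStop);
    UNREFERENCED_PARAMETER(OnDone);
    return NDIS_STATUS_SUCCESS;
}

/* completion callback, invoked only when the stop finishes asynchronously */
static VOID OnMyPathStopped(VOID *p)
{
    NdisSetEvent(&((QUIESCE_CONTEXT *)p)->StopEvent);
}

/* must run at PASSIVE_LEVEL (work item or Halt context), as in the driver */
static VOID QuiesceMyPath(QUIESCE_CONTEXT *ctx)
{
    NdisResetEvent(&ctx->StopEvent);
    if (NDIS_STATUS_PENDING != StopMyPath(ctx, TRUE, OnMyPathStopped))
    {
        NdisSetEvent(&ctx->StopEvent);  /* completed synchronously, signal ourselves */
    }
    NdisWaitEvent(&ctx->StopEvent, 0);  /* MsToWait == 0: wait indefinitely */
}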
+/************************************************************* +Called periodically (usually each 2 seconds) +*************************************************************/ +static BOOLEAN NTAPI ParaNdis5_CheckForHang(IN NDIS_HANDLE MiniportAdapterContext) +{ + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)MiniportAdapterContext; + DEBUG_ENTRY(8); + return ParaNdis_CheckForHang(pContext); +} + +/************************************************************* +Required NDIS procedure +Responsible for hardware interrupt handling +*************************************************************/ +static VOID NTAPI ParaNdis5_MiniportISR(OUT PBOOLEAN InterruptRecognized, + OUT PBOOLEAN QueueMiniportHandleInterrupt, + IN NDIS_HANDLE MiniportAdapterContext) +{ + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)MiniportAdapterContext; + BOOLEAN b; + *QueueMiniportHandleInterrupt = FALSE; + b = ParaNdis_OnLegacyInterrupt(pContext, QueueMiniportHandleInterrupt); + *InterruptRecognized = b; + DEBUG_EXIT_STATUS(7, (ULONG)b); +} + +/************************************************************* +Parameters: + +Return value: + +*************************************************************/ +VOID NTAPI ParaNdis5_PnPEventNotify(IN NDIS_HANDLE MiniportAdapterContext, + IN NDIS_DEVICE_PNP_EVENT PnPEvent, + IN PVOID InformationBuffer, + IN ULONG InformationBufferLength) +{ + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)MiniportAdapterContext; + ParaNdis_OnPnPEvent(pContext, PnPEvent, InformationBuffer, InformationBufferLength); +} + +/************************************************************* +Driver's entry point +Parameters: + as usual +Return value: + SUCCESS or error code +*************************************************************/ +NDIS_STATUS NTAPI DriverEntry(PVOID DriverObject,PVOID RegistryPath) +{ + NDIS_STATUS status; + NDIS_MINIPORT_CHARACTERISTICS chars; + ParaNdis_DebugInitialize(DriverObject, RegistryPath); + + status = NDIS_STATUS_FAILURE; + + DEBUG_ENTRY(0); + _LogOutString(0, __DATE__ " " __TIME__); + + NdisMInitializeWrapper(&DriverHandle, + DriverObject, + RegistryPath, + NULL + ); + + if (DriverHandle) + { + NdisZeroMemory(&chars, sizeof(chars)); + //NDIS version of the miniport + chars.MajorNdisVersion = NDIS_MINIPORT_MAJOR_VERSION; + chars.MinorNdisVersion = NDIS_MINIPORT_MINOR_VERSION; + //Init and destruction + chars.InitializeHandler = ParaNdis5_Initialize; + chars.HaltHandler = ParaNdis5_Halt; + + //Interrupt and DPC handling + chars.HandleInterruptHandler = ParaNdis5_HandleDPC; + chars.ISRHandler = ParaNdis5_MiniportISR; + + //Packet transfer - send path and notification on the send packet + chars.SendPacketsHandler = ParaNdis5_SendPackets; + chars.ReturnPacketHandler = ParaNdis5_ReturnPacket; + + //OID set\get + chars.SetInformationHandler = ParaNdis5_SetOID; + chars.QueryInformationHandler = ParaNdis5_QueryOID; + + //Reset + chars.ResetHandler = ParaNdis5_Reset; + chars.CheckForHangHandler = ParaNdis5_CheckForHang; //optional + + chars.CancelSendPacketsHandler = ParaNdis5_CancelSendPackets; + chars.PnPEventNotifyHandler = ParaNdis5_PnPEventNotify; + chars.AdapterShutdownHandler = ParaNdis5_Shutdown; + + status = NdisMRegisterMiniport( + DriverHandle, + &chars, + sizeof(chars)); + } + + if (status == NDIS_STATUS_SUCCESS) + { + NdisMRegisterUnloadHandler(DriverHandle, ParaVirtualNICUnload); + } + else if (DriverHandle) + { + DPrintf(0, ("NdisMRegisterMiniport failed")); + NdisTerminateWrapper(DriverHandle, NULL); + } + else + { + DPrintf(0, ("NdisMInitializeWrapper 
failed")); + } + + DEBUG_EXIT_STATUS(status ? 0 : 4, status); + return status; +} diff --git a/drivers/network/dd/netkvm/wxp/ParaNdis5-Impl.c b/drivers/network/dd/netkvm/wxp/ParaNdis5-Impl.c new file mode 100644 index 00000000000..86cb8945aea --- /dev/null +++ b/drivers/network/dd/netkvm/wxp/ParaNdis5-Impl.c @@ -0,0 +1,1474 @@ +/* + * This file contains NDIS5.X Implementation of adapter driver procedures. + * + * Copyright (c) 2008-2017 Red Hat, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ +#include "ParaNdis5.h" + + +#ifdef WPP_EVENT_TRACING +#include "ParaNdis5-Impl.tmh" +#endif + + +/********************************************************** +Per-packet information holder +***********************************************************/ +#define SEND_ENTRY_FLAG_READY 0x0001 +#define SEND_ENTRY_TSO_USED 0x0002 +#define SEND_ENTRY_NO_INDIRECT 0x0004 +#define SEND_ENTRY_TCP_CS 0x0008 +#define SEND_ENTRY_UDP_CS 0x0010 +#define SEND_ENTRY_IP_CS 0x0020 + + + +typedef struct _tagSendEntry +{ + LIST_ENTRY list; + PNDIS_PACKET packet; + ULONG flags; + ULONG ipTransferUnit; + union + { + ULONG PriorityDataLong; + UCHAR PriorityData[4]; + }; +} tSendEntry; + +/********************************************************** +This defines field in NDIS_PACKET structure to use as holder +of our reference pointer for indicated packets +***********************************************************/ +#define IDXTOUSE 0 +#define REF_MINIPORT(Packet) ((PVOID *)(Packet->MiniportReservedEx + IDXTOUSE * sizeof(PVOID))) + + +/********************************************************** +Memory allocation procedure +Parameters: + context(not used) + ULONG ulRequiredSize size of block to allocate +Return value: + PVOID pointer to block or NULL if failed +***********************************************************/ +PVOID ParaNdis_AllocateMemory(PARANDIS_ADAPTER *pContext, ULONG ulRequiredSize) +{ + PVOID p; + UNREFERENCED_PARAMETER(pContext); + if (NDIS_STATUS_SUCCESS != NdisAllocateMemoryWithTag(&p, ulRequiredSize, PARANDIS_MEMORY_TAG)) + p = NULL; + if (!p) + { + DPrintf(0, ("[%s] failed (%d bytes)", __FUNCTION__, ulRequiredSize)); + } + return p; +} + +/********************************************************** +Implementation of "open adapter configuration" operation +Parameters: + context +Return value: + NDIS_HANDLE Handle to open configuration or NULL, if failed +***********************************************************/ +NDIS_HANDLE ParaNdis_OpenNICConfiguration(PARANDIS_ADAPTER *pContext) +{ + NDIS_STATUS status; + NDIS_HANDLE cfg; + DEBUG_ENTRY(2); + NdisOpenConfiguration(&status, &cfg, pContext->WrapperConfigurationHandle); + if (status != NDIS_STATUS_SUCCESS) + cfg = NULL; + DEBUG_EXIT_STATUS(0, status); + return cfg; +} + +void ParaNdis_RestoreDeviceConfigurationAfterReset( + PARANDIS_ADAPTER *pContext) +{ + +} + + +/********************************************************** +Indicates connect/disconnect events +Parameters: + context + BOOLEAN bConnected 1/0 connect/disconnect +***********************************************************/ +VOID ParaNdis_IndicateConnect(PARANDIS_ADAPTER *pContext, BOOLEAN bConnected, BOOLEAN bForce) +{ + // indicate disconnect always + if (bConnected != pContext->bConnected || bForce) + { + pContext->bConnected = bConnected; + DPrintf(0, ("Indicating %sconnect", bConnected ? "" : "dis")); + ParaNdis_DebugHistory(pContext, hopConnectIndication, NULL, bConnected, 0, 0); + NdisMIndicateStatus( + pContext->MiniportHandle, + bConnected ? 
NDIS_STATUS_MEDIA_CONNECT : NDIS_STATUS_MEDIA_DISCONNECT, + 0, + 0); + NdisMIndicateStatusComplete(pContext->MiniportHandle); + } +} + +VOID ParaNdis_SetPowerState(PARANDIS_ADAPTER *pContext, NDIS_DEVICE_POWER_STATE newState) +{ + //NDIS_DEVICE_POWER_STATE prev = pContext->powerState; + pContext->powerState = newState; +} + + +/********************************************************** +Callback of timer for connect indication, if used +Parameters: + context (on FunctionContext) + all the rest are irrelevant +***********************************************************/ +static VOID NTAPI OnConnectTimer( + IN PVOID SystemSpecific1, + IN PVOID FunctionContext, + IN PVOID SystemSpecific2, + IN PVOID SystemSpecific3 + ) +{ + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)FunctionContext; + ParaNdis_ReportLinkStatus(pContext, FALSE); +} + +/********************************************************** +NDIS5 implementation of shared memory allocation +Parameters: + context + tCompletePhysicalAddress *pAddresses + the structure accumulates all our knowledge + about the allocation (size, addresses, cacheability etc) +Return value: + TRUE if the allocation was successful +***********************************************************/ +BOOLEAN ParaNdis_InitialAllocatePhysicalMemory( + PARANDIS_ADAPTER *pContext, + tCompletePhysicalAddress *pAddresses) +{ + NdisMAllocateSharedMemory( + pContext->MiniportHandle, + pAddresses->size, + (BOOLEAN)pAddresses->IsCached, + &pAddresses->Virtual, + &pAddresses->Physical); + return pAddresses->Virtual != NULL; +} + +/********************************************************** +Callback of timer for pending events cleanup after regular DPC processing +Parameters: + context (on FunctionContext) + all the rest are irrelevant +***********************************************************/ +static VOID NTAPI OnDPCPostProcessTimer( + IN PVOID SystemSpecific1, + IN PVOID FunctionContext, + IN PVOID SystemSpecific2, + IN PVOID SystemSpecific3 + ) +{ + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)FunctionContext; + ULONG requiresProcessing; + requiresProcessing = ParaNdis_DPCWorkBody(pContext, PARANDIS_UNLIMITED_PACKETS_TO_INDICATE); + if (requiresProcessing) + { + // we need to request additional DPC + InterlockedOr(&pContext->InterruptStatus, requiresProcessing); + NdisSetTimer(&pContext->DPCPostProcessTimer, 10); + } +} + +/********************************************************** +NDIS5 implementation of shared memory freeing +Parameters: + context + tCompletePhysicalAddress *pAddresses + the structure accumulates all our knowledge + about the allocation (size, addresses, cacheability etc) + filled by ParaNdis_InitialAllocatePhysicalMemory +***********************************************************/ +VOID ParaNdis_FreePhysicalMemory( + PARANDIS_ADAPTER *pContext, + tCompletePhysicalAddress *pAddresses) +{ + + NdisMFreeSharedMemory( + pContext->MiniportHandle, + pAddresses->size, + (BOOLEAN)pAddresses->IsCached, + pAddresses->Virtual, + pAddresses->Physical); +} + +static void DebugParseOffloadBits() +{ + NDIS_TCP_IP_CHECKSUM_PACKET_INFO info; + tChecksumCheckResult res; + ULONG val = 1; + int level = 1; + while (val) + { + info.Value = val; + if (info.Receive.NdisPacketIpChecksumFailed) DPrintf(level, ("W.%X=IPCS failed", val)); + if (info.Receive.NdisPacketIpChecksumSucceeded) DPrintf(level, ("W.%X=IPCS OK", val)); + if (info.Receive.NdisPacketTcpChecksumFailed) DPrintf(level, ("W.%X=TCPCS failed", val)); + if (info.Receive.NdisPacketTcpChecksumSucceeded) 
DPrintf(level, ("W.%X=TCPCS OK", val)); + if (info.Receive.NdisPacketUdpChecksumFailed) DPrintf(level, ("W.%X=UDPCS failed", val)); + if (info.Receive.NdisPacketUdpChecksumSucceeded) DPrintf(level, ("W.%X=UDPCS OK", val)); + val = val << 1; + } + val = 1; + while (val) + { + res.value = val; + if (res.flags.IpFailed) DPrintf(level, ("C.%X=IPCS failed", val)); + if (res.flags.IpOK) DPrintf(level, ("C.%X=IPCS OK", val)); + if (res.flags.TcpFailed) DPrintf(level, ("C.%X=TCPCS failed", val)); + if (res.flags.TcpOK) DPrintf(level, ("C.%X=TCPCS OK", val)); + if (res.flags.UdpFailed) DPrintf(level, ("C.%X=UDPCS failed", val)); + if (res.flags.UdpOK) DPrintf(level, ("C.%X=UDPCS OK", val)); + val = val << 1; + } +} + +/********************************************************** +Procedure for NDIS5 specific initialization: + register interrupt handler + allocate pool of packets to indicate + allocate pool of buffers to indicate + initialize halt event +Parameters: + context +Return value: + SUCCESS or failure code +***********************************************************/ +NDIS_STATUS NTAPI ParaNdis_FinishSpecificInitialization( + PARANDIS_ADAPTER *pContext) +{ + NDIS_STATUS status; + UINT nPackets = pContext->NetMaxReceiveBuffers * 2; + DEBUG_ENTRY(2); + NdisInitializeEvent(&pContext->HaltEvent); + InitializeListHead(&pContext->SendQueue); + InitializeListHead(&pContext->TxWaitingList); + NdisInitializeTimer(&pContext->ConnectTimer, OnConnectTimer, pContext); + NdisInitializeTimer(&pContext->DPCPostProcessTimer, OnDPCPostProcessTimer, pContext); + + status = NdisMRegisterInterrupt( + &pContext->Interrupt, + pContext->MiniportHandle, + pContext->AdapterResources.Vector, + pContext->AdapterResources.Level, + TRUE, + TRUE, + NdisInterruptLevelSensitive); + + if (status == NDIS_STATUS_SUCCESS) + { + NdisAllocatePacketPool( + &status, + &pContext->PacketPool, + nPackets, + PROTOCOL_RESERVED_SIZE_IN_PACKET ); + } + if (status == NDIS_STATUS_SUCCESS) + { + NdisAllocateBufferPool( + &status, + &pContext->BuffersPool, + nPackets); + } + +#if !DO_MAP_REGISTERS + if (status == NDIS_STATUS_SUCCESS) + { + status = NdisMInitializeScatterGatherDma( + pContext->MiniportHandle, + TRUE, + 0x10000); + pContext->bDmaInitialized = status == NDIS_STATUS_SUCCESS; + } +#else + if (status == NDIS_STATUS_SUCCESS) + { + status = NdisMAllocateMapRegisters( + pContext->MiniportHandle, + 0, + NDIS_DMA_32BITS, + 64, + PAGE_SIZE); + pContext->bDmaInitialized = status == NDIS_STATUS_SUCCESS; + } +#endif + if (status == NDIS_STATUS_SUCCESS) + { + DebugParseOffloadBits(); + } + DEBUG_EXIT_STATUS(status ? 
0 : 2, status); + return status; +} + +/********************************************************** +Procedure of NDIS5-specific cleanup: + deregister interrupt + free buffer and packet pool +Parameters: + context +***********************************************************/ +VOID ParaNdis_FinalizeCleanup(PARANDIS_ADAPTER *pContext) +{ + if (pContext->Interrupt.InterruptObject) + { + NdisMDeregisterInterrupt(&pContext->Interrupt); + } + if (pContext->BuffersPool) + { + NdisFreeBufferPool(pContext->BuffersPool); + } + if (pContext->PacketPool) + { + NdisFreePacketPool(pContext->PacketPool); + } +#if DO_MAP_REGISTERS + if (pContext->bDmaInitialized) + { + NdisMFreeMapRegisters(pContext->MiniportHandle); + } +#endif +} + + +static FORCEINLINE ULONG MaxNdisBufferDataSize(PARANDIS_ADAPTER *pContext, pIONetDescriptor pBufferDesc) +{ + ULONG size = pBufferDesc->DataInfo.size; + if (pContext->bUseMergedBuffers) size -= pContext->nVirtioHeaderSize; + return size; +} + + +/********************************************************** +NDIS5-specific procedure for binding RX buffer to +NDIS_PACKET and NDIS_BUFFER +Parameters: + context + pIONetDescriptor pBuffersDesc VirtIO buffer descriptor + +Return value: + TRUE, if bound successfully + FALSE, if no buffer or packet can be allocated +***********************************************************/ +BOOLEAN ParaNdis_BindBufferToPacket( + PARANDIS_ADAPTER *pContext, + pIONetDescriptor pBufferDesc) +{ + NDIS_STATUS status; + PNDIS_BUFFER pBuffer = NULL; + PNDIS_PACKET Packet = NULL; + NdisAllocatePacket(&status, &Packet, pContext->PacketPool); + if (status == NDIS_STATUS_SUCCESS) + { + NdisReinitializePacket(Packet); + NdisAllocateBuffer( + &status, + &pBuffer, + pContext->BuffersPool, + RtlOffsetToPointer(pBufferDesc->DataInfo.Virtual, pContext->bUseMergedBuffers ? pContext->nVirtioHeaderSize : 0), + MaxNdisBufferDataSize(pContext, pBufferDesc)); + } + if (status == NDIS_STATUS_SUCCESS) + { + PNDIS_PACKET_OOB_DATA pOOB = NDIS_OOB_DATA_FROM_PACKET(Packet); + NdisZeroMemory(pOOB, sizeof(NDIS_PACKET_OOB_DATA)); + NDIS_SET_PACKET_HEADER_SIZE(Packet, ETH_HEADER_SIZE); + NdisChainBufferAtFront(Packet, pBuffer); + pBufferDesc->pHolder = Packet; + } + else + { + if (pBuffer) NdisFreeBuffer(pBuffer); + if (Packet) NdisFreePacket(Packet); + } + return status == NDIS_STATUS_SUCCESS; +} + + +/********************************************************** +NDIS5-specific procedure for unbinding +previously bound RX buffer from it's NDIS_PACKET and NDIS_BUFFER +Parameters: + context + pIONetDescriptor pBuffersDesc VirtIO buffer descriptor +***********************************************************/ +void ParaNdis_UnbindBufferFromPacket( + PARANDIS_ADAPTER *pContext, + pIONetDescriptor pBufferDesc) +{ + if (pBufferDesc->pHolder) + { + PNDIS_BUFFER pBuffer = NULL; + PNDIS_PACKET Packet = pBufferDesc->pHolder; + pBufferDesc->pHolder = NULL; + NdisUnchainBufferAtFront(Packet, &pBuffer); + if (pBuffer) + { + NdisAdjustBufferLength(pBuffer, MaxNdisBufferDataSize(pContext, pBufferDesc)); + NdisFreeBuffer(pBuffer); + } + NdisFreePacket(Packet); + } +} + +/********************************************************** +NDIS5-specific procedure to indicate received packets + +Parameters: + context + pIONetDescriptor pBuffersDescriptor - VirtIO buffer descriptor of data buffer + PVOID dataBuffer - data buffer to pass to network stack + PULONG pLength - size of received packet. 
+ BOOLEAN bPrepareOnly - only return NBL for further indication in batch +Return value: + TRUE is packet indicated + FALSE if not (in this case, the descriptor should be freed now) +If priority header is in the packet. it will be removed and *pLength decreased +***********************************************************/ +tPacketIndicationType ParaNdis_IndicateReceivedPacket( + PARANDIS_ADAPTER *pContext, + PVOID dataBuffer, + PULONG pLength, + BOOLEAN bPrepareOnly, + pIONetDescriptor pBuffersDesc) +{ + PNDIS_BUFFER pBuffer = NULL; + PNDIS_BUFFER pNoBuffer = NULL; + PNDIS_PACKET Packet = pBuffersDesc->pHolder; + ULONG length = *pLength; + if (Packet) NdisUnchainBufferAtFront(Packet, &pBuffer); + if (Packet) NdisUnchainBufferAtFront(Packet, &pNoBuffer); + if (pBuffer) + { + UINT uTotalLength; + NDIS_PACKET_8021Q_INFO qInfo; + qInfo.Value = NULL; + if ((pContext->ulPriorityVlanSetting && length > (ETH_PRIORITY_HEADER_OFFSET + ETH_PRIORITY_HEADER_SIZE)) || + length > pContext->MaxPacketSize.nMaxFullSizeOS) + { + PUCHAR pPriority = (PUCHAR)dataBuffer + ETH_PRIORITY_HEADER_OFFSET; + if (ETH_HAS_PRIO_HEADER(dataBuffer)) + { + if (IsPrioritySupported(pContext)) + qInfo.TagHeader.UserPriority = (pPriority[2] & 0xE0) >> 5; + if (IsVlanSupported(pContext)) + { + qInfo.TagHeader.VlanId = (((USHORT)(pPriority[2] & 0x0F)) << 8) | pPriority[3]; + if (pContext->VlanId && pContext->VlanId != qInfo.TagHeader.VlanId) + { + DPrintf(0, ("[%s] Failing unexpected VlanID %d", __FUNCTION__, qInfo.TagHeader.VlanId)); + pContext->extraStatistics.framesFilteredOut++; + pBuffer = NULL; + } + } + RtlMoveMemory( + pPriority, + pPriority + ETH_PRIORITY_HEADER_SIZE, + length - ETH_PRIORITY_HEADER_OFFSET - ETH_PRIORITY_HEADER_SIZE); + length -= ETH_PRIORITY_HEADER_SIZE; + if (length > pContext->MaxPacketSize.nMaxFullSizeOS) + { + DPrintf(0, ("[%s] Can not indicate up packet of %d", __FUNCTION__, length)); + pBuffer = NULL; + } + DPrintf(1, ("[%s] Found priority data %p", __FUNCTION__, qInfo.Value)); + pContext->extraStatistics.framesRxPriority++; + } + } + + if (pBuffer) + { + PVOID headerBuffer = pContext->bUseMergedBuffers ? 
pBuffersDesc->DataInfo.Virtual:pBuffersDesc->HeaderInfo.Virtual; + virtio_net_hdr_basic *pHeader = (virtio_net_hdr_basic *)headerBuffer; + tChecksumCheckResult csRes; + NDIS_PER_PACKET_INFO_FROM_PACKET(Packet, Ieee8021QInfo) = qInfo.Value; + NDIS_SET_PACKET_STATUS(Packet, STATUS_SUCCESS); + ParaNdis_PadPacketReceived(dataBuffer, &length); + NdisAdjustBufferLength(pBuffer, length); + NdisChainBufferAtFront(Packet, pBuffer); + NdisQueryPacket(Packet, NULL, NULL, NULL, &uTotalLength); + *REF_MINIPORT(Packet) = pBuffersDesc; + csRes = ParaNdis_CheckRxChecksum(pContext, pHeader->flags, dataBuffer, length); + if (csRes.value) + { + NDIS_TCP_IP_CHECKSUM_PACKET_INFO qCSInfo; + qCSInfo.Value = 0; + qCSInfo.Receive.NdisPacketIpChecksumFailed = csRes.flags.IpFailed; + qCSInfo.Receive.NdisPacketIpChecksumSucceeded = csRes.flags.IpOK; + qCSInfo.Receive.NdisPacketTcpChecksumFailed = csRes.flags.TcpFailed; + qCSInfo.Receive.NdisPacketTcpChecksumSucceeded = csRes.flags.TcpOK; + qCSInfo.Receive.NdisPacketUdpChecksumFailed = csRes.flags.UdpFailed; + qCSInfo.Receive.NdisPacketUdpChecksumSucceeded = csRes.flags.UdpOK; + NDIS_PER_PACKET_INFO_FROM_PACKET(Packet, TcpIpChecksumPacketInfo) = (PVOID) (ULONG_PTR) qCSInfo.Value; + DPrintf(1, ("Reporting CS %X->%X", csRes.value, qCSInfo.Value)); + } + + DPrintf(4, ("[%s] buffer %p(%d b.)", __FUNCTION__, pBuffersDesc, length)); + if (!bPrepareOnly) + { + NdisMIndicateReceivePacket( + pContext->MiniportHandle, + &Packet, + 1); + } + } + *pLength = length; + } + if (!pBuffer) + { + DPrintf(0, ("[%s] Error: %p(%d b.) with packet %p", __FUNCTION__, + pBuffersDesc, length, Packet)); + Packet = NULL; + } + if (pNoBuffer) + { + DPrintf(0, ("[%s] Error: %p(%d b.) with packet %p, buf %p,%p", __FUNCTION__, + pBuffersDesc, length, Packet, pBuffer, pNoBuffer)); + } + return Packet; +} + +VOID ParaNdis_IndicateReceivedBatch( + PARANDIS_ADAPTER *pContext, + tPacketIndicationType *pBatch, + ULONG nofPackets) +{ + NdisMIndicateReceivePacket( + pContext->MiniportHandle, + pBatch, + nofPackets); +} + +static FORCEINLINE void GET_NUMBER_OF_SG_ELEMENTS(PNDIS_PACKET Packet, UINT *pNum) +{ + PSCATTER_GATHER_LIST pSGList; + pSGList = NDIS_PER_PACKET_INFO_FROM_PACKET(Packet, ScatterGatherListPacketInfo); + if (pSGList) + { + *pNum = pSGList->NumberOfElements; + } +} + +/********************************************************** +Complete TX packets to NDIS with status, indicated inside packet +Parameters: + context + PNDIS_PACKET Packet packet to complete +***********************************************************/ +static void CompletePacket(PARANDIS_ADAPTER *pContext, PNDIS_PACKET Packet) +{ + LONG lRestToReturn; + NDIS_STATUS status = NDIS_GET_PACKET_STATUS(Packet); + lRestToReturn = NdisInterlockedDecrement(&pContext->NetTxPacketsToReturn); + ParaNdis_DebugHistory(pContext, hopSendComplete, Packet, 0, lRestToReturn, status); + NdisMSendComplete(pContext->MiniportHandle, Packet, status); +} + +/********************************************************** +Copy data from specified packet to VirtIO buffer, minimum 60 bytes +Parameters: + PNDIS_PACKET Packet packet to copy data from + PVOID dest destination to copy + ULONG maxSize maximal size of destination +Return value: + size = number of bytes copied + if 0, the packet is not transmitted and should be dropped + ( should never happen) + request +***********************************************************/ +tCopyPacketResult ParaNdis_PacketCopier( + PNDIS_PACKET Packet, PVOID dest, ULONG maxSize, PVOID refValue, BOOLEAN bPreview) +{ + 
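    /* walks the packet's NDIS_BUFFER chain with NdisQueryBufferSafe; in preview
       mode only the first maxSize bytes are copied, otherwise a packet larger
       than maxSize is dropped (0 bytes copied); when the send entry carries
       802.1Q data, the 4-byte priority tag is spliced in at
       ETH_PRIORITY_HEADER_OFFSET while copying */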
PNDIS_BUFFER pBuffer; + ULONG PriorityDataLong = ((tSendEntry *)refValue)->PriorityDataLong; + tCopyPacketResult result; + /* the copier called also for getting Ethernet header + for statistics, when the transfer uses SG table */ + UINT uLength = 0; + ULONG nCopied = 0; + ULONG ulToCopy = 0; + if (bPreview) PriorityDataLong = 0; + NdisQueryPacket(Packet, + NULL, + NULL, + &pBuffer, + (PUINT)&ulToCopy); + + if (ulToCopy > maxSize) ulToCopy = bPreview ? maxSize : 0; + while (pBuffer && ulToCopy) + { + PVOID VirtualAddress = NULL; + NdisQueryBufferSafe(pBuffer, + &VirtualAddress, + &uLength, + NormalPagePriority); + if (!VirtualAddress) + { + /* the packet copy failed */ + nCopied = 0; + break; + } + if(uLength) + { + // Copy the data. + if (uLength > ulToCopy) uLength = ulToCopy; + ulToCopy -= uLength; + if ((PriorityDataLong & 0xFFFF) && + nCopied < ETH_PRIORITY_HEADER_OFFSET && + (nCopied + uLength) >= ETH_PRIORITY_HEADER_OFFSET) + { + ULONG ulCopyNow = ETH_PRIORITY_HEADER_OFFSET - nCopied; + NdisMoveMemory(dest, VirtualAddress, ulCopyNow); + dest = (PUCHAR)dest + ulCopyNow; + VirtualAddress = (PUCHAR)VirtualAddress + ulCopyNow; + NdisMoveMemory(dest, &PriorityDataLong, 4); + nCopied += 4; + dest = (PCHAR)dest + 4; + ulCopyNow = uLength - ulCopyNow; + if (ulCopyNow) NdisMoveMemory(dest, VirtualAddress, ulCopyNow); + dest = (PCHAR)dest + ulCopyNow; + nCopied += uLength; + } + else + { + NdisMoveMemory(dest, VirtualAddress, uLength); + nCopied += uLength; + dest = (PUCHAR)dest + uLength; + } + } + NdisGetNextBuffer(pBuffer, &pBuffer); + } + + DEBUG_EXIT_STATUS(4, nCopied); + result.size = nCopied; + return result; +} + + +/********************************************************** + Callback on finished Tx descriptor +***********************************************************/ +VOID ParaNdis_OnTransmitBufferReleased(PARANDIS_ADAPTER *pContext, IONetDescriptor *pDesc) +{ + tSendEntry *pEntry = (tSendEntry *)pDesc->ReferenceValue; + if (pEntry) + { + DPrintf(2, ("[%s] Entry %p (packet %p, %d buffers) ready!", __FUNCTION__, pEntry, pEntry->packet, pDesc->nofUsedBuffers)); + pEntry->flags |= SEND_ENTRY_FLAG_READY; + pDesc->ReferenceValue = NULL; + ParaNdis_DebugHistory(pContext, hopBufferSent, pEntry->packet, 0, pContext->nofFreeHardwareBuffers, pContext->nofFreeTxDescriptors); + } + else + { + ParaNdis_DebugHistory(pContext, hopBufferSent, NULL, 0, pContext->nofFreeHardwareBuffers, pContext->nofFreeTxDescriptors); + DPrintf(0, ("[%s] ERROR: Send Entry not set!", __FUNCTION__)); + } +} + + +static FORCEINLINE ULONG CalculateTotalOffloadSize( + ULONG packetSize, + ULONG mss, + ULONG ipheaderOffset, + ULONG maxPossiblePacketSize, + tTcpIpPacketParsingResult packetReview) +{ + ULONG ul = 0; + ULONG tcpipHeaders = packetReview.XxpIpHeaderSize; + ULONG allHeaders = tcpipHeaders + ipheaderOffset; + if (tcpipHeaders && (mss + allHeaders) <= maxPossiblePacketSize) + { + ULONG nFragments = (packetSize - allHeaders)/mss; + ULONG last = (packetSize - allHeaders)%mss; + ul = nFragments * (mss + allHeaders) + last + (last ? allHeaders : 0); + } + DPrintf(1, ("[%s]%s %d/%d, headers %d)", + __FUNCTION__, !ul ? 
"ERROR:" : "", ul, mss, allHeaders)); + return ul; +} + +/********************************************************** +Maps the HW buffers of the packet into entries of VirtIO queue +Parameters: + miniport context + PNDIS_PACKET Packet packet to copy data from + PVOID ReferenceValue - tSendEntry * of the packet + VirtIOBufferDescriptor buffers = array of buffers to map packet buffers + (it contains number of SG entries >= number of hw elements in the packet) + pIONetDescriptor pDesc - holder of VirtIO header and reserved data buffer + for possible replacement of one or more HW buffers + +Returns @pMapperResult: (zeroed before call) + .usBuffersMapped - number of buffers mapped (one of them may be our own) + .ulDataSize - number of bytes to report as transmitted (802.1P tag is not counted) + .usBufferSpaceUsed - number of bytes used in data space of pIONetDescriptor pDesc +***********************************************************/ +VOID ParaNdis_PacketMapper( + PARANDIS_ADAPTER *pContext, + PNDIS_PACKET packet, + PVOID ReferenceValue, + struct VirtIOBufferDescriptor *buffers, + pIONetDescriptor pDesc, + tMapperResult *pMapperResult) +{ + tSendEntry *pSendEntry = (tSendEntry *)ReferenceValue; + ULONG PriorityDataLong = pSendEntry->PriorityDataLong; + PSCATTER_GATHER_LIST pSGList = NDIS_PER_PACKET_INFO_FROM_PACKET(packet, ScatterGatherListPacketInfo); + SCATTER_GATHER_ELEMENT *pSGElements = pSGList->Elements; + + + if (pSGList && pSGList->NumberOfElements) + { + UINT i, lengthGet = 0, lengthPut = 0, nCompleteBuffersToSkip = 0, nBytesSkipInFirstBuffer = 0; + if (pSendEntry->flags & (SEND_ENTRY_TSO_USED | SEND_ENTRY_TCP_CS | SEND_ENTRY_UDP_CS | SEND_ENTRY_IP_CS)) + lengthGet = pContext->Offload.ipHeaderOffset + MAX_IPV4_HEADER_SIZE + sizeof(TCPHeader); + if (PriorityDataLong && !lengthGet) + lengthGet = ETH_HEADER_SIZE; + if (lengthGet) + { + ULONG len = 0; + for (i = 0; i < pSGList->NumberOfElements; ++i) + { + len += pSGElements[i].Length; + if (len > lengthGet) + { + nBytesSkipInFirstBuffer = pSGList->Elements[i].Length - (len - lengthGet); + break; + } + DPrintf(2, ("[%s] skipping buffer %d of %d", __FUNCTION__, nCompleteBuffersToSkip, pSGElements[i].Length)); + nCompleteBuffersToSkip++; + } + // just for case of UDP packet shorter than TCP header + if (lengthGet > len) lengthGet = len; + lengthPut = lengthGet + (PriorityDataLong ? 
ETH_PRIORITY_HEADER_SIZE : 0); + } + + if (lengthPut > pDesc->DataInfo.size) + { + DPrintf(0, ("[%s] ERROR: can not substitute %d bytes, sending as is", __FUNCTION__, lengthPut)); + nCompleteBuffersToSkip = 0; + nBytesSkipInFirstBuffer = 0; + lengthGet = lengthPut = 0; + } + + if (lengthPut) + { + // we replace 1 or more HW buffers with one buffer preallocated for data + buffers->physAddr = pDesc->DataInfo.Physical; + buffers->length = lengthPut; + pMapperResult->usBufferSpaceUsed = (USHORT)lengthPut; + pMapperResult->ulDataSize += lengthGet; + pMapperResult->usBuffersMapped = (USHORT)(pSGList->NumberOfElements - nCompleteBuffersToSkip + 1); + pSGElements += nCompleteBuffersToSkip; + buffers++; + DPrintf(1, ("[%s](%d bufs) skip %d buffers + %d bytes", + __FUNCTION__, pSGList->NumberOfElements, nCompleteBuffersToSkip, nBytesSkipInFirstBuffer)); + } + else + { + pMapperResult->usBuffersMapped = (USHORT)pSGList->NumberOfElements; + } + + for (i = nCompleteBuffersToSkip; i < pSGList->NumberOfElements; ++i) + { + if (nBytesSkipInFirstBuffer) + { + buffers->physAddr.QuadPart = pSGElements->Address.QuadPart + nBytesSkipInFirstBuffer; + buffers->length = pSGElements->Length - nBytesSkipInFirstBuffer; + DPrintf(2, ("[%s] using HW buffer %d of %d-%d", __FUNCTION__, i, pSGElements->Length, nBytesSkipInFirstBuffer)); + nBytesSkipInFirstBuffer = 0; + } + else + { + buffers->physAddr = pSGElements->Address; + buffers->length = pSGElements->Length; + } + pMapperResult->ulDataSize += buffers->length; + pSGElements++; + buffers++; + } + + if (lengthPut) + { + PVOID pBuffer = pDesc->DataInfo.Virtual; + PVOID pIpHeader = RtlOffsetToPointer(pBuffer, pContext->Offload.ipHeaderOffset); + ParaNdis_PacketCopier(packet, pBuffer, lengthGet, ReferenceValue, TRUE); + + if (pSendEntry->flags & SEND_ENTRY_TSO_USED) + { + tTcpIpPacketParsingResult packetReview; + ULONG dummyTransferSize = 0; + USHORT saveBuffers = pMapperResult->usBuffersMapped; + ULONG flags = pcrIpChecksum | pcrTcpChecksum | pcrFixIPChecksum | pcrFixPHChecksum; + pMapperResult->usBuffersMapped = 0; + packetReview = ParaNdis_CheckSumVerify( + pIpHeader, + lengthGet - pContext->Offload.ipHeaderOffset, + flags, + __FUNCTION__); + /* uncomment to verify */ + /* + packetReview = ParaNdis_CheckSumVerify( + pIpHeader, + lengthGet - pContext->Offload.ipHeaderOffset, + pcrIpChecksum | pcrTcpChecksum, + __FUNCTION__); + */ + if (packetReview.ipCheckSum == ppresCSOK || packetReview.fixedIpCS) + { + dummyTransferSize = CalculateTotalOffloadSize( + pMapperResult->ulDataSize, + pSendEntry->ipTransferUnit, + pContext->Offload.ipHeaderOffset, + pContext->MaxPacketSize.nMaxFullSizeOS, + packetReview); + } + else + { + DPrintf(0, ("[%s] ERROR locating IP header in %d bytes(IP header of %d)", __FUNCTION__, + lengthGet, packetReview.ipHeaderSize)); + } + NDIS_PER_PACKET_INFO_FROM_PACKET(packet, TcpLargeSendPacketInfo) = (PVOID)(ULONG_PTR)dummyTransferSize; + if (dummyTransferSize) + { + virtio_net_hdr_basic *pheader = pDesc->HeaderInfo.Virtual; + unsigned short addPriorityLen = PriorityDataLong ? 
ETH_PRIORITY_HEADER_SIZE : 0; + pheader->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; + pheader->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; + pheader->hdr_len = (USHORT)(packetReview.XxpIpHeaderSize + pContext->Offload.ipHeaderOffset) + addPriorityLen; + pheader->gso_size = (USHORT)pSendEntry->ipTransferUnit; + pheader->csum_start = (USHORT)pContext->Offload.ipHeaderOffset + (USHORT)packetReview.ipHeaderSize + addPriorityLen; + pheader->csum_offset = TCP_CHECKSUM_OFFSET; + pMapperResult->usBuffersMapped = saveBuffers; + } + } + else if (pSendEntry->flags & SEND_ENTRY_IP_CS) + { + ParaNdis_CheckSumVerify( + pIpHeader, + lengthGet - pContext->Offload.ipHeaderOffset, + pcrIpChecksum | pcrFixIPChecksum, + __FUNCTION__); + } + + if (PriorityDataLong && pMapperResult->usBuffersMapped) + { + RtlMoveMemory( + RtlOffsetToPointer(pBuffer, ETH_PRIORITY_HEADER_OFFSET + ETH_PRIORITY_HEADER_SIZE), + RtlOffsetToPointer(pBuffer, ETH_PRIORITY_HEADER_OFFSET), + lengthGet - ETH_PRIORITY_HEADER_OFFSET + ); + NdisMoveMemory( + RtlOffsetToPointer(pBuffer, ETH_PRIORITY_HEADER_OFFSET), + &PriorityDataLong, + sizeof(ETH_PRIORITY_HEADER_SIZE)); + DPrintf(1, ("[%s] Populated priority value %lX", __FUNCTION__, PriorityDataLong)); + } + } + } + +} + +static void InitializeTransferParameters(tTxOperationParameters *pParams, tSendEntry *pEntry) +{ + ULONG flags = (pEntry->flags & SEND_ENTRY_TSO_USED) ? pcrLSO : 0; + if (pEntry->flags & SEND_ENTRY_NO_INDIRECT) flags |= pcrNoIndirect; + NdisQueryPacket(pEntry->packet, &pParams->nofSGFragments, NULL, NULL, (PUINT)&pParams->ulDataSize); + pParams->ReferenceValue = pEntry; + pParams->packet = pEntry->packet; + pParams->offloadMss = (pEntry->flags & SEND_ENTRY_TSO_USED) ? pEntry->ipTransferUnit : 0; + // on NDIS5 it is unknown + pParams->tcpHeaderOffset = 0; + // fills only if SGList present in the packet + GET_NUMBER_OF_SG_ELEMENTS(pEntry->packet, &pParams->nofSGFragments); + if (NDIS_GET_PACKET_PROTOCOL_TYPE(pEntry->packet) == NDIS_PROTOCOL_ID_TCP_IP) + { + flags |= pcrIsIP; + if (pEntry->flags & SEND_ENTRY_TCP_CS) + { + flags |= pcrTcpChecksum; + } + if (pEntry->flags & SEND_ENTRY_UDP_CS) + { + flags |= pcrUdpChecksum; + } + if (pEntry->flags & SEND_ENTRY_IP_CS) + { + flags |= pcrIpChecksum; + } + } + if (pEntry->PriorityDataLong) flags |= pcrPriorityTag; + pParams->flags = flags; +} + +BOOLEAN ParaNdis_ProcessTx( + PARANDIS_ADAPTER *pContext, + BOOLEAN IsDpc, + BOOLEAN IsInterrupt) +{ + LIST_ENTRY DoneList; + BOOLEAN bDoKick = FALSE; + UINT nBuffersSent = 0, nBytesSent = 0; + BOOLEAN bDataAvailable = FALSE; + tSendEntry *pEntry; + ONPAUSECOMPLETEPROC CallbackToCall = NULL; + InitializeListHead(&DoneList); + UNREFERENCED_PARAMETER(IsDpc); + NdisAcquireSpinLock(&pContext->SendLock); + + ParaNdis_DebugHistory(pContext, hopTxProcess, NULL, 1, pContext->nofFreeHardwareBuffers, pContext->nofFreeTxDescriptors); + do + { + if(IsTimeToReleaseTx(pContext)) + { + // release some buffers + ParaNdis_VirtIONetReleaseTransmitBuffers(pContext); + } + pEntry = NULL; + if (!IsListEmpty(&pContext->SendQueue)) + { + tCopyPacketResult result; + tTxOperationParameters Params; + pEntry = (tSendEntry *)RemoveHeadList(&pContext->SendQueue); + InitializeTransferParameters(&Params, pEntry); + bDataAvailable = TRUE; + result = ParaNdis_DoSubmitPacket(pContext, &Params); + if (result.error == cpeNoBuffer) + { + // can not send now, try next time + InsertHeadList(&pContext->SendQueue, &pEntry->list); + pEntry = NULL; + } + else if (result.error == cpeNoIndirect) + { + InsertHeadList(&pContext->SendQueue, 
&pEntry->list); + pEntry->flags |= SEND_ENTRY_NO_INDIRECT; + } + else + { + InsertTailList(&pContext->TxWaitingList, &pEntry->list); + ParaNdis_DebugHistory(pContext, hopSubmittedPacket, pEntry->packet, 0, result.error, Params.flags); + if (!result.size) + { + NDIS_STATUS status = NDIS_STATUS_FAILURE; + DPrintf(0, ("[%s] ERROR %d copying packet!", __FUNCTION__, result.error)); + if (result.error == cpeTooLarge) + { + status = NDIS_STATUS_BUFFER_OVERFLOW; + pContext->Statistics.ifOutErrors++; + } + NDIS_SET_PACKET_STATUS(pEntry->packet, status); + pEntry->flags |= SEND_ENTRY_FLAG_READY; + // do not worry, go to the next one + + } + else + { + nBuffersSent++; + nBytesSent += result.size; + DPrintf(2, ("[%s] Scheduled packet %p, entry %p(%d bytes)!", __FUNCTION__, + pEntry->packet, pEntry, result.size)); + } + } + } + } while (pEntry); + + if (nBuffersSent) + { + if(IsInterrupt) + { + bDoKick = TRUE; + } + else + { +#ifdef PARANDIS_TEST_TX_KICK_ALWAYS + virtqueue_kick_always(pContext->NetSendQueue); +#else + virtqueue_kick(pContext->NetSendQueue); +#endif + } + DPrintf(2, ("[%s] sent down %d p.(%d b.)", __FUNCTION__, nBuffersSent, nBytesSent)); + } + else if (bDataAvailable) + { + DPrintf(2, ("[%s] nothing sent", __FUNCTION__)); + } + + /* now check the waiting list of packets */ + while (!IsListEmpty(&pContext->TxWaitingList)) + { + pEntry = (tSendEntry *)RemoveHeadList(&pContext->TxWaitingList); + if (pEntry->flags & SEND_ENTRY_FLAG_READY) + { + InsertTailList(&DoneList, &pEntry->list); + } + else + { + InsertHeadList(&pContext->TxWaitingList, &pEntry->list); + break; + } + } + + if (IsListEmpty(&pContext->TxWaitingList) && pContext->SendState == srsPausing && pContext->SendPauseCompletionProc) + { + CallbackToCall = pContext->SendPauseCompletionProc; + pContext->SendPauseCompletionProc = NULL; + pContext->SendState = srsDisabled; + ParaNdis_DebugHistory(pContext, hopInternalSendPause, NULL, 0, 0, 0); + } + NdisReleaseSpinLock(&pContext->SendLock); + + while (!IsListEmpty(&DoneList)) + { + pEntry = (tSendEntry *)RemoveHeadList(&DoneList); + CompletePacket(pContext, pEntry->packet); + NdisFreeMemory(pEntry, 0, 0); + } + if (CallbackToCall) CallbackToCall(pContext); + + return bDoKick; +} + +/********************************************************** +NDIS releases packets previously indicated by miniport +Free the packet's buffer and the packet back to their pools +Returns VirtIO buffer back to queue of free blocks +Parameters: + context + IN PNDIS_PACKET Packet returned packet +***********************************************************/ +VOID NTAPI ParaNdis5_ReturnPacket(IN NDIS_HANDLE MiniportAdapterContext,IN PNDIS_PACKET Packet) +{ + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)MiniportAdapterContext; + pIONetDescriptor pBufferDescriptor; + pBufferDescriptor = (pIONetDescriptor) *REF_MINIPORT(Packet); + DPrintf(4, ("[%s] buffer %p", __FUNCTION__, pBufferDescriptor)); + + NdisAcquireSpinLock(&pContext->ReceiveLock); + pContext->ReuseBufferProc(pContext, pBufferDescriptor); + NdisReleaseSpinLock(&pContext->ReceiveLock); +} + +static __inline tSendEntry * PrepareSendEntry(PARANDIS_ADAPTER *pContext, PNDIS_PACKET Packet, ULONG len) +{ + ULONG mss = (ULONG)(ULONG_PTR)NDIS_PER_PACKET_INFO_FROM_PACKET(Packet, TcpLargeSendPacketInfo); + UINT protocol = NDIS_GET_PACKET_PROTOCOL_TYPE(Packet); + LPCSTR errorFmt = NULL; + LPCSTR offloadName = "NO offload"; + tSendEntry *pse = (tSendEntry *)ParaNdis_AllocateMemory(pContext, sizeof(tSendEntry)); + if (pse) + { + NDIS_PACKET_8021Q_INFO qInfo; + 
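        /* fill the new send entry: validate the 802.1Q tag first, then derive
           either LSO (mss-based) or per-packet checksum offload flags; a fatal
           inconsistency sets errorFmt, the entry is freed and NULL is returned
           so the caller fails the packet */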
pse->packet = Packet; + pse->flags = 0; + pse->PriorityDataLong = 0; + pse->ipTransferUnit = len; + //pse->fullTCPCheckSum = 0; + qInfo.Value = pContext->ulPriorityVlanSetting ? + NDIS_PER_PACKET_INFO_FROM_PACKET(Packet, Ieee8021QInfo) : NULL; + if (!qInfo.TagHeader.VlanId) qInfo.TagHeader.VlanId = pContext->VlanId; + if (qInfo.TagHeader.CanonicalFormatId || !IsValidVlanId(pContext, qInfo.TagHeader.VlanId)) + { + DPrintf(0, ("[%s] Discarding priority tag %p", __FUNCTION__, qInfo.Value)); + errorFmt = "invalid priority tag"; + } + else if (qInfo.Value) + { + // ignore priority, if configured + if (!IsPrioritySupported(pContext)) + qInfo.TagHeader.UserPriority = 0; + // ignore VlanId, if specified + if (!IsVlanSupported(pContext)) + qInfo.TagHeader.VlanId = 0; + SetPriorityData(pse->PriorityData, qInfo.TagHeader.UserPriority, qInfo.TagHeader.VlanId); + DPrintf(1, ("[%s] Populated priority tag %p", __FUNCTION__, qInfo.Value)); + } + + if (!errorFmt && !mss && len > pContext->MaxPacketSize.nMaxFullSizeOS) + { + DPrintf(0, ("[%s] Request for offload with NO MSS, lso %d, ipheader %d", + __FUNCTION__, pContext->Offload.flags.fTxLso, pContext->Offload.ipHeaderOffset)); + if (pContext->Offload.flags.fTxLso && pContext->Offload.ipHeaderOffset) + { + mss = pContext->MaxPacketSize.nMaxFullSizeOS; + } + else + errorFmt = "illegal LSO request"; + } + + if (errorFmt) + { + // already failed + } + else if (mss > pContext->MaxPacketSize.nMaxFullSizeOS) + errorFmt = "mss is too big"; + else if (len > 0xFFFF) + errorFmt = "packet is bigger than we able to send"; + else if (mss && pContext->Offload.flags.fTxLso) + { + offloadName = "LSO"; + pse->ipTransferUnit = mss; + pse->flags |= SEND_ENTRY_TSO_USED; + // todo: move to common space + // to transmit 'len' with 'mss' we usually need 2 additional buffers + if ((len / mss + 3) > pContext->maxFreeHardwareBuffers) + errorFmt = "packet too big to fragment"; + else if (len < pContext->Offload.ipHeaderOffset) + errorFmt = "ip offset is bigger than packet"; + else if (protocol != NDIS_PROTOCOL_ID_TCP_IP) + errorFmt = "attempt to offload non-IP packet"; + else if (mss < pContext->Offload.ipHeaderOffset) + errorFmt = "mss is too small"; + } + else + { + // unexpected CS requests we do not fail - WHQL expects us to send them as is + NDIS_TCP_IP_CHECKSUM_PACKET_INFO csInfo; + csInfo.Value = (ULONG)(ULONG_PTR)NDIS_PER_PACKET_INFO_FROM_PACKET(Packet, TcpIpChecksumPacketInfo); + if (csInfo.Transmit.NdisPacketChecksumV4) + { + if (csInfo.Transmit.NdisPacketTcpChecksum) + { + offloadName = "TCP CS"; + if (pContext->Offload.flags.fTxTCPChecksum) + pse->flags |= SEND_ENTRY_TCP_CS; + else + errorFmt = "TCP CS requested but not enabled"; + } + if (csInfo.Transmit.NdisPacketUdpChecksum) + { + offloadName = "UDP CS"; + if (pContext->Offload.flags.fTxUDPChecksum) + pse->flags |= SEND_ENTRY_UDP_CS; + else + errorFmt = "UDP CS requested but not enabled"; + } + if (csInfo.Transmit.NdisPacketIpChecksum) + { + if (pContext->Offload.flags.fTxIPChecksum) + pse->flags |= SEND_ENTRY_IP_CS; + else + errorFmt = "IP CS requested but not enabled"; + } + if (errorFmt) + { + DPrintf(0, ("[%s] ERROR: %s (len %d)", __FUNCTION__, errorFmt, len)); + errorFmt = NULL; + } + } + } + } + + if (errorFmt) + { + DPrintf(0, ("[%s] ERROR: %s (len %d, mss %d)", __FUNCTION__, errorFmt, len, mss)); + if (pse) NdisFreeMemory(pse, 0, 0); + pse = NULL; + } + else + { + NDIS_PER_PACKET_INFO_FROM_PACKET(Packet, TcpLargeSendPacketInfo) = (PVOID)(ULONG_PTR)0; + DPrintf(1, ("[%s] Sending packet of %d with %s", 
__FUNCTION__, len, offloadName)); + if (pContext->bDoIPCheckTx) + { + tTcpIpPacketParsingResult res; + VOID *pcopy = ParaNdis_AllocateMemory(pContext, len); + ParaNdis_PacketCopier(pse->packet, pcopy, len, pse, TRUE); + res = ParaNdis_CheckSumVerify( + RtlOffsetToPointer(pcopy, pContext->Offload.ipHeaderOffset), + len, + pcrAnyChecksum/* | pcrFixAnyChecksum*/, + __FUNCTION__); + /* + if (res.xxpStatus == ppresXxpKnown) + { + TCPHeader *ptcp = (TCPHeader *) + RtlOffsetToPointer(pcopy, pContext->Offload.ipHeaderOffset + res.ipHeaderSize); + pse->fullTCPCheckSum = ptcp->tcp_xsum; + } + */ + NdisFreeMemory(pcopy, 0, 0); + } + } + return pse; +} + +/********************************************************** +NDIS sends us packets + Queues packets internally and calls the procedure to process the queue + +Parameters: + context + IN PPNDIS_PACKET PacketArray Array of packets to send + IN UINT NumberOfPackets number of packets + +***********************************************************/ +VOID NTAPI ParaNdis5_SendPackets(IN NDIS_HANDLE MiniportAdapterContext, + IN PPNDIS_PACKET PacketArray, + IN UINT NumberOfPackets) +{ + UINT i; + LIST_ENTRY FailedList, DoneList; + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)MiniportAdapterContext; + InitializeListHead(&FailedList); + InitializeListHead(&DoneList); + DPrintf(3, ("[%s] %d packets", __FUNCTION__, NumberOfPackets)); + ParaNdis_DebugHistory(pContext, hopSend, NULL, 1, NumberOfPackets, 0); + + NdisAcquireSpinLock(&pContext->SendLock); + + for (i = 0; i < NumberOfPackets; ++i) + { + UINT uPacketLength = 0; + NdisQueryPacketLength(PacketArray[i], &uPacketLength); + NDIS_SET_PACKET_STATUS(PacketArray[i], NDIS_STATUS_SUCCESS); + NdisInterlockedIncrement(&pContext->NetTxPacketsToReturn); + if (!pContext->bSurprizeRemoved && pContext->bConnected && pContext->SendState == srsEnabled && uPacketLength) + { + tSendEntry *pse = PrepareSendEntry(pContext, PacketArray[i], uPacketLength); + if (!pse) + { + NDIS_SET_PACKET_STATUS(PacketArray[i], NDIS_STATUS_FAILURE); + CompletePacket(pContext, PacketArray[i]); + } + else + { + UINT nFragments = 0; + GET_NUMBER_OF_SG_ELEMENTS(PacketArray[i], &nFragments); + ParaNdis_DebugHistory(pContext, hopSendPacketMapped, PacketArray[i], 0, nFragments, 0); + InsertTailList(&pContext->SendQueue, &pse->list); + } + } + else + { + NDIS_STATUS status = NDIS_STATUS_FAILURE; + if (pContext->bSurprizeRemoved) status = NDIS_STATUS_NOT_ACCEPTED; + NDIS_SET_PACKET_STATUS(PacketArray[i], status); + CompletePacket(pContext, PacketArray[i]); + DPrintf(1, ("[%s] packet of %d rejected", __FUNCTION__, uPacketLength)); + } + } + + NdisReleaseSpinLock(&pContext->SendLock); + + ParaNdis_ProcessTx(pContext, FALSE, FALSE); +} + +/********************************************************** +NDIS procedure, not easy to test +NDIS asks us to cancel packets with specified CancelID + +Parameters: + context + PVOID CancelId ID to cancel + +***********************************************************/ +VOID NTAPI ParaNdis5_CancelSendPackets(IN NDIS_HANDLE MiniportAdapterContext,IN PVOID CancelId) +{ + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)MiniportAdapterContext; + LIST_ENTRY DoneList, KeepList; + UINT n = 0; + tSendEntry *pEntry; + DEBUG_ENTRY(0); + InitializeListHead(&DoneList); + InitializeListHead(&KeepList); + NdisAcquireSpinLock(&pContext->SendLock); + while ( !IsListEmpty(&pContext->SendQueue)) + { + PNDIS_PACKET Packet; + pEntry = (tSendEntry *)RemoveHeadList(&pContext->SendQueue); + Packet = pEntry->packet; + if 
(NDIS_GET_PACKET_CANCEL_ID(Packet) == CancelId) + { + InsertTailList(&DoneList, &pEntry->list); + ++n; + } + else InsertTailList(&KeepList, &pEntry->list); + } + while ( !IsListEmpty(&KeepList)) + { + pEntry = (tSendEntry *)RemoveHeadList(&KeepList); + InsertTailList(&pContext->SendQueue, &pEntry->list); + } + NdisReleaseSpinLock(&pContext->SendLock); + while (!IsListEmpty(&DoneList)) + { + pEntry = (tSendEntry *)RemoveHeadList(&DoneList); + NDIS_SET_PACKET_STATUS(pEntry->packet, NDIS_STATUS_REQUEST_ABORTED); + CompletePacket(pContext, pEntry->packet); + NdisFreeMemory(pEntry, 0, 0); + } + DEBUG_EXIT_STATUS(0, n); +} + +/********************************************************** +Request to pause or resume data transmit +if stopped, all the packets in internal queue are returned +Parameters: + context + BOOLEAN bStop 1/0 - top or resume +***********************************************************/ +NDIS_STATUS ParaNdis5_StopSend(PARANDIS_ADAPTER *pContext, BOOLEAN bStop, ONPAUSECOMPLETEPROC Callback) +{ + NDIS_STATUS status = NDIS_STATUS_SUCCESS; + if (bStop) + { + LIST_ENTRY DoneList; + tSendEntry *pEntry; + DEBUG_ENTRY(0); + ParaNdis_DebugHistory(pContext, hopInternalSendPause, NULL, 1, 0, 0); + InitializeListHead(&DoneList); + NdisAcquireSpinLock(&pContext->SendLock); + if (IsListEmpty(&pContext->TxWaitingList)) + { + pContext->SendState = srsDisabled; + while (!IsListEmpty(&pContext->SendQueue)) + { + pEntry = (tSendEntry *)RemoveHeadList(&pContext->SendQueue); + InsertTailList(&DoneList, &pEntry->list); + } + ParaNdis_DebugHistory(pContext, hopInternalSendPause, NULL, 0, 0, 0); + } + else + { + pContext->SendState = srsPausing; + pContext->SendPauseCompletionProc = Callback; + status = NDIS_STATUS_PENDING; + while (!IsListEmpty(&pContext->SendQueue)) + { + pEntry = (tSendEntry *)RemoveHeadList(&pContext->SendQueue); + pEntry->flags |= SEND_ENTRY_FLAG_READY; + InsertTailList(&pContext->TxWaitingList, &pEntry->list); + } + } + + NdisReleaseSpinLock(&pContext->SendLock); + while (!IsListEmpty(&DoneList)) + { + pEntry = (tSendEntry *)RemoveHeadList(&DoneList); + NDIS_SET_PACKET_STATUS(pEntry->packet, NDIS_STATUS_REQUEST_ABORTED); + CompletePacket(pContext, pEntry->packet); + NdisFreeMemory(pEntry, 0, 0); + } + } + else + { + pContext->SendState = srsEnabled; + ParaNdis_DebugHistory(pContext, hopInternalSendResume, NULL, 0, 0, 0); + } + return status; +} + +/********************************************************** +Pause or resume receive operation: +Parameters: + context + BOOLEAN bStop 1/0 - pause or resume + ONPAUSECOMPLETEPROC Callback callback to call, if not completed immediately + +Return value: + SUCCESS, if there is no RX packets under NDIS management + PENDING, if we need to wait until NDIS returns us packets +***********************************************************/ +NDIS_STATUS ParaNdis5_StopReceive( + PARANDIS_ADAPTER *pContext, + BOOLEAN bStop, + ONPAUSECOMPLETEPROC Callback + ) +{ + NDIS_STATUS status = NDIS_STATUS_SUCCESS; + if (bStop) + { + ParaNdis_DebugHistory(pContext, hopInternalReceivePause, NULL, 1, 0, 0); + NdisAcquireSpinLock(&pContext->ReceiveLock); + if (IsListEmpty(&pContext->NetReceiveBuffersWaiting)) + { + pContext->ReceiveState = srsDisabled; + ParaNdis_DebugHistory(pContext, hopInternalReceivePause, NULL, 0, 0, 0); + } + else + { + pContext->ReceiveState = srsPausing; + pContext->ReceivePauseCompletionProc = Callback; + status = NDIS_STATUS_PENDING; + } + NdisReleaseSpinLock(&pContext->ReceiveLock); + } + else + { + pContext->ReceiveState = srsEnabled; 
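        /* resuming is synchronous: unlike the pause branch above, which returns
           NDIS_STATUS_PENDING while NDIS still owns indicated packets, enabling
           reception only needs the state flag to be flipped */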
+ ParaNdis_DebugHistory(pContext, hopInternalReceiveResume, NULL, 0, 0, 0); + } + return status; +} + +/************************************************************* +Required NDIS procedure, spawns regular (Common) DPC processing +*************************************************************/ +VOID NTAPI ParaNdis5_HandleDPC(IN NDIS_HANDLE MiniportAdapterContext) +{ + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)MiniportAdapterContext; + ULONG requiresProcessing; + BOOLEAN unused; + DEBUG_ENTRY(7); + // we do not need the timer, as DPC will do all the job + // this is not a problem if the timer procedure is already running, + // we need to do our job anyway + NdisCancelTimer(&pContext->DPCPostProcessTimer, &unused); + requiresProcessing = ParaNdis_DPCWorkBody(pContext, PARANDIS_UNLIMITED_PACKETS_TO_INDICATE); + if (requiresProcessing) + { + // we need to request additional DPC + InterlockedOr(&pContext->InterruptStatus, requiresProcessing); + NdisSetTimer(&pContext->DPCPostProcessTimer, 10); + } +} + +BOOLEAN ParaNdis_SynchronizeWithInterrupt( + PARANDIS_ADAPTER *pContext, + ULONG messageId, + tSynchronizedProcedure procedure, + PVOID parameter) +{ + tSynchronizedContext SyncContext; + SyncContext.pContext = pContext; + SyncContext.Parameter = parameter; + return NdisMSynchronizeWithInterrupt(&pContext->Interrupt, procedure, &SyncContext); +} diff --git a/drivers/network/dd/netkvm/wxp/ParaNdis5-Oid.c b/drivers/network/dd/netkvm/wxp/ParaNdis5-Oid.c new file mode 100644 index 00000000000..d3c784e2d65 --- /dev/null +++ b/drivers/network/dd/netkvm/wxp/ParaNdis5-Oid.c @@ -0,0 +1,785 @@ +/* + * This file contains NDIS5.X implementation of + * OID-related adapter driver procedures + * + * Copyright (c) 2008-2017 Red Hat, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ +#include "ParaNdis5.h" +#include "ParaNdis-Oid.h" + +#ifdef WPP_EVENT_TRACING +#include "ParaNdis5-Oid.tmh" +#endif + +#define OIDENTRY(oid, el, xfl, xokl, flags) \ +{ oid, el, xfl, xokl, flags, NULL } +#define OIDENTRYPROC(oid, el, xfl, xokl, flags, setproc) \ +{ oid, el, xfl, xokl, flags, setproc } + +static NDIS_TASK_OFFLOAD_HEADER ReservedHeader = +{ + NDIS_TASK_OFFLOAD_VERSION, + sizeof(NDIS_TASK_OFFLOAD_HEADER), + 0, + 0, + { IEEE_802_3_Encapsulation, { 1, 0 }, 0 } +}; + + +static NDIS_OID SupportedOids[] = { + OID_GEN_SUPPORTED_LIST, + OID_GEN_HARDWARE_STATUS, + OID_GEN_MEDIA_SUPPORTED, + OID_GEN_MEDIA_IN_USE, + OID_GEN_MAXIMUM_LOOKAHEAD, + OID_GEN_MAXIMUM_FRAME_SIZE, + OID_GEN_LINK_SPEED, + OID_GEN_TRANSMIT_BUFFER_SPACE, + OID_GEN_RECEIVE_BUFFER_SPACE, + OID_GEN_TRANSMIT_BLOCK_SIZE, + OID_GEN_RECEIVE_BLOCK_SIZE, + OID_GEN_VENDOR_ID, + OID_GEN_VENDOR_DESCRIPTION, + OID_GEN_VENDOR_DRIVER_VERSION, + OID_GEN_CURRENT_PACKET_FILTER, + OID_GEN_CURRENT_LOOKAHEAD, + OID_GEN_DRIVER_VERSION, + OID_GEN_MAXIMUM_TOTAL_SIZE, + OID_GEN_PROTOCOL_OPTIONS, + OID_GEN_MAC_OPTIONS, + OID_GEN_MEDIA_CONNECT_STATUS, + OID_GEN_MAXIMUM_SEND_PACKETS, + OID_GEN_XMIT_OK, + OID_GEN_RCV_OK, + OID_GEN_VLAN_ID, + OID_GEN_XMIT_ERROR, + OID_GEN_RCV_ERROR, + OID_GEN_RCV_NO_BUFFER, + OID_GEN_RCV_CRC_ERROR, + OID_GEN_TRANSMIT_QUEUE_LENGTH, + OID_802_3_PERMANENT_ADDRESS, + OID_802_3_CURRENT_ADDRESS, + OID_802_3_MULTICAST_LIST, + OID_802_3_MAC_OPTIONS, + OID_802_3_MAXIMUM_LIST_SIZE, + OID_802_3_RCV_ERROR_ALIGNMENT, + OID_802_3_XMIT_ONE_COLLISION, + OID_802_3_XMIT_MORE_COLLISIONS, + OID_802_3_XMIT_DEFERRED, + OID_802_3_XMIT_MAX_COLLISIONS, + OID_802_3_RCV_OVERRUN, + OID_802_3_XMIT_UNDERRUN, + OID_802_3_XMIT_HEARTBEAT_FAILURE, + OID_802_3_XMIT_TIMES_CRS_LOST, + OID_802_3_XMIT_LATE_COLLISIONS, + OID_PNP_CAPABILITIES, + OID_PNP_SET_POWER, + OID_PNP_QUERY_POWER, + OID_PNP_ADD_WAKE_UP_PATTERN, + OID_PNP_REMOVE_WAKE_UP_PATTERN, + OID_PNP_ENABLE_WAKE_UP, + OID_TCP_TASK_OFFLOAD +}; + +static NDIS_STATUS OnOidSetNdis5Offload(PARANDIS_ADAPTER *pContext, tOidDesc *pOid); +static NDIS_STATUS CreateOffloadInfo5ForQuery(PARANDIS_ADAPTER *pContext, tOidDesc *pOid, PVOID *ppInfo, PULONG pulSize); +static NDIS_STATUS CreateOffloadInfo5Internal(PARANDIS_ADAPTER *pContext, PVOID *ppInfo, PULONG pulSize, PCCHAR reason, NDIS_TASK_OFFLOAD_HEADER *pHeader); + +/********************************************************** +Structure defining how to process all the oids +***********************************************************/ +// oid e f ok flags set procedure +static const tOidWhatToDo OidsDB[] = +{ +OIDENTRY(OID_GEN_SUPPORTED_LIST, 2,2,4, ohfQueryStat ), +OIDENTRY(OID_GEN_HARDWARE_STATUS, 2,0,4, ohfQuery ), +OIDENTRY(OID_GEN_MEDIA_SUPPORTED, 2,0,4, ohfQuery ), +OIDENTRY(OID_GEN_MEDIA_IN_USE, 2,0,4, ohfQuery ), +OIDENTRY(OID_GEN_MAXIMUM_LOOKAHEAD, 2,0,4, ohfQuery ), +OIDENTRY(OID_GEN_MAXIMUM_FRAME_SIZE, 2,0,4, ohfQuery ), +OIDENTRY(OID_GEN_LINK_SPEED, 6,0,6, ohfQuery ), +OIDENTRY(OID_GEN_TRANSMIT_BUFFER_SPACE, 2,0,4, ohfQuery ), +OIDENTRY(OID_GEN_RECEIVE_BUFFER_SPACE, 2,0,4, ohfQuery ), +OIDENTRY(OID_GEN_TRANSMIT_BLOCK_SIZE, 2,0,4, ohfQuery ), +OIDENTRY(OID_GEN_RECEIVE_BLOCK_SIZE, 2,0,4, ohfQuery ), +OIDENTRY(OID_GEN_VENDOR_ID, 2,0,4, ohfQueryStat ), +OIDENTRY(OID_GEN_VENDOR_DESCRIPTION, 2,2,4, ohfQuery ), +OIDENTRYPROC(OID_GEN_CURRENT_PACKET_FILTER, 2,0,4, ohfQuerySet, ParaNdis_OnSetPacketFilter), +OIDENTRYPROC(OID_GEN_CURRENT_LOOKAHEAD, 2,0,4, ohfQuerySet, ParaNdis_OnSetLookahead), +OIDENTRY(OID_GEN_DRIVER_VERSION, 2,0,4, ohfQuery ), 
+OIDENTRY(OID_GEN_MAXIMUM_TOTAL_SIZE, 2,0,4, ohfQuery ), +OIDENTRY(OID_GEN_PROTOCOL_OPTIONS, 2,0,4, 0 ), +OIDENTRY(OID_GEN_MAC_OPTIONS, 2,0,4, ohfQuery ), +OIDENTRY(OID_GEN_MEDIA_CONNECT_STATUS, 6,0,6, ohfQuery ), +OIDENTRY(OID_GEN_MAXIMUM_SEND_PACKETS, 2,0,4, ohfQuery ), +OIDENTRY(OID_GEN_VENDOR_DRIVER_VERSION, 2,0,4, ohfQuery ), +OIDENTRY(OID_GEN_SUPPORTED_GUIDS, 2,2,4, 0 ), +OIDENTRY(OID_GEN_TRANSPORT_HEADER_OFFSET, 2,4,4, 0 ), +OIDENTRY(OID_GEN_MEDIA_CAPABILITIES, 2,4,4, 0 ), +OIDENTRY(OID_GEN_PHYSICAL_MEDIUM, 2,4,4, 0 ), +OIDENTRY(OID_GEN_XMIT_OK, 6,0,6, ohfQuery3264 ), +OIDENTRY(OID_GEN_RCV_OK, 6,0,4, ohfQuery3264 ), +OIDENTRY(OID_GEN_XMIT_ERROR, 6,0,6, ohfQuery3264 ), +OIDENTRY(OID_GEN_RCV_ERROR, 6,0,6, ohfQuery3264 ), +OIDENTRY(OID_GEN_RCV_NO_BUFFER, 2,0,4, ohfQuery3264 ), +OIDENTRY(OID_GEN_DIRECTED_BYTES_XMIT, 2,4,4, 0 ), +OIDENTRY(OID_GEN_DIRECTED_FRAMES_XMIT, 2,4,4, 0 ), +OIDENTRY(OID_GEN_MULTICAST_BYTES_XMIT, 2,4,4, 0 ), +OIDENTRY(OID_GEN_MULTICAST_FRAMES_XMIT, 2,4,4, 0 ), +OIDENTRY(OID_GEN_BROADCAST_BYTES_XMIT, 2,4,4, 0 ), +OIDENTRY(OID_GEN_BROADCAST_FRAMES_XMIT, 2,4,4, 0 ), +OIDENTRY(OID_GEN_DIRECTED_BYTES_RCV, 2,4,4, 0 ), +OIDENTRY(OID_GEN_DIRECTED_FRAMES_RCV, 2,4,4, 0 ), +OIDENTRY(OID_GEN_MULTICAST_BYTES_RCV, 2,4,4, 0 ), +OIDENTRY(OID_GEN_MULTICAST_FRAMES_RCV, 2,4,4, 0 ), +OIDENTRY(OID_GEN_BROADCAST_BYTES_RCV, 2,4,4, 0 ), +OIDENTRY(OID_GEN_BROADCAST_FRAMES_RCV, 2,4,4, 0 ), +OIDENTRY(OID_GEN_RCV_CRC_ERROR, 2,0,4, ohfQuery3264 ), +OIDENTRY(OID_GEN_TRANSMIT_QUEUE_LENGTH, 2,0,4, ohfQuery ), +OIDENTRY(OID_GEN_GET_TIME_CAPS, 2,4,4, 0 ), +OIDENTRY(OID_GEN_GET_NETCARD_TIME, 2,4,4, 0 ), +OIDENTRY(OID_GEN_NETCARD_LOAD, 2,4,4, 0 ), +OIDENTRY(OID_GEN_DEVICE_PROFILE, 2,4,4, 0 ), +OIDENTRY(OID_GEN_INIT_TIME_MS, 2,4,4, 0 ), +OIDENTRY(OID_GEN_RESET_COUNTS, 2,4,4, 0 ), +OIDENTRY(OID_GEN_MEDIA_SENSE_COUNTS, 2,4,4, 0 ), +OIDENTRY(OID_PNP_CAPABILITIES, 2,0,4, ohfQuery ), +OIDENTRY(OID_PNP_QUERY_POWER, 2,0,4, ohfQuery ), +OIDENTRY(OID_802_3_PERMANENT_ADDRESS, 2,0,4, ohfQueryStat ), +OIDENTRY(OID_802_3_CURRENT_ADDRESS, 2,0,4, ohfQueryStat ), +OIDENTRY(OID_802_3_MAXIMUM_LIST_SIZE, 2,0,4, ohfQuery ), +OIDENTRY(OID_802_3_MAC_OPTIONS, 2,4,4, ohfQuery ), +OIDENTRY(OID_802_3_RCV_ERROR_ALIGNMENT, 2,0,4, ohfQuery3264 ), +OIDENTRY(OID_802_3_XMIT_ONE_COLLISION, 2,4,4, ohfQuery3264 ), +OIDENTRY(OID_802_3_XMIT_MORE_COLLISIONS, 2,4,4, ohfQuery3264 ), +OIDENTRY(OID_802_3_XMIT_DEFERRED, 2,0,4, ohfQuery3264 ), +OIDENTRY(OID_802_3_XMIT_MAX_COLLISIONS, 2,0,4, ohfQuery3264 ), +OIDENTRY(OID_802_3_RCV_OVERRUN, 2,0,4, ohfQuery3264 ), +OIDENTRY(OID_802_3_XMIT_UNDERRUN, 2,0,4, ohfQuery3264 ), +OIDENTRY(OID_802_3_XMIT_HEARTBEAT_FAILURE, 2,0,4, ohfQuery3264 ), +OIDENTRY(OID_802_3_XMIT_TIMES_CRS_LOST, 2,0,4, ohfQuery3264 ), +OIDENTRY(OID_802_3_XMIT_LATE_COLLISIONS, 2,0,4, ohfQuery3264 ), +OIDENTRY(OID_GEN_MACHINE_NAME, 2,4,4, 0 ), +OIDENTRY(OID_IP4_OFFLOAD_STATS, 4,4,4, 0 ), +OIDENTRY(OID_IP6_OFFLOAD_STATS, 4,4,4, 0 ), +OIDENTRY(OID_802_11_CAPABILITY, 4,4,4, 0 ), +OIDENTRYPROC(OID_PNP_ADD_WAKE_UP_PATTERN, 2,0,4, ohfSet, ParaNdis_OnAddWakeupPattern), +OIDENTRYPROC(OID_PNP_REMOVE_WAKE_UP_PATTERN, 2,0,4, ohfSet, ParaNdis_OnRemoveWakeupPattern), +OIDENTRYPROC(OID_PNP_ENABLE_WAKE_UP, 2,0,4, ohfQuerySet, ParaNdis_OnEnableWakeup), +OIDENTRYPROC(OID_PNP_SET_POWER, 2,0,4, ohfSet | ohfSetMoreOK, ParaNdis_OnSetPower), +OIDENTRYPROC(OID_GEN_CURRENT_LOOKAHEAD, 2,0,4, ohfQuerySet, ParaNdis_OnSetLookahead), +OIDENTRYPROC(OID_GEN_CURRENT_PACKET_FILTER, 2,0,4, ohfQuerySet, ParaNdis_OnSetPacketFilter), 
+OIDENTRYPROC(OID_802_3_MULTICAST_LIST, 2,0,4, ohfQuerySet, ParaNdis_OnOidSetMulticastList), +OIDENTRY(OID_FFP_SUPPORT, 2,4,4, 0 ), +OIDENTRYPROC(OID_TCP_TASK_OFFLOAD, 0,0,0, ohfQuerySet, OnOidSetNdis5Offload), +OIDENTRYPROC(OID_GEN_VLAN_ID, 0,4,4, ohfQuerySet, ParaNdis_OnSetVlanId), +OIDENTRY(0x00010203 /*(OID_GEN_RECEIVE_SCALE_CAPABILITIES)*/, 2,4,4, 0 ), +OIDENTRY(0x0001021F /*(OID_GEN_RECEIVE_HASH)*/, 2,4,4, 0 ), +OIDENTRY(0, 4,4,4, 0), +}; + +/********************************************************** +Returns to common query processor the array of supported oids +***********************************************************/ +void ParaNdis_GetSupportedOid(PVOID *pOidsArray, PULONG pLength) +{ + *pOidsArray = SupportedOids; + *pLength = sizeof(SupportedOids); +} + + +/***************************************************************** +Handles NDIS5 specific OID, all the rest handled by common handler +*****************************************************************/ +static NDIS_STATUS ParaNdis_OidQuery(PARANDIS_ADAPTER *pContext, tOidDesc *pOid) +{ + NDIS_STATUS status; + BOOLEAN bFreeInfo = FALSE; + PVOID pInfo = NULL; + ULONG ulSize = 0; + ULONG ulLinkSpeed = 0; + + switch(pOid->Oid) + { + case OID_TCP_TASK_OFFLOAD: + status = CreateOffloadInfo5ForQuery(pContext, pOid, &pInfo, &ulSize); + bFreeInfo = pInfo != NULL; + break; + case OID_GEN_LINK_SPEED: + { + /* units are 100 bps */ + ulLinkSpeed = (ULONG)(PARANDIS_FORMAL_LINK_SPEED / 100); + pInfo = &ulLinkSpeed; + ulSize = sizeof(ulLinkSpeed); + status = NDIS_STATUS_SUCCESS; + } + break; + default: + return ParaNdis_OidQueryCommon(pContext, pOid); + } + if (status == NDIS_STATUS_SUCCESS) + { + status = ParaNdis_OidQueryCopy(pOid, pInfo, ulSize, bFreeInfo); + } + else if (bFreeInfo) + { + NdisFreeMemory(pInfo, 0, 0); + } + return status; +} + +/********************************************************** +NDIS required procedure of OID QUERY +Just passes all the supported oids to common query procedure +Return value: + NDIS_STATUS as returned from common code + NDIS_STATUS_NOT_SUPPORTED if suppressed in the table +***********************************************************/ +NDIS_STATUS NTAPI ParaNdis5_QueryOID(IN NDIS_HANDLE MiniportAdapterContext, + IN NDIS_OID Oid, + IN PVOID InformationBuffer, + IN ULONG InformationBufferLength, + OUT PULONG BytesWritten, + OUT PULONG BytesNeeded) +{ + NDIS_STATUS status = NDIS_STATUS_NOT_SUPPORTED; + tOidWhatToDo Rules; + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)MiniportAdapterContext; + tOidDesc _oid; + ParaNdis_GetOidSupportRules(Oid, &Rules, OidsDB); + _oid.ulToDoFlags = Rules.Flags; + *BytesWritten = 0; + *BytesNeeded = 0; + ParaNdis_DebugHistory(pContext, hopOidRequest, NULL, Oid, 0, 1); + DPrintf(Rules.nEntryLevel, ("[%s], id 0x%X(%s) of %d", __FUNCTION__, + Oid, + Rules.name, + InformationBufferLength)); + _oid.Oid = Oid; + _oid.InformationBuffer = InformationBuffer; + _oid.InformationBufferLength = InformationBufferLength; + _oid.pBytesNeeded = (PUINT)BytesNeeded; + _oid.pBytesRead = (PUINT)BytesWritten; + _oid.pBytesWritten = (PUINT)BytesWritten; + if (pContext->bSurprizeRemoved) status = NDIS_STATUS_NOT_ACCEPTED; + else if (Rules.Flags & ohfQuery) status = ParaNdis_OidQuery(pContext, &_oid); + + + ParaNdis_DebugHistory(pContext, hopOidRequest, NULL, Oid, status, 0); + DPrintf((status != NDIS_STATUS_SUCCESS) ? 
Rules.nExitFailLevel : Rules.nExitOKLevel, + ("[%s] , id 0x%X(%s) (%X), written %d, needed %d", + __FUNCTION__, + Rules.oid, + Rules.name, + status, + *BytesWritten, + *BytesNeeded)); + return status; + +} + +/********************************************************** +NDIS required procedure of OID SET +Just passes all the supported oids to common set procedure +Return value: + NDIS_STATUS as returned from set procedure + NDIS_STATUS_NOT_SUPPORTED if support not defined in the table +***********************************************************/ +NDIS_STATUS NTAPI ParaNdis5_SetOID(IN NDIS_HANDLE MiniportAdapterContext, + IN NDIS_OID Oid, + IN PVOID InformationBuffer, + IN ULONG InformationBufferLength, + OUT PULONG BytesRead, + OUT PULONG BytesNeeded) +{ + NDIS_STATUS status = NDIS_STATUS_NOT_SUPPORTED; + tOidWhatToDo Rules; + PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)MiniportAdapterContext; + tOidDesc _oid; + ParaNdis_GetOidSupportRules(Oid, &Rules, OidsDB); + _oid.ulToDoFlags = Rules.Flags; + *BytesRead = 0; + *BytesNeeded = 0; + ParaNdis_DebugHistory(pContext, hopOidRequest, NULL, Oid, 1, 1); + DPrintf(Rules.nEntryLevel, ("[%s], id 0x%X(%s) of %d", __FUNCTION__, + Oid, + Rules.name, + InformationBufferLength)); + _oid.Oid = Oid; + _oid.InformationBuffer = InformationBuffer; + _oid.InformationBufferLength = InformationBufferLength; + _oid.pBytesNeeded = (PUINT)BytesNeeded; + _oid.pBytesRead = (PUINT)BytesRead; + _oid.pBytesWritten = (PUINT)BytesRead; + if (pContext->bSurprizeRemoved) status = NDIS_STATUS_NOT_ACCEPTED; + else if (Rules.Flags & ohfSet) + { + if (Rules.OidSetProc) status = Rules.OidSetProc(pContext, &_oid); + else + { + DPrintf(0, ("[%s] ERROR in OID redirection table", __FUNCTION__)); + status = NDIS_STATUS_INVALID_OID; + } + } + ParaNdis_DebugHistory(pContext, hopOidRequest, NULL, Oid, status, 0); + if (status != NDIS_STATUS_PENDING) + { + DPrintf((status != NDIS_STATUS_SUCCESS) ? 
Rules.nExitFailLevel : Rules.nExitOKLevel, + ("[%s] , id 0x%X(%s) (%X), read %d, needed %d", __FUNCTION__, + Rules.oid, Rules.name, status, *BytesRead, *BytesNeeded)); + } + return status; +} + +static void NTAPI OnSetPowerWorkItem(NDIS_WORK_ITEM * pWorkItem, PVOID Context) +{ + NDIS_STATUS status = NDIS_STATUS_SUCCESS; + tPowerWorkItem *pwi = (tPowerWorkItem *)pWorkItem; + PARANDIS_ADAPTER *pContext = pwi->pContext; + if (pwi->state == (NDIS_DEVICE_POWER_STATE)NetDeviceStateD0) + { + status = ParaNdis_PowerOn(pContext); + } + else + { + ParaNdis_PowerOff(pContext); + } + NdisFreeMemory(pwi, 0, 0); + ParaNdis_DebugHistory(pContext, hopOidRequest, NULL, OID_PNP_SET_POWER, 0, 2); + NdisMSetInformationComplete(pContext->MiniportHandle, status); +} + +/********************************************************** +NDIS5.X handler of power management +***********************************************************/ +NDIS_STATUS ParaNdis_OnSetPower(PARANDIS_ADAPTER *pContext, tOidDesc *pOid) +{ + NDIS_STATUS status; + NDIS_DEVICE_POWER_STATE newState; + DEBUG_ENTRY(0); + status = ParaNdis_OidSetCopy(pOid, &newState, sizeof(newState)); + if (status == NDIS_STATUS_SUCCESS) + { + tPowerWorkItem *pwi = ParaNdis_AllocateMemory(pContext, sizeof(tPowerWorkItem)); + status = NDIS_STATUS_FAILURE; + if (pwi) + { + pwi->pContext = pContext; + pwi->state = newState; + NdisInitializeWorkItem(&pwi->wi, OnSetPowerWorkItem, pwi); + if (NdisScheduleWorkItem(&pwi->wi) == NDIS_STATUS_SUCCESS) + { + status = NDIS_STATUS_PENDING; + } + else + NdisFreeMemory(pwi, 0, 0); + } + } + return status; +} + +/*************************************************** +check that the incoming NDIS_TASK_TCP_IP_CHECKSUM +does not enable options which we do not support +***************************************************/ +static BOOLEAN IsValidPcs( PARANDIS_ADAPTER *pContext, NDIS_TASK_TCP_IP_CHECKSUM *pcs) +{ + tOffloadSettingsFlags f; + BOOLEAN bInvalid = FALSE; + ParaNdis_ResetOffloadSettings(pContext, &f, NULL); + bInvalid |= pcs->V4Receive.IpChecksum && !f.fRxIPChecksum; + bInvalid |= pcs->V4Receive.IpOptionsSupported && !f.fRxIPOptions; + bInvalid |= pcs->V4Receive.TcpChecksum && !f.fRxTCPChecksum; + bInvalid |= pcs->V4Receive.TcpOptionsSupported && !f.fRxTCPOptions; + bInvalid |= pcs->V4Receive.UdpChecksum && !f.fRxUDPChecksum; + + bInvalid |= pcs->V4Transmit.IpChecksum && !f.fTxIPChecksum; + bInvalid |= pcs->V4Transmit.IpOptionsSupported && !f.fTxIPOptions; + bInvalid |= pcs->V4Transmit.TcpChecksum && !f.fTxTCPChecksum; + bInvalid |= pcs->V4Transmit.TcpOptionsSupported && !f.fTxTCPOptions; + bInvalid |= pcs->V4Transmit.UdpChecksum && !f.fTxUDPChecksum; + return !bInvalid; +} + +/*************************************************** +check that the incoming NDIS_TASK_TCP_LARGE_SEND +does not enable options which we do not support +***************************************************/ +static BOOLEAN IsValidPls( PARANDIS_ADAPTER *pContext, NDIS_TASK_TCP_LARGE_SEND *pls) +{ + tOffloadSettingsFlags f; + BOOLEAN bInvalid = FALSE; + ParaNdis_ResetOffloadSettings(pContext, &f, NULL); + bInvalid |= pls->Version != NDIS_TASK_TCP_LARGE_SEND_V0; + bInvalid |= pls->IpOptions && !f.fTxLsoIP; + bInvalid |= pls->TcpOptions && !f.fTxLsoTCP; + bInvalid |= (pls->IpOptions || pls->TcpOptions || pls->MaxOffLoadSize) && !f.fTxLso; + bInvalid |= pls->MinSegmentCount < PARANDIS_MIN_LSO_SEGMENTS; + return !bInvalid; +} + +static NDIS_STATUS ParseOffloadTask( + PARANDIS_ADAPTER *pContext, + BOOLEAN bApply, /* for 'set'*/ + NDIS_TASK_OFFLOAD *pto, + ULONG 
offset, + ULONG maxSize) +{ + NDIS_STATUS status = NDIS_STATUS_SUCCESS; + NDIS_TASK_TCP_IP_CHECKSUM *pcs = NULL; + NDIS_TASK_TCP_LARGE_SEND *pls = NULL; + NDIS_TASK_IPSEC *pips = NULL; + LPCSTR sName = NULL; + ULONG TaskBufferSize = 0, tailOffset = 0; + switch(pto->Task) + { + case TcpIpChecksumNdisTask: + pcs = (NDIS_TASK_TCP_IP_CHECKSUM *)pto->TaskBuffer; + TaskBufferSize = sizeof(*pcs); + sName = "TcpIpChecksumNdisTask"; + break; + case TcpLargeSendNdisTask: + pls = (NDIS_TASK_TCP_LARGE_SEND *)pto->TaskBuffer; + TaskBufferSize = sizeof(*pls); + sName = "TcpLargeSendNdisTask"; + break; + case IpSecNdisTask: + pips = (NDIS_TASK_IPSEC *)pto->TaskBuffer; + TaskBufferSize = sizeof(*pips); + sName = "IpSecNdisTask"; + break; + default: + break; + } + tailOffset = offset + RtlPointerToOffset(pto, &pto->TaskBuffer) + TaskBufferSize; + if (!TaskBufferSize) + { + DPrintf(0, ("[%s], unknown offload task %d", __FUNCTION__, pto->Task)); + } + else if (tailOffset > maxSize) + { + DPrintf(0, ("[%s], can not parse %s at offset %d, tail at %d", __FUNCTION__, sName, offset, tailOffset)); + status = NDIS_STATUS_BUFFER_TOO_SHORT; + } + else if (TaskBufferSize > pto->TaskBufferLength) + { + DPrintf(0, ("[%s], invalid size of %s", __FUNCTION__, sName)); + status = NDIS_STATUS_BUFFER_TOO_SHORT; + } + else if (pcs) + { + DPrintf(0, ("[%s], parsing %s", __FUNCTION__, sName)); + DPrintf(0, ("Rx4: checksum IP(%d),TCP(%d),UDP(%d), options IP(%d),TCP(%d)", + pcs->V4Receive.IpChecksum, pcs->V4Receive.TcpChecksum, pcs->V4Receive.UdpChecksum, + pcs->V4Receive.IpOptionsSupported, pcs->V4Receive.TcpOptionsSupported + )); + DPrintf(0, ("Tx4: checksum IP(%d),TCP(%d),UDP(%d), options IP(%d),TCP(%d)", + pcs->V4Transmit.IpChecksum, pcs->V4Transmit.TcpChecksum, pcs->V4Transmit.UdpChecksum, + pcs->V4Transmit.IpOptionsSupported, pcs->V4Transmit.TcpOptionsSupported + )); + if (bApply) + { + if (IsValidPcs(pContext, pcs)) + { + tOffloadSettingsFlags *pf = &pContext->Offload.flags; + pf->fTxIPChecksum = !!pcs->V4Transmit.IpChecksum; + pf->fTxTCPChecksum = !!pcs->V4Transmit.TcpChecksum; + pf->fTxUDPChecksum = !!pcs->V4Transmit.UdpChecksum; + pf->fTxTCPOptions = !!pcs->V4Transmit.TcpOptionsSupported; + pf->fTxIPOptions = !!pcs->V4Transmit.IpOptionsSupported; + pf->fRxIPChecksum = !!pcs->V4Receive.IpChecksum; + pf->fRxIPOptions = !!pcs->V4Receive.IpOptionsSupported; + pf->fRxTCPChecksum = !!pcs->V4Receive.TcpChecksum; + pf->fRxTCPOptions = !!pcs->V4Receive.TcpOptionsSupported; + pf->fRxUDPChecksum = !!pcs->V4Receive.UdpChecksum; + } + else + status = STATUS_NOT_SUPPORTED; + } + } + else if (pls) + { + DPrintf(0, ("[%s], parsing %s version %d", __FUNCTION__, sName, pls->Version)); + DPrintf(0, ("options IP(%d),TCP(%d),MaxOffload %d, MinSegments %d", + pls->IpOptions, pls->TcpOptions, pls->MaxOffLoadSize, pls->MinSegmentCount)); + if (bApply) + { + if (IsValidPls(pContext, pls)) + { + tOffloadSettingsFlags *pf = &pContext->Offload.flags; + pf->fTxLsoIP = !!pls->IpOptions; + pf->fTxLsoTCP = !!pls->TcpOptions; + pf->fTxLso = 1; + } + else + status = STATUS_NOT_SUPPORTED; + } + } + else if (pips) + { + DPrintf(0, ("[%s], parsing %s", __FUNCTION__, sName)); + } + return status; +} + +static FORCEINLINE BOOLEAN ValidateOffloadHeader(NDIS_TASK_OFFLOAD_HEADER *pth) +{ + return + pth->EncapsulationFormat.Encapsulation == IEEE_802_3_Encapsulation && + pth->Version == NDIS_TASK_OFFLOAD_VERSION && + pth->Size == sizeof(*pth); +} + +static NDIS_STATUS ParseOffload( + PARANDIS_ADAPTER *pContext, + NDIS_TASK_OFFLOAD_HEADER *pth, + ULONG size, + 
BOOLEAN bApply, + PCCHAR reason, + BOOLEAN headerOnly) +{ + NDIS_STATUS status = NDIS_STATUS_NOT_SUPPORTED; + BOOLEAN bReset = FALSE; + ULONG ulNoCapabilities = 0; + DPrintf(0, ("[%s](%s), format %d", __FUNCTION__, reason, + pth->EncapsulationFormat.Encapsulation)); + if (ValidateOffloadHeader(pth)) + { + PUCHAR p = (PUCHAR)pth; + LONG offset = (LONG)pth->OffsetFirstTask; + status = NDIS_STATUS_SUCCESS; + DPrintf(0, ("[%s], header version %d, ip header at %d, fixed %d, first at %d", __FUNCTION__, + pth->Version, + pth->EncapsulationFormat.EncapsulationHeaderSize, + pth->EncapsulationFormat.Flags.FixedHeaderSize, + offset)); + if (!offset && bApply) + { + /* disable all the capabilities */ + // according to DDK, 0 at first task offset means disabling all the capabilities + DPrintf(0, ("[%s] RESETTING offload capabilities", __FUNCTION__)); + ParaNdis_ResetOffloadSettings(pContext, NULL, &ulNoCapabilities); + bReset = TRUE; + } + while (!headerOnly && offset > 0 && (offset + sizeof(NDIS_TASK_OFFLOAD)) < size) + { + NDIS_TASK_OFFLOAD *pto = (NDIS_TASK_OFFLOAD *)(p + offset); + if (pto->Version != NDIS_TASK_OFFLOAD_VERSION) + { + DPrintf(0, ("[%s], unexpected TO version %d at %d", + __FUNCTION__, pto->Version, offset)); + status = NDIS_STATUS_INVALID_DATA; + break; + } + status = ParseOffloadTask(pContext, bApply, pto, offset, size); + if (!pto->OffsetNextTask || status != NDIS_STATUS_SUCCESS) + break; + offset += pto->OffsetNextTask; + } + } + if (status == STATUS_SUCCESS && bApply) + pContext->Offload.ipHeaderOffset = bReset ? 0: pth->EncapsulationFormat.EncapsulationHeaderSize; + return status; +} + +/******************************************************** +Fill offload query structure according to our capabilities +********************************************************/ +static BOOLEAN GetTcpIpCheckSumCapabilities( + PARANDIS_ADAPTER *pContext, + NDIS_TASK_TCP_IP_CHECKSUM *pcs) +{ + tOffloadSettingsFlags f; + NdisZeroMemory(pcs, sizeof(*pcs)); + ParaNdis_ResetOffloadSettings(pContext, &f, NULL); + pcs->V4Transmit.IpChecksum = !!f.fTxIPChecksum; + pcs->V4Transmit.TcpChecksum = !!f.fTxTCPChecksum; + pcs->V4Transmit.UdpChecksum = !!f.fTxUDPChecksum; + pcs->V4Transmit.IpOptionsSupported = !!f.fTxIPOptions; + pcs->V4Transmit.TcpOptionsSupported = !!f.fTxTCPOptions; + pcs->V4Receive.IpChecksum = !!f.fRxIPChecksum; + pcs->V4Receive.IpOptionsSupported = !!f.fRxIPOptions; + pcs->V4Receive.TcpChecksum = !!f.fRxTCPChecksum; + pcs->V4Receive.TcpOptionsSupported = !!f.fRxTCPOptions; + pcs->V4Receive.UdpChecksum = !!f.fRxUDPChecksum; + + return + pcs->V4Transmit.IpChecksum || + pcs->V4Transmit.TcpChecksum || + pcs->V4Transmit.UdpChecksum || + pcs->V4Receive.IpChecksum || + pcs->V4Receive.TcpChecksum || + pcs->V4Receive.UdpChecksum; +} + +/******************************************************** +Fill offload query structure according to our capabilities +********************************************************/ +static BOOLEAN GetLargeSendCapabilities( + PARANDIS_ADAPTER *pContext, + NDIS_TASK_TCP_LARGE_SEND *pls) +{ + tOffloadSettingsFlags f; + NdisZeroMemory(pls, sizeof(*pls)); + ParaNdis_ResetOffloadSettings(pContext, &f, NULL); + pls->Version = NDIS_TASK_TCP_LARGE_SEND_V0; + pls->IpOptions = !!f.fTxLsoIP; + pls->TcpOptions = !!f.fTxLsoTCP; + pls->MinSegmentCount = PARANDIS_MIN_LSO_SEGMENTS; + pls->MaxOffLoadSize = pContext->Offload.maxPacketSize; + return f.fTxLso != 0; +} + +/******************************************************** +Allocate and fill our capabilities, dependent on registry 
setting +Note that NDIS tests of WLK1.2 and 1.3 fail (offloadmisc) +if CS capability is indicated and pass if only LSO is indicated +********************************************************/ +NDIS_STATUS CreateOffloadInfo5Internal( + PARANDIS_ADAPTER *pContext, + PVOID *ppInfo, + PULONG pulSize, + PCCHAR reason, + NDIS_TASK_OFFLOAD_HEADER *pHeader) +{ + NDIS_STATUS status = NDIS_STATUS_RESOURCES; + ULONG size = + sizeof(NDIS_TASK_OFFLOAD_HEADER) + + sizeof(NDIS_TASK_OFFLOAD) + sizeof(NDIS_TASK_TCP_IP_CHECKSUM) + + sizeof(NDIS_TASK_OFFLOAD) + sizeof(NDIS_TASK_TCP_LARGE_SEND); + *ppInfo = ParaNdis_AllocateMemory(pContext, size); + if (*ppInfo) + { + ULONG flags = 0; + NDIS_TASK_TCP_IP_CHECKSUM cs; + NDIS_TASK_TCP_LARGE_SEND lso; + flags |= GetTcpIpCheckSumCapabilities(pContext, &cs) ? 2 : 0; + flags |= GetLargeSendCapabilities(pContext, &lso) ? 1 : 0; + if (flags) + { + NDIS_TASK_OFFLOAD_HEADER *ph; + NDIS_TASK_OFFLOAD *pto; + UINT i = 0; + ULONG *pOffset; + PVOID base; + *pulSize = size; + NdisZeroMemory(*ppInfo, size); + ph = (NDIS_TASK_OFFLOAD_HEADER *)*ppInfo; + *ph = *pHeader; + pto = (NDIS_TASK_OFFLOAD *)(ph + 1); + base = ph; + pOffset = &ph->OffsetFirstTask; + ph->OffsetFirstTask = 0; + do + { + if (flags & (1 << i)) + { + flags &= ~(1 << i); + pto->Version = NDIS_TASK_OFFLOAD_VERSION; + pto->Size = sizeof(*pto); + *pOffset = RtlPointerToOffset(base, pto); + base = pto; + pOffset = &pto->OffsetNextTask; + switch(i) + { + case 1: + { + NDIS_TASK_TCP_IP_CHECKSUM *pcs = (NDIS_TASK_TCP_IP_CHECKSUM *)pto->TaskBuffer; + pto->Task = TcpIpChecksumNdisTask; + pto->TaskBufferLength = sizeof(*pcs); + NdisMoveMemory(pcs, &cs, sizeof(cs)); + pto = (NDIS_TASK_OFFLOAD *)(pcs + 1); + break; + } + case 0: + { + NDIS_TASK_TCP_LARGE_SEND *pls = (NDIS_TASK_TCP_LARGE_SEND *)pto->TaskBuffer; + pto->Task = TcpLargeSendNdisTask; + pto->TaskBufferLength = sizeof(*pls); + NdisMoveMemory(pls, &lso, sizeof(lso)); + pto = (NDIS_TASK_OFFLOAD *)(pls + 1); + break; + } + default: + break; + } + } + ++i; + } while (flags); + status = ParseOffload(pContext, ph, size, FALSE, reason, FALSE); + } + else + { + NdisFreeMemory(*ppInfo, 0, 0); + *ppInfo = NULL; + status = NDIS_STATUS_NOT_SUPPORTED; + } + } + return status; +} + + +NDIS_STATUS CreateOffloadInfo5ForQuery( + PARANDIS_ADAPTER *pContext, + tOidDesc *pOid, + PVOID *ppInfo, + PULONG pulSize) +{ + NDIS_TASK_OFFLOAD_HEADER *pth = (NDIS_TASK_OFFLOAD_HEADER *)pOid->InformationBuffer; + NDIS_STATUS status; + *ppInfo = NULL; + *pulSize = 0; + if (pOid->InformationBufferLength < sizeof(*pth)) pth = &ReservedHeader; + status = ParseOffload(pContext, pth, pOid->InformationBufferLength, FALSE, "query enter", TRUE); + if (status == NDIS_STATUS_SUCCESS) + { + CreateOffloadInfo5Internal(pContext, ppInfo, pulSize, "QUERY", pth); + } + return status; +} + +NDIS_STATUS OnOidSetNdis5Offload(PARANDIS_ADAPTER *pContext, tOidDesc *pOid) +{ + NDIS_STATUS status; + status = ParseOffload(pContext, (NDIS_TASK_OFFLOAD_HEADER *)pOid->InformationBuffer, + pOid->InformationBufferLength, TRUE, "SET", FALSE); + if (status == STATUS_SUCCESS) + { +#if 0 // only for logging after SET + PVOID pInfo = NULL; + ULONG dummy = 0; + CreateOffloadInfo5Internal(pContext, &pInfo, &dummy, "UPDATED", &ReservedHeader); + if (pInfo) NdisFreeMemory(pInfo, 0, 0); +#endif + *pOid->pBytesRead = pOid->InformationBufferLength; + } + else + { + DPrintf(0, ("[%s], restoring after unsuccessful set", __FUNCTION__)); + pContext->Offload = pContext->Offload; + } + return status; +} diff --git
a/drivers/network/dd/netkvm/wxp/ParaNdis5.h b/drivers/network/dd/netkvm/wxp/ParaNdis5.h new file mode 100644 index 00000000000..6b3f8357974 --- /dev/null +++ b/drivers/network/dd/netkvm/wxp/ParaNdis5.h @@ -0,0 +1,88 @@ +/* + * This file contains NDIS5.X specific procedure definitions in NDIS driver. + * + * Copyright (c) 2008-2017 Red Hat, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ +#ifndef _PARA_NDIS5_H +#define _PARA_NDIS5_H + +#include "ndis56common.h" + + +NDIS_STATUS NTAPI ParaNdis5_SetOID(IN NDIS_HANDLE MiniportAdapterContext, + IN NDIS_OID Oid, + IN PVOID InformationBuffer, + IN ULONG InformationBufferLength, + OUT PULONG BytesRead, + OUT PULONG BytesNeeded); + +NDIS_STATUS NTAPI ParaNdis5_QueryOID(IN NDIS_HANDLE MiniportAdapterContext, + IN NDIS_OID Oid, + IN PVOID InformationBuffer, + IN ULONG InformationBufferLength, + OUT PULONG BytesWritten, + OUT PULONG BytesNeeded); + + +VOID NTAPI ParaNdis5_SendPackets(IN NDIS_HANDLE MiniportAdapterContext, + IN PPNDIS_PACKET PacketArray, + IN UINT NumberOfPackets); + + +VOID NTAPI ParaNdis5_ReturnPacket(IN NDIS_HANDLE MiniportAdapterContext,IN PNDIS_PACKET Packet); + +VOID ParaNdis5_IndicateConnect(PARANDIS_ADAPTER *pContext, BOOLEAN bConnected); + + +//NDIS 5.1 related functions +VOID NTAPI ParaNdis5_CancelSendPackets(IN NDIS_HANDLE MiniportAdapterContext,IN PVOID CancelId); + +NDIS_STATUS ParaNdis5_StopSend( + PARANDIS_ADAPTER *pContext, + BOOLEAN bStop, + ONPAUSECOMPLETEPROC Callback); +NDIS_STATUS ParaNdis5_StopReceive( + PARANDIS_ADAPTER *pContext, + BOOLEAN bStop, + ONPAUSECOMPLETEPROC Callback + ); +VOID NTAPI ParaNdis5_HandleDPC( + IN NDIS_HANDLE MiniportAdapterContext); + +typedef struct _tagPowerWorkItem +{ + NDIS_WORK_ITEM wi; + PPARANDIS_ADAPTER pContext; + NDIS_DEVICE_POWER_STATE state; +}tPowerWorkItem; + +typedef struct _tagGeneralWorkItem +{ + NDIS_WORK_ITEM wi; + PPARANDIS_ADAPTER pContext; +}tGeneralWorkItem; + +#endif // _PARA_NDIS5_H diff --git a/drivers/network/dd/netkvm/wxp/netkvm.inx b/drivers/network/dd/netkvm/wxp/netkvm.inx new file mode 100644 index 00000000000..991b849b8a0 --- /dev/null +++ b/drivers/network/dd/netkvm/wxp/netkvm.inx @@ -0,0 +1,331 @@ +;/*++ +;INX_COPYRIGHT_1 +;INX_COPYRIGHT_2 +; +;Module Name: +; netkvm.inf +; +;Abstract: +; VirtIO Ethernet Adapter +; +;Installation Notes: +; Using Devcon: Type "devcon install netkvm.inf PCI\VEN_1AF4&DEV_1000&SUBSYS_0001_INX_SUBSYS_VENDOR_ID&REV_00" or +; "devcon install netkvm.inf PCI\VEN_1AF4&DEV_1041&SUBSYS_1100_INX_SUBSYS_VENDOR_ID&REV_01" to install +; +;--*/ + +[version] +Signature = "$Windows NT$" +Class = Net +CatalogFile = netkvm.cat +ClassGUID = {4d36e972-e325-11ce-bfc1-08002be10318} +Provider = %VENDOR% +DriverVer=01/01/2008,0.0.0.1 ; this line will be replaced with stampinf +DriverPackageType = PlugAndPlay +DriverPackageDisplayName = %kvmnet5.DeviceDesc% + +[Manufacturer] +%VENDOR% = NetKVM, NT$ARCH$ + +[NetKVM.NT$ARCH$] +%kvmnet5.DeviceDesc% = kvmnet5.ndi, PCI\VEN_1AF4&DEV_1000&SUBSYS_0001_INX_SUBSYS_VENDOR_ID&REV_00, PCI\VEN_1AF4&DEV_1000 +%kvmnet5.DeviceDesc% = kvmnet5.ndi, PCI\VEN_1AF4&DEV_1041&SUBSYS_1100_INX_SUBSYS_VENDOR_ID&REV_01, PCI\VEN_1AF4&DEV_1041 + + +[kvmnet5.ndi] +Characteristics = 0x84 ; NCF_PHYSICAL | NCF_HAS_UI +BusType = 5 ; PCI +AddReg = kvmnet5.Reg, Parameters +CopyFiles = kvmnet5.CopyFiles +*IfType = 6 +*MediaType = 0 ; NdisMedium802_3 +*PhysicalMediaType = 0 ; NdisPhysicalMediumUnspecified + + +[kvmnet5.ndi.Services] +AddService = netkvm, 2, kvmnet5.Service, kvmnet5.EventLog + + +[kvmnet5.Reg] +HKR, , BusNumber, 0, "0" +HKR, Ndi, Service, 0, "netkvm" +HKR, Ndi\Interfaces, UpperRange, 0, "ndis5" +HKR, Ndi\Interfaces, LowerRange, 0, "ethernet" + +[Parameters] +HKR, Ndi\Params\ConnectRate, ParamDesc, 0, %ConnectRate% +HKR, Ndi\Params\ConnectRate, Default, 0, "1001" +HKR, Ndi\Params\ConnectRate, type, 0, "enum" +HKR, Ndi\Params\ConnectRate\enum, "10", 0, %10M% +HKR, Ndi\Params\ConnectRate\enum, "100", 0, 
%100M% +HKR, Ndi\Params\ConnectRate\enum, "1001", 0, %1G% +HKR, Ndi\Params\ConnectRate\enum, "10000", 0, %10G% + +HKR, Ndi\Params\Priority, ParamDesc, 0, %Priority% +HKR, Ndi\Params\Priority, Default, 0, "1" +HKR, Ndi\Params\Priority, type, 0, "enum" +HKR, Ndi\Params\Priority\enum, "1", 0, %Enable% +HKR, Ndi\Params\Priority\enum, "0", 0, %Disable% + +HKR, Ndi\Params\*PriorityVLANTag, ParamDesc, 0, %PriorityVlanTag% +HKR, Ndi\Params\*PriorityVLANTag, Default, 0, "3" +HKR, Ndi\Params\*PriorityVLANTag, type, 0, "enum" +HKR, Ndi\Params\*PriorityVLANTag\enum, "3", 0, %Priority_Vlan% +HKR, Ndi\Params\*PriorityVLANTag\enum, "2", 0, %VLan% +HKR, Ndi\Params\*PriorityVLANTag\enum, "1", 0, %PriorityOnly% +HKR, Ndi\Params\*PriorityVLANTag\enum, "0", 0, %Disable% + +HKR, Ndi\Params\DoLog, ParamDesc, 0, %EnableLogging% +HKR, Ndi\Params\DoLog, Default, 0, "1" +HKR, Ndi\Params\DoLog, type, 0, "enum" +HKR, Ndi\Params\DoLog\enum, "1", 0, %Enable% +HKR, Ndi\Params\DoLog\enum, "0", 0, %Disable% + +HKR, Ndi\params\DebugLevel, ParamDesc, 0, %DebugLevel% +HKR, Ndi\params\DebugLevel, type, 0, "int" +HKR, Ndi\params\DebugLevel, default, 0, "0" +HKR, Ndi\params\DebugLevel, min, 0, "0" +HKR, Ndi\params\DebugLevel, max, 0, "8" +HKR, Ndi\params\DebugLevel, step, 0, "1" + +HKR, Ndi\params\LogStatistics, ParamDesc, 0, %LogStatistics% +HKR, Ndi\params\LogStatistics, type, 0, "int" +HKR, Ndi\params\LogStatistics, default, 0, "0" +HKR, Ndi\params\LogStatistics, min, 0, "0" +HKR, Ndi\params\LogStatistics, max, 0, "10000" +HKR, Ndi\params\LogStatistics, step, 0, "1" + +HKR, Ndi\params\MTU, ParamDesc, 0, %MTU% +HKR, Ndi\params\MTU, type, 0, "long" +HKR, Ndi\params\MTU, default, 0, "1500" +HKR, Ndi\params\MTU, min, 0, "500" +HKR, Ndi\params\MTU, max, 0, "65500" +HKR, Ndi\params\MTU, step, 0, "1" + +HKR, Ndi\params\TxCapacity, ParamDesc, 0, %TxCapacity% +HKR, Ndi\params\TxCapacity, type, 0, "enum" +HKR, Ndi\params\TxCapacity, default, 0, "1024" +HKR, Ndi\Params\TxCapacity\enum, "16", 0, %String_16% +HKR, Ndi\Params\TxCapacity\enum, "32", 0, %String_32% +HKR, Ndi\Params\TxCapacity\enum, "64", 0, %String_64% +HKR, Ndi\Params\TxCapacity\enum, "128", 0, %String_128% +HKR, Ndi\Params\TxCapacity\enum, "256", 0, %String_256% +HKR, Ndi\Params\TxCapacity\enum, "512", 0, %String_512% +HKR, Ndi\Params\TxCapacity\enum, "1024", 0, %String_1024% + +HKR, Ndi\params\RxCapacity, ParamDesc, 0, %RxCapacity% +HKR, Ndi\params\RxCapacity, type, 0, "enum" +HKR, Ndi\params\RxCapacity, default, 0, "256" +HKR, Ndi\Params\RxCapacity\enum, "16", 0, %String_16% +HKR, Ndi\Params\RxCapacity\enum, "32", 0, %String_32% +HKR, Ndi\Params\RxCapacity\enum, "64", 0, %String_64% +HKR, Ndi\Params\RxCapacity\enum, "128", 0, %String_128% +HKR, Ndi\Params\RxCapacity\enum, "256", 0, %String_256% +HKR, Ndi\Params\RxCapacity\enum, "512", 0, %String_512% +HKR, Ndi\Params\RxCapacity\enum, "1024", 0, %String_1024% + +HKR, Ndi\Params\MergeableBuf, ParamDesc, 0, %MergeableBuf% +HKR, Ndi\Params\MergeableBuf, Default, 0, "1" +HKR, Ndi\Params\MergeableBuf, type, 0, "enum" +HKR, Ndi\Params\MergeableBuf\enum, "1", 0, %Enable% +HKR, Ndi\Params\MergeableBuf\enum, "0", 0, %Disable% + +HKR, Ndi\params\NetworkAddress, ParamDesc, 0, %NetworkAddress% +HKR, Ndi\params\NetworkAddress, type, 0, "edit" +HKR, Ndi\params\NetworkAddress, Optional, 0, "1" + +HKR, Ndi\Params\Indirect, ParamDesc, 0, %Indirect% +HKR, Ndi\Params\Indirect, Default, 0, "0" +HKR, Ndi\Params\Indirect, type, 0, "enum" +HKR, Ndi\Params\Indirect\enum, "0", 0, %Disable% +HKR, Ndi\Params\Indirect\enum, "1", 0, %Enable% +HKR, 
Ndi\Params\Indirect\enum, "2", 0, %Enable*% + +HKR, Ndi\Params\OffLoad.TxChecksum, ParamDesc, 0, %OffLoad.TxChecksum% +HKR, Ndi\Params\OffLoad.TxChecksum, Default, 0, "0" +HKR, Ndi\Params\OffLoad.TxChecksum, type, 0, "enum" +HKR, Ndi\Params\OffLoad.TxChecksum\enum, "31", 0, %All% +HKR, Ndi\Params\OffLoad.TxChecksum\enum, "27", 0, %TCPUDPAll% +HKR, Ndi\Params\OffLoad.TxChecksum\enum, "3", 0, %TCPUDPv4% +HKR, Ndi\Params\OffLoad.TxChecksum\enum, "1", 0, %TCPv4% +HKR, Ndi\Params\OffLoad.TxChecksum\enum, "0", 0, %Disable% + +HKR, Ndi\Params\OffLoad.TxLSO, ParamDesc, 0, %OffLoad.TxLSO% +HKR, Ndi\Params\OffLoad.TxLSO, Default, 0, "2" +HKR, Ndi\Params\OffLoad.TxLSO, type, 0, "enum" +HKR, Ndi\Params\OffLoad.TxLSO\enum, "2", 0, %Maximal% +HKR, Ndi\Params\OffLoad.TxLSO\enum, "1", 0, %IPv4% +HKR, Ndi\Params\OffLoad.TxLSO\enum, "0", 0, %Disable% + +HKR, Ndi\Params\OffLoad.RxCS, ParamDesc, 0, %OffLoad.RxCS% +HKR, Ndi\Params\OffLoad.RxCS, Default, 0, "0" +HKR, Ndi\Params\OffLoad.RxCS, type, 0, "enum" +HKR, Ndi\Params\OffLoad.RxCS\enum, "31", 0, %All% +HKR, Ndi\Params\OffLoad.RxCS\enum, "27", 0, %TCPUDPAll% +HKR, Ndi\Params\OffLoad.RxCS\enum, "3", 0, %TCPUDPv4% +HKR, Ndi\Params\OffLoad.RxCS\enum, "1", 0, %TCPv4% +HKR, Ndi\Params\OffLoad.RxCS\enum, "0", 0, %Disable% + +HKR, Ndi\params\ConnectTimer, ParamDesc, 0, %ConnectAfter% +HKR, Ndi\params\ConnectTimer, type, 0, "long" +HKR, Ndi\params\ConnectTimer, default, 0, "0" +HKR, Ndi\params\ConnectTimer, min, 0, "0" +HKR, Ndi\params\ConnectTimer, max, 0, "300000" +HKR, Ndi\params\ConnectTimer, step, 0, "50" + +HKR, Ndi\Params\DpcCheck, ParamDesc, 0, %DPCCheck% +HKR, Ndi\Params\DpcCheck, Default, 0, "0" +HKR, Ndi\Params\DpcCheck, type, 0, "enum" +HKR, Ndi\Params\DpcCheck\enum, "1", 0, %Enable% +HKR, Ndi\Params\DpcCheck\enum, "0", 0, %Disable% +HKR, Ndi\Params\DpcCheck\enum, "2", 0, %Enable*% + +HKR, Ndi\Params\Gather, ParamDesc, 0, %Gather% +HKR, Ndi\Params\Gather, Default, 0, "1" +HKR, Ndi\Params\Gather, type, 0, "enum" +HKR, Ndi\Params\Gather\enum, "1", 0, %Enable% +HKR, Ndi\Params\Gather\enum, "0", 0, %Disable% + +HKR, Ndi\Params\Offload.GuestCS, ParamDesc, 0, %GuestCS% +HKR, Ndi\Params\Offload.GuestCS, Default, 0, "0" +HKR, Ndi\Params\Offload.GuestCS, type, 0, "enum" +HKR, Ndi\Params\Offload.GuestCS\enum,"1", 0, %Enable% +HKR, Ndi\Params\Offload.GuestCS\enum,"0", 0, %Disable% + +HKR, Ndi\Params\InterruptRecovery, ParamDesc, 0, %InterruptRecovery% +HKR, Ndi\Params\InterruptRecovery, Default, 0, "1" +HKR, Ndi\Params\InterruptRecovery, type, 0, "enum" +HKR, Ndi\Params\InterruptRecovery\enum, "1", 0, %Enable% +HKR, Ndi\Params\InterruptRecovery\enum, "0", 0, %Disable% + +HKR, Ndi\params\NumberOfHandledRXPackersInDPC, ParamDesc, 0, %NumberOfHandledRXPackersInDPC% +HKR, Ndi\params\NumberOfHandledRXPackersInDPC, type, 0, "long" +HKR, Ndi\params\NumberOfHandledRXPackersInDPC, default, 0, "1000" +HKR, Ndi\params\NumberOfHandledRXPackersInDPC, min, 0, "1" +HKR, Ndi\params\NumberOfHandledRXPackersInDPC, max, 0, "10000" +HKR, Ndi\params\NumberOfHandledRXPackersInDPC, step, 0, "1" + +HKR, Ndi\Params\PacketFilter, ParamDesc, 0, %PacketFilter% +HKR, Ndi\Params\PacketFilter, Default, 0, "1" +HKR, Ndi\Params\PacketFilter, type, 0, "enum" +HKR, Ndi\Params\PacketFilter\enum, "1", 0, %Enable% +HKR, Ndi\Params\PacketFilter\enum, "0", 0, %Disable% + +HKR, Ndi\Params\BatchReceive, ParamDesc, 0, %BatchReceive% +HKR, Ndi\Params\BatchReceive, Default, 0, "1" +HKR, Ndi\Params\BatchReceive, type, 0, "enum" +HKR, Ndi\Params\BatchReceive\enum, "1", 0, %Enable% +HKR, 
Ndi\Params\BatchReceive\enum, "0", 0, %Disable% + +HKR, Ndi\Params\Promiscuous, ParamDesc, 0, %Promiscuous% +HKR, Ndi\Params\Promiscuous, Default, 0, "0" +HKR, Ndi\Params\Promiscuous, type, 0, "enum" +HKR, Ndi\Params\Promiscuous\enum, "1", 0, %Enable% +HKR, Ndi\Params\Promiscuous\enum, "0", 0, %Disable% + +HKR, Ndi\Params\IPPacketsCheck, ParamDesc, 0, %IPPacketsCheck% +HKR, Ndi\Params\IPPacketsCheck, Default, 0, "0" +HKR, Ndi\Params\IPPacketsCheck, type, 0, "enum" +HKR, Ndi\Params\IPPacketsCheck\enum,"0", 0, %Disable% +HKR, Ndi\Params\IPPacketsCheck\enum,"1", 0, %Tx% +HKR, Ndi\Params\IPPacketsCheck\enum,"2", 0, %Rx% +HKR, Ndi\Params\IPPacketsCheck\enum,"3", 0, %TxRx% + +HKR, Ndi\Params\UseSwTxChecksum, ParamDesc, 0, %UseSwTxChecksum% +HKR, Ndi\Params\UseSwTxChecksum, Default, 0, "0" +HKR, Ndi\Params\UseSwTxChecksum, type, 0, "enum" +HKR, Ndi\Params\UseSwTxChecksum\enum, "1", 0, %Enable% +HKR, Ndi\Params\UseSwTxChecksum\enum, "0", 0, %Disable% + +[kvmnet5.CopyFiles] +netkvm.sys,,,2 + +[kvmnet5.Service] +DisplayName = %kvmnet5.Service.DispName% +ServiceType = 1 ;%SERVICE_KERNEL_DRIVER% +StartType = 3 ;%SERVICE_DEMAND_START% +ErrorControl = 1 ;%SERVICE_ERROR_NORMAL% +ServiceBinary = %12%\netkvm.sys +LoadOrderGroup = NDIS +AddReg = TextModeFlags.Reg + +[kvmnet5.EventLog] +AddReg = kvmnet5.AddEventLog.Reg + +[kvmnet5.AddEventLog.Reg] +HKR, , EventMessageFile, 0x00020000, "%%SystemRoot%%\System32\netevent.dll" +HKR, , TypesSupported, 0x00010001, 7 + +[TextModeFlags.Reg] +HKR, , TextModeFlags, 0x00010001, 0x0001 + +[SourceDisksNames] +1 = %DiskId1%,,,"" + +[SourceDisksFiles] +netkvm.sys = 1,, + +[DestinationDirs] +kvmnet5.CopyFiles = 12 + +[Strings] +VENDOR = "INX_COMPANY" +kvmnet5.DeviceDesc = "INX_PREFIX_VENDORVirtIO Ethernet Adapter" +kvmnet5.Service.DispName = "INX_PREFIX_VENDORVirtIO Ethernet Adapter Service" +DiskId1 = "INX_PREFIX_VENDORVirtIO Ethernet Adapter Driver Disk #1" + +NetworkAddress = "Assign MAC" +ConnectRate = "Init.ConnectionRate(Mb)" +Priority = "Init.Do802.1PQ" +MergeableBuf = "Init.UseMergedBuffers" +MTU = "Init.MTUSize" +Indirect = "Init.IndirectTx" +TxCapacity = "Init.MaxTxBuffers" +RxCapacity = "Init.MaxRxBuffers" +Offload.TxChecksum = "Offload.Tx.Checksum" +Offload.TxLSO = "Offload.Tx.LSO" +Offload.RxCS = "Offload.Rx.Checksum" +EnableLogging = "Logging.Enable" +DebugLevel = "Logging.Level" +LogStatistics = "Logging.Statistics(sec)" + +ConnectAfter = "TestOnly.DelayConnect(ms)" +DPCCheck = "TestOnly.DPCChecking" +Gather = "TestOnly.Scatter-Gather" +GuestCS = "TestOnly.GuestChecksum" +InterruptRecovery = "TestOnly.InterruptRecovery" +PacketFilter = "TestOnly.HwPacketFilter" +BatchReceive = "TestOnly.BatchReceive" +Promiscuous = "TestOnly.Promiscuous" +IPPacketsCheck = "TestOnly.AnalyzeIPPackets" +NumberOfHandledRXPackersInDPC = "TestOnly.RXThrottle" +UseSwTxChecksum = "TestOnly.UseSwTxChecksum" +Tx = "Tx Enabled"; +Rx = "Rx Enabled"; +TxRx = "Rx & Tx Enabled"; + +Disable = "Disabled" +Enable = "Enabled" +Enable* = "Enabled*" +String_16 = "16" +String_32 = "32" +String_64 = "64" +String_128 = "128" +String_256 = "256" +String_512 = "512" +String_1024 = "1024" +PriorityVlanTag = "Priority and VLAN tagging" +PriorityOnly = "Priority" +VLan = "VLan" +Priority_Vlan = "All" +10M = "10M" +100M = "100M" +1G = "1G" +10G = "10G" +TCPv4 = "TCP(v4)" +TCPUDPv4 = "TCP/UDP(v4)" +TCPUDPAll = "TCP/UDP(v4,v6)" +All = "All" +IPv4 = "IPv4" +Maximal = "Maximal" + diff --git a/drivers/network/dd/netkvm/wxp/parandis.rc b/drivers/network/dd/netkvm/wxp/parandis.rc new file mode 100644 index 
00000000000..97e0fabd7a4 --- /dev/null +++ b/drivers/network/dd/netkvm/wxp/parandis.rc @@ -0,0 +1,31 @@ +/* + * This file contains resource (version) definitions for NDIS 5 driver. + * + * Copyright (c) 2008-2017 Red Hat, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met : + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and / or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of their contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include + diff --git a/media/inf/CMakeLists.txt b/media/inf/CMakeLists.txt index efa4d7d1193..6d0a50bb1b6 100644 --- a/media/inf/CMakeLists.txt +++ b/media/inf/CMakeLists.txt @@ -28,6 +28,7 @@ list(APPEND INF_FILES netamd.inf nete1000.inf netisa.inf + netkvm.inf netrtl.inf netrtpnt.inf nettcpip.inf diff --git a/media/inf/netkvm.inf b/media/inf/netkvm.inf new file mode 100644 index 00000000000..72ef931113d --- /dev/null +++ b/media/inf/netkvm.inf @@ -0,0 +1,331 @@ +;/*++ +;Copyright (c) 2008-2019 Red Hat Inc. 
+; +; +;Module Name: +; netkvm.inf +; +;Abstract: +; VirtIO Ethernet Adapter +; +;Installation Notes: +; Using Devcon: Type "devcon install netkvm.inf PCI\VEN_1AF4&DEV_1000&SUBSYS_00011AF4&REV_00" or +; "devcon install netkvm.inf PCI\VEN_1AF4&DEV_1041&SUBSYS_11001AF4&REV_01" to install +; +;--*/ + +[version] +Signature = "$Windows NT$" +Class = Net +CatalogFile = netkvm.cat +ClassGUID = {4d36e972-e325-11ce-bfc1-08002be10318} +Provider = %VENDOR% +DriverVer = 04/12/2019,51.77.104.17100 +DriverPackageType = PlugAndPlay +DriverPackageDisplayName = %kvmnet5.DeviceDesc% + +[Manufacturer] +%VENDOR% = NetKVM, NTx86.5.1 + +[NetKVM.NTx86.5.1] +%kvmnet5.DeviceDesc% = kvmnet5.ndi, PCI\VEN_1AF4&DEV_1000&SUBSYS_00011AF4&REV_00, PCI\VEN_1AF4&DEV_1000 +%kvmnet5.DeviceDesc% = kvmnet5.ndi, PCI\VEN_1AF4&DEV_1041&SUBSYS_11001AF4&REV_01, PCI\VEN_1AF4&DEV_1041 + + +[kvmnet5.ndi] +Characteristics = 0x84 ; NCF_PHYSICAL | NCF_HAS_UI +BusType = 5 ; PCI +AddReg = kvmnet5.Reg, Parameters +CopyFiles = kvmnet5.CopyFiles +*IfType = 6 +*MediaType = 0 ; NdisMedium802_3 +*PhysicalMediaType = 0 ; NdisPhysicalMediumUnspecified + + +[kvmnet5.ndi.Services] +AddService = netkvm, 2, kvmnet5.Service, kvmnet5.EventLog + + +[kvmnet5.Reg] +HKR, , BusNumber, 0, "0" +HKR, Ndi, Service, 0, "netkvm" +HKR, Ndi\Interfaces, UpperRange, 0, "ndis5" +HKR, Ndi\Interfaces, LowerRange, 0, "ethernet" + +[Parameters] +HKR, Ndi\Params\ConnectRate, ParamDesc, 0, %ConnectRate% +HKR, Ndi\Params\ConnectRate, Default, 0, "1001" +HKR, Ndi\Params\ConnectRate, type, 0, "enum" +HKR, Ndi\Params\ConnectRate\enum, "10", 0, %10M% +HKR, Ndi\Params\ConnectRate\enum, "100", 0, %100M% +HKR, Ndi\Params\ConnectRate\enum, "1001", 0, %1G% +HKR, Ndi\Params\ConnectRate\enum, "10000", 0, %10G% + +HKR, Ndi\Params\Priority, ParamDesc, 0, %Priority% +HKR, Ndi\Params\Priority, Default, 0, "1" +HKR, Ndi\Params\Priority, type, 0, "enum" +HKR, Ndi\Params\Priority\enum, "1", 0, %Enable% +HKR, Ndi\Params\Priority\enum, "0", 0, %Disable% + +HKR, Ndi\Params\*PriorityVLANTag, ParamDesc, 0, %PriorityVlanTag% +HKR, Ndi\Params\*PriorityVLANTag, Default, 0, "3" +HKR, Ndi\Params\*PriorityVLANTag, type, 0, "enum" +HKR, Ndi\Params\*PriorityVLANTag\enum, "3", 0, %Priority_Vlan% +HKR, Ndi\Params\*PriorityVLANTag\enum, "2", 0, %VLan% +HKR, Ndi\Params\*PriorityVLANTag\enum, "1", 0, %PriorityOnly% +HKR, Ndi\Params\*PriorityVLANTag\enum, "0", 0, %Disable% + +HKR, Ndi\Params\DoLog, ParamDesc, 0, %EnableLogging% +HKR, Ndi\Params\DoLog, Default, 0, "1" +HKR, Ndi\Params\DoLog, type, 0, "enum" +HKR, Ndi\Params\DoLog\enum, "1", 0, %Enable% +HKR, Ndi\Params\DoLog\enum, "0", 0, %Disable% + +HKR, Ndi\params\DebugLevel, ParamDesc, 0, %DebugLevel% +HKR, Ndi\params\DebugLevel, type, 0, "int" +HKR, Ndi\params\DebugLevel, default, 0, "0" +HKR, Ndi\params\DebugLevel, min, 0, "0" +HKR, Ndi\params\DebugLevel, max, 0, "8" +HKR, Ndi\params\DebugLevel, step, 0, "1" + +HKR, Ndi\params\LogStatistics, ParamDesc, 0, %LogStatistics% +HKR, Ndi\params\LogStatistics, type, 0, "int" +HKR, Ndi\params\LogStatistics, default, 0, "0" +HKR, Ndi\params\LogStatistics, min, 0, "0" +HKR, Ndi\params\LogStatistics, max, 0, "10000" +HKR, Ndi\params\LogStatistics, step, 0, "1" + +HKR, Ndi\params\MTU, ParamDesc, 0, %MTU% +HKR, Ndi\params\MTU, type, 0, "long" +HKR, Ndi\params\MTU, default, 0, "1500" +HKR, Ndi\params\MTU, min, 0, "500" +HKR, Ndi\params\MTU, max, 0, "65500" +HKR, Ndi\params\MTU, step, 0, "1" + +HKR, Ndi\params\TxCapacity, ParamDesc, 0, %TxCapacity% +HKR, Ndi\params\TxCapacity, type, 0, "enum" +HKR, Ndi\params\TxCapacity, 
default, 0, "1024" +HKR, Ndi\Params\TxCapacity\enum, "16", 0, %String_16% +HKR, Ndi\Params\TxCapacity\enum, "32", 0, %String_32% +HKR, Ndi\Params\TxCapacity\enum, "64", 0, %String_64% +HKR, Ndi\Params\TxCapacity\enum, "128", 0, %String_128% +HKR, Ndi\Params\TxCapacity\enum, "256", 0, %String_256% +HKR, Ndi\Params\TxCapacity\enum, "512", 0, %String_512% +HKR, Ndi\Params\TxCapacity\enum, "1024", 0, %String_1024% + +HKR, Ndi\params\RxCapacity, ParamDesc, 0, %RxCapacity% +HKR, Ndi\params\RxCapacity, type, 0, "enum" +HKR, Ndi\params\RxCapacity, default, 0, "256" +HKR, Ndi\Params\RxCapacity\enum, "16", 0, %String_16% +HKR, Ndi\Params\RxCapacity\enum, "32", 0, %String_32% +HKR, Ndi\Params\RxCapacity\enum, "64", 0, %String_64% +HKR, Ndi\Params\RxCapacity\enum, "128", 0, %String_128% +HKR, Ndi\Params\RxCapacity\enum, "256", 0, %String_256% +HKR, Ndi\Params\RxCapacity\enum, "512", 0, %String_512% +HKR, Ndi\Params\RxCapacity\enum, "1024", 0, %String_1024% + +HKR, Ndi\Params\MergeableBuf, ParamDesc, 0, %MergeableBuf% +HKR, Ndi\Params\MergeableBuf, Default, 0, "1" +HKR, Ndi\Params\MergeableBuf, type, 0, "enum" +HKR, Ndi\Params\MergeableBuf\enum, "1", 0, %Enable% +HKR, Ndi\Params\MergeableBuf\enum, "0", 0, %Disable% + +HKR, Ndi\params\NetworkAddress, ParamDesc, 0, %NetworkAddress% +HKR, Ndi\params\NetworkAddress, type, 0, "edit" +HKR, Ndi\params\NetworkAddress, Optional, 0, "1" + +HKR, Ndi\Params\Indirect, ParamDesc, 0, %Indirect% +HKR, Ndi\Params\Indirect, Default, 0, "0" +HKR, Ndi\Params\Indirect, type, 0, "enum" +HKR, Ndi\Params\Indirect\enum, "0", 0, %Disable% +HKR, Ndi\Params\Indirect\enum, "1", 0, %Enable% +HKR, Ndi\Params\Indirect\enum, "2", 0, %Enable*% + +HKR, Ndi\Params\OffLoad.TxChecksum, ParamDesc, 0, %OffLoad.TxChecksum% +HKR, Ndi\Params\OffLoad.TxChecksum, Default, 0, "0" +HKR, Ndi\Params\OffLoad.TxChecksum, type, 0, "enum" +HKR, Ndi\Params\OffLoad.TxChecksum\enum, "31", 0, %All% +HKR, Ndi\Params\OffLoad.TxChecksum\enum, "27", 0, %TCPUDPAll% +HKR, Ndi\Params\OffLoad.TxChecksum\enum, "3", 0, %TCPUDPv4% +HKR, Ndi\Params\OffLoad.TxChecksum\enum, "1", 0, %TCPv4% +HKR, Ndi\Params\OffLoad.TxChecksum\enum, "0", 0, %Disable% + +HKR, Ndi\Params\OffLoad.TxLSO, ParamDesc, 0, %OffLoad.TxLSO% +HKR, Ndi\Params\OffLoad.TxLSO, Default, 0, "2" +HKR, Ndi\Params\OffLoad.TxLSO, type, 0, "enum" +HKR, Ndi\Params\OffLoad.TxLSO\enum, "2", 0, %Maximal% +HKR, Ndi\Params\OffLoad.TxLSO\enum, "1", 0, %IPv4% +HKR, Ndi\Params\OffLoad.TxLSO\enum, "0", 0, %Disable% + +HKR, Ndi\Params\OffLoad.RxCS, ParamDesc, 0, %OffLoad.RxCS% +HKR, Ndi\Params\OffLoad.RxCS, Default, 0, "0" +HKR, Ndi\Params\OffLoad.RxCS, type, 0, "enum" +HKR, Ndi\Params\OffLoad.RxCS\enum, "31", 0, %All% +HKR, Ndi\Params\OffLoad.RxCS\enum, "27", 0, %TCPUDPAll% +HKR, Ndi\Params\OffLoad.RxCS\enum, "3", 0, %TCPUDPv4% +HKR, Ndi\Params\OffLoad.RxCS\enum, "1", 0, %TCPv4% +HKR, Ndi\Params\OffLoad.RxCS\enum, "0", 0, %Disable% + +HKR, Ndi\params\ConnectTimer, ParamDesc, 0, %ConnectAfter% +HKR, Ndi\params\ConnectTimer, type, 0, "long" +HKR, Ndi\params\ConnectTimer, default, 0, "0" +HKR, Ndi\params\ConnectTimer, min, 0, "0" +HKR, Ndi\params\ConnectTimer, max, 0, "300000" +HKR, Ndi\params\ConnectTimer, step, 0, "50" + +HKR, Ndi\Params\DpcCheck, ParamDesc, 0, %DPCCheck% +HKR, Ndi\Params\DpcCheck, Default, 0, "0" +HKR, Ndi\Params\DpcCheck, type, 0, "enum" +HKR, Ndi\Params\DpcCheck\enum, "1", 0, %Enable% +HKR, Ndi\Params\DpcCheck\enum, "0", 0, %Disable% +HKR, Ndi\Params\DpcCheck\enum, "2", 0, %Enable*% + +HKR, Ndi\Params\Gather, ParamDesc, 0, %Gather% +HKR, 
Ndi\Params\Gather, Default, 0, "1" +HKR, Ndi\Params\Gather, type, 0, "enum" +HKR, Ndi\Params\Gather\enum, "1", 0, %Enable% +HKR, Ndi\Params\Gather\enum, "0", 0, %Disable% + +HKR, Ndi\Params\Offload.GuestCS, ParamDesc, 0, %GuestCS% +HKR, Ndi\Params\Offload.GuestCS, Default, 0, "0" +HKR, Ndi\Params\Offload.GuestCS, type, 0, "enum" +HKR, Ndi\Params\Offload.GuestCS\enum,"1", 0, %Enable% +HKR, Ndi\Params\Offload.GuestCS\enum,"0", 0, %Disable% + +HKR, Ndi\Params\InterruptRecovery, ParamDesc, 0, %InterruptRecovery% +HKR, Ndi\Params\InterruptRecovery, Default, 0, "1" +HKR, Ndi\Params\InterruptRecovery, type, 0, "enum" +HKR, Ndi\Params\InterruptRecovery\enum, "1", 0, %Enable% +HKR, Ndi\Params\InterruptRecovery\enum, "0", 0, %Disable% + +HKR, Ndi\params\NumberOfHandledRXPackersInDPC, ParamDesc, 0, %NumberOfHandledRXPackersInDPC% +HKR, Ndi\params\NumberOfHandledRXPackersInDPC, type, 0, "long" +HKR, Ndi\params\NumberOfHandledRXPackersInDPC, default, 0, "1000" +HKR, Ndi\params\NumberOfHandledRXPackersInDPC, min, 0, "1" +HKR, Ndi\params\NumberOfHandledRXPackersInDPC, max, 0, "10000" +HKR, Ndi\params\NumberOfHandledRXPackersInDPC, step, 0, "1" + +HKR, Ndi\Params\PacketFilter, ParamDesc, 0, %PacketFilter% +HKR, Ndi\Params\PacketFilter, Default, 0, "1" +HKR, Ndi\Params\PacketFilter, type, 0, "enum" +HKR, Ndi\Params\PacketFilter\enum, "1", 0, %Enable% +HKR, Ndi\Params\PacketFilter\enum, "0", 0, %Disable% + +HKR, Ndi\Params\BatchReceive, ParamDesc, 0, %BatchReceive% +HKR, Ndi\Params\BatchReceive, Default, 0, "1" +HKR, Ndi\Params\BatchReceive, type, 0, "enum" +HKR, Ndi\Params\BatchReceive\enum, "1", 0, %Enable% +HKR, Ndi\Params\BatchReceive\enum, "0", 0, %Disable% + +HKR, Ndi\Params\Promiscuous, ParamDesc, 0, %Promiscuous% +HKR, Ndi\Params\Promiscuous, Default, 0, "0" +HKR, Ndi\Params\Promiscuous, type, 0, "enum" +HKR, Ndi\Params\Promiscuous\enum, "1", 0, %Enable% +HKR, Ndi\Params\Promiscuous\enum, "0", 0, %Disable% + +HKR, Ndi\Params\IPPacketsCheck, ParamDesc, 0, %IPPacketsCheck% +HKR, Ndi\Params\IPPacketsCheck, Default, 0, "0" +HKR, Ndi\Params\IPPacketsCheck, type, 0, "enum" +HKR, Ndi\Params\IPPacketsCheck\enum,"0", 0, %Disable% +HKR, Ndi\Params\IPPacketsCheck\enum,"1", 0, %Tx% +HKR, Ndi\Params\IPPacketsCheck\enum,"2", 0, %Rx% +HKR, Ndi\Params\IPPacketsCheck\enum,"3", 0, %TxRx% + +HKR, Ndi\Params\UseSwTxChecksum, ParamDesc, 0, %UseSwTxChecksum% +HKR, Ndi\Params\UseSwTxChecksum, Default, 0, "0" +HKR, Ndi\Params\UseSwTxChecksum, type, 0, "enum" +HKR, Ndi\Params\UseSwTxChecksum\enum, "1", 0, %Enable% +HKR, Ndi\Params\UseSwTxChecksum\enum, "0", 0, %Disable% + +[kvmnet5.CopyFiles] +netkvm.sys,,,2 + +[kvmnet5.Service] +DisplayName = %kvmnet5.Service.DispName% +ServiceType = 1 ;%SERVICE_KERNEL_DRIVER% +StartType = 3 ;%SERVICE_DEMAND_START% +ErrorControl = 1 ;%SERVICE_ERROR_NORMAL% +ServiceBinary = %12%\netkvm.sys +LoadOrderGroup = NDIS +AddReg = TextModeFlags.Reg + +[kvmnet5.EventLog] +AddReg = kvmnet5.AddEventLog.Reg + +[kvmnet5.AddEventLog.Reg] +HKR, , EventMessageFile, 0x00020000, "%%SystemRoot%%\System32\netevent.dll" +HKR, , TypesSupported, 0x00010001, 7 + +[TextModeFlags.Reg] +HKR, , TextModeFlags, 0x00010001, 0x0001 + +[SourceDisksNames] +1 = %DiskId1%,,,"" + +[SourceDisksFiles] +netkvm.sys = 1,, + +[DestinationDirs] +kvmnet5.CopyFiles = 12 + +[Strings] +VENDOR = "Red Hat, Inc." 
+kvmnet5.DeviceDesc = "Red Hat VirtIO Ethernet Adapter" +kvmnet5.Service.DispName = "Red Hat VirtIO Ethernet Adapter Service" +DiskId1 = "Red Hat VirtIO Ethernet Adapter Driver Disk #1" + +NetworkAddress = "Assign MAC" +ConnectRate = "Init.ConnectionRate(Mb)" +Priority = "Init.Do802.1PQ" +MergeableBuf = "Init.UseMergedBuffers" +MTU = "Init.MTUSize" +Indirect = "Init.IndirectTx" +TxCapacity = "Init.MaxTxBuffers" +RxCapacity = "Init.MaxRxBuffers" +Offload.TxChecksum = "Offload.Tx.Checksum" +Offload.TxLSO = "Offload.Tx.LSO" +Offload.RxCS = "Offload.Rx.Checksum" +EnableLogging = "Logging.Enable" +DebugLevel = "Logging.Level" +LogStatistics = "Logging.Statistics(sec)" + +ConnectAfter = "TestOnly.DelayConnect(ms)" +DPCCheck = "TestOnly.DPCChecking" +Gather = "TestOnly.Scatter-Gather" +GuestCS = "TestOnly.GuestChecksum" +InterruptRecovery = "TestOnly.InterruptRecovery" +PacketFilter = "TestOnly.HwPacketFilter" +BatchReceive = "TestOnly.BatchReceive" +Promiscuous = "TestOnly.Promiscuous" +IPPacketsCheck = "TestOnly.AnalyzeIPPackets" +NumberOfHandledRXPackersInDPC = "TestOnly.RXThrottle" +UseSwTxChecksum = "TestOnly.UseSwTxChecksum" +Tx = "Tx Enabled"; +Rx = "Rx Enabled"; +TxRx = "Rx & Tx Enabled"; + +Disable = "Disabled" +Enable = "Enabled" +Enable* = "Enabled*" +String_16 = "16" +String_32 = "32" +String_64 = "64" +String_128 = "128" +String_256 = "256" +String_512 = "512" +String_1024 = "1024" +PriorityVlanTag = "Priority and VLAN tagging" +PriorityOnly = "Priority" +VLan = "VLan" +Priority_Vlan = "All" +10M = "10M" +100M = "100M" +1G = "1G" +10G = "10G" +TCPv4 = "TCP(v4)" +TCPUDPv4 = "TCP/UDP(v4)" +TCPUDPAll = "TCP/UDP(v4,v6)" +All = "All" +IPv4 = "IPv4" +Maximal = "Maximal" + diff --git a/sdk/include/ddk/ndis.h b/sdk/include/ddk/ndis.h index c5f1dd58514..8f53b1c568a 100644 --- a/sdk/include/ddk/ndis.h +++ b/sdk/include/ddk/ndis.h @@ -4256,7 +4256,7 @@ NdisDprReleaseSpinLock( * OUT PUCHAR Data); */ #define NdisReadRegisterUchar(Register, Data) \ - *(Data) = *(Register) + *(Data) = *((volatile UCHAR * const) (Register)) /* * VOID @@ -4265,7 +4265,7 @@ NdisDprReleaseSpinLock( * OUT PULONG Data); */ #define NdisReadRegisterUlong(Register, Data) \ - *(Data) = *(Register) + *(Data) = *((volatile ULONG * const) (Register)) /* * VOID @@ -4274,7 +4274,7 @@ NdisDprReleaseSpinLock( * OUT PUSHORT Data); */ #define NdisReadRegisterUshort(Register, Data) \ - *(Data) = *(Register) + *(Data) = *((volatile USHORT * const) (Register)) /* * VOID diff --git a/sdk/include/xdk/kefuncs.h b/sdk/include/xdk/kefuncs.h index c710257751b..aa8597b56fe 100644 --- a/sdk/include/xdk/kefuncs.h +++ b/sdk/include/xdk/kefuncs.h @@ -1464,7 +1464,7 @@ KeFlushWriteBuffer(VOID); * IN PKBUGCHECK_CALLBACK_RECORD CallbackRecord) */ #define KeInitializeCallbackRecord(CallbackRecord) \ - CallbackRecord->State = BufferEmpty; + (CallbackRecord)->State = BufferEmpty; #if defined(_PREFAST_)
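
The two SDK header hunks above (sdk/include/ddk/ndis.h and sdk/include/xdk/kefuncs.h) fix macro definitions rather than the driver itself, and the reason for each change is easy to miss in review. The fragment below is an illustration only, not part of the patch; the register pointer, the polled READY bit, and the callback record variable are made-up names for the example. It shows why NdisReadRegisterUchar needs the volatile cast (a polling loop must re-read the device register on every pass instead of reusing a value the optimizer cached) and why KeInitializeCallbackRecord needs the added parentheses (callers normally pass &Record, which only expands to valid C once the macro argument is parenthesized before -> is applied).

#include <ndis.h>

/* Illustration only; assumes the kernel-mode headers that declare
   KBUGCHECK_CALLBACK_RECORD and KeInitializeCallbackRecord are in scope. */
static KBUGCHECK_CALLBACK_RECORD DemoCallbackRecord;   /* hypothetical name */

VOID DemoMacroUsage(PUCHAR RegBase)   /* RegBase: hypothetical MMIO register */
{
    UCHAR status = 0;

    /* Old expansion: *(Data) = *(Register); without the volatile cast the
       compiler may read the register once and then spin on the cached value.
       The new expansion forces a fresh read on every iteration. */
    do
    {
        NdisReadRegisterUchar(RegBase, &status);
    } while ((status & 0x01) == 0);   /* poll a hypothetical READY bit */

    /* Old expansion: CallbackRecord->State = BufferEmpty; with the usual
       call style below the argument is "&DemoCallbackRecord", so the old
       macro produced "&DemoCallbackRecord->State = ...", which does not
       compile. "(CallbackRecord)->State" lets the argument expand safely. */
    KeInitializeCallbackRecord(&DemoCallbackRecord);
}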