[SDK][VIRTIO][NETKVM] Make VirtIO a separate library (#6280)
* [SDK][VIRTIO][NETKVM] Make VirtIO a separate library

  This is to avoid code duplication when more VirtIO drivers are brought in.
  It will also be used in the development of a VirtIO XDDM GPU driver.

* [VIRTIO] Sync with upstream
parent 96d5b6281d
commit 823fdb19d7
20 changed files with 35 additions and 8 deletions
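The point of the split is visible in the CMakeLists.txt hunks below: netkvm stops compiling the virtio/*.c sources itself and links the new static library instead. A driver added later would follow the same pattern; a minimal sketch, where the viogpu target name and its source file are hypothetical:

include_directories(BEFORE ${REACTOS_SOURCE_DIR}/sdk/lib/drivers/virtio)

list(APPEND SOURCE
    viogpu.c)

add_library(viogpu MODULE ${SOURCE})
target_link_libraries(viogpu virtio)
set_module_type(viogpu kernelmodedriver)
add_importlibs(viogpu ntoskrnl hal)
add_cd_file(TARGET viogpu DESTINATION reactos/system32/drivers FOR all)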
CMakeLists.txt (netkvm)
@@ -1,5 +1,6 @@
include_directories(BEFORE Common virtio)
include_directories(BEFORE Common
                    ${REACTOS_SOURCE_DIR}/sdk/lib/drivers/virtio)

add_definitions(
    -DNDIS_MINIPORT_DRIVER
@@ -11,16 +12,12 @@ list(APPEND SOURCE
    Common/ParaNdis-VirtIO.c
    Common/ParaNdis-Debug.c
    Common/sw-offload.c
    virtio/VirtIOPCICommon.c
    virtio/VirtIOPCILegacy.c
    virtio/VirtIOPCIModern.c
    virtio/VirtIORing.c
    virtio/VirtIORing-Packed.c
    wxp/ParaNdis5-Driver.c
    wxp/ParaNdis5-Impl.c
    wxp/ParaNdis5-Oid.c)

add_library(netkvm MODULE ${SOURCE} wxp/parandis.rc)
target_link_libraries(netkvm virtio)
set_module_type(netkvm kernelmodedriver)
add_importlibs(netkvm ndis ntoskrnl hal)
add_cd_file(TARGET netkvm DESTINATION reactos/system32/drivers FOR all)
@@ -29,7 +26,6 @@ add_driver_inf(netkvm netkvm.inf)
if(NOT MSVC)
    target_compile_options(netkvm PRIVATE
        -Wno-unused-function
        -Wno-unknown-pragmas
        -Wno-pointer-sign
        -Wno-pointer-to-int-cast
        -Wno-int-to-pointer-cast
virtio/LICENSE (deleted)
@@ -1,30 +0,0 @@
Copyright 2009-2017 Red Hat, Inc. and/or its affiliates.
Copyright 2016 Google, Inc.
Copyright 2007 IBM Corporation

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.

Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
virtio/VirtIO.h (deleted)
@@ -1,128 +0,0 @@
#ifndef _LINUX_VIRTIO_H
#define _LINUX_VIRTIO_H

#include "virtio_ring.h"

#define scatterlist VirtIOBufferDescriptor

struct VirtIOBufferDescriptor {
    PHYSICAL_ADDRESS physAddr;
    ULONG length;
};

typedef int (*proc_virtqueue_add_buf)(
    struct virtqueue *vq,
    struct scatterlist sg[],
    unsigned int out_num,
    unsigned int in_num,
    void *opaque,
    void *va_indirect,
    ULONGLONG phys_indirect);

typedef bool (*proc_virtqueue_kick_prepare)(struct virtqueue *vq);

typedef void (*proc_virtqueue_kick_always)(struct virtqueue *vq);

typedef void * (*proc_virtqueue_get_buf)(struct virtqueue *vq, unsigned int *len);

typedef void (*proc_virtqueue_disable_cb)(struct virtqueue *vq);

typedef bool (*proc_virtqueue_enable_cb)(struct virtqueue *vq);

typedef bool (*proc_virtqueue_enable_cb_delayed)(struct virtqueue *vq);

typedef void * (*proc_virtqueue_detach_unused_buf)(struct virtqueue *vq);

typedef BOOLEAN (*proc_virtqueue_is_interrupt_enabled)(struct virtqueue *vq);

typedef BOOLEAN (*proc_virtqueue_has_buf)(struct virtqueue *vq);

typedef void (*proc_virtqueue_shutdown)(struct virtqueue *vq);

/* Represents one virtqueue; only data pointed to by the vring structure is exposed to the host */
struct virtqueue {
    VirtIODevice *vdev;
    unsigned int index;
    void (*notification_cb)(struct virtqueue *vq);
    void *notification_addr;
    void *avail_va;
    void *used_va;
    proc_virtqueue_add_buf add_buf;
    proc_virtqueue_kick_prepare kick_prepare;
    proc_virtqueue_kick_always kick_always;
    proc_virtqueue_get_buf get_buf;
    proc_virtqueue_disable_cb disable_cb;
    proc_virtqueue_enable_cb enable_cb;
    proc_virtqueue_enable_cb_delayed enable_cb_delayed;
    proc_virtqueue_detach_unused_buf detach_unused_buf;
    proc_virtqueue_is_interrupt_enabled is_interrupt_enabled;
    proc_virtqueue_has_buf has_buf;
    proc_virtqueue_shutdown shutdown;
};

static inline int virtqueue_add_buf(
    struct virtqueue *vq,
    struct scatterlist sg[],
    unsigned int out_num,
    unsigned int in_num,
    void *opaque,
    void *va_indirect,
    ULONGLONG phys_indirect)
{
    return vq->add_buf(vq, sg, out_num, in_num, opaque, va_indirect, phys_indirect);
}

static inline bool virtqueue_kick_prepare(struct virtqueue *vq)
{
    return vq->kick_prepare(vq);
}

static inline void virtqueue_kick_always(struct virtqueue *vq)
{
    vq->kick_always(vq);
}

static inline void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len)
{
    return vq->get_buf(vq, len);
}

static inline void virtqueue_disable_cb(struct virtqueue *vq)
{
    vq->disable_cb(vq);
}

static inline bool virtqueue_enable_cb(struct virtqueue *vq)
{
    return vq->enable_cb(vq);
}

static inline bool virtqueue_enable_cb_delayed(struct virtqueue *vq)
{
    return vq->enable_cb_delayed(vq);
}

static inline void *virtqueue_detach_unused_buf(struct virtqueue *vq)
{
    return vq->detach_unused_buf(vq);
}

static inline BOOLEAN virtqueue_is_interrupt_enabled(struct virtqueue *vq)
{
    return vq->is_interrupt_enabled(vq);
}

static inline BOOLEAN virtqueue_has_buf(struct virtqueue *vq)
{
    return vq->has_buf(vq);
}

static inline void virtqueue_shutdown(struct virtqueue *vq)
{
    vq->shutdown(vq);
}

void virtqueue_notify(struct virtqueue *vq);
void virtqueue_kick(struct virtqueue *vq);

#endif /* _LINUX_VIRTIO_H */
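The header above is a thin dispatch layer: each virtqueue_* wrapper forwards to a function pointer filled in by the split or packed ring implementation. A minimal usage sketch, assuming an already initialized struct virtqueue *vq and a DMA-visible buffer (the example_send name and its arguments are illustrative):

static void example_send(struct virtqueue *vq, PHYSICAL_ADDRESS pa, ULONG len, void *ctx)
{
    struct VirtIOBufferDescriptor sg[1];
    unsigned int used_len;

    sg[0].physAddr = pa;
    sg[0].length = len;

    /* one driver->device buffer, no device->driver buffers, no indirect page;
     * treat a non-negative return as success */
    if (virtqueue_add_buf(vq, sg, 1, 0, ctx, NULL, 0) >= 0) {
        /* virtqueue_kick() checks kick_prepare first, so the host is only
         * notified when it actually asked to be */
        virtqueue_kick(vq);
    }

    /* later, typically from the interrupt path: reclaim completed buffers;
     * the returned pointer is the opaque context passed to add_buf */
    while (virtqueue_get_buf(vq, &used_len) != NULL) {
        /* process the completed buffer */
    }
}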
virtio/VirtIOPCICommon.c (deleted)
@@ -1,411 +0,0 @@
/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met :
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and / or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of their contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "osdep.h"
#include "virtio_pci.h"
#include "VirtIO.h"
#include "kdebugprint.h"
#include <stddef.h>

#include "virtio_pci_common.h"

NTSTATUS virtio_device_initialize(VirtIODevice *vdev,
                                  const VirtIOSystemOps *pSystemOps,
                                  PVOID DeviceContext,
                                  bool msix_used)
{
    NTSTATUS status;

    RtlZeroMemory(vdev, sizeof(VirtIODevice));
    vdev->DeviceContext = DeviceContext;
    vdev->system = pSystemOps;
    vdev->msix_used = msix_used;
    vdev->info = vdev->inline_info;
    vdev->maxQueues = ARRAYSIZE(vdev->inline_info);

    status = vio_modern_initialize(vdev);
    if (status == STATUS_DEVICE_NOT_CONNECTED) {
        /* fall back to legacy virtio device */
        status = vio_legacy_initialize(vdev);
    }
    if (NT_SUCCESS(status)) {
        /* Always start by resetting the device */
        virtio_device_reset(vdev);

        /* Acknowledge that we've seen the device. */
        virtio_add_status(vdev, VIRTIO_CONFIG_S_ACKNOWLEDGE);

        /* If we are here, we must have found a driver for the device */
        virtio_add_status(vdev, VIRTIO_CONFIG_S_DRIVER);
    }

    return status;
}

void virtio_device_shutdown(VirtIODevice *vdev)
{
    if (vdev->info &&
        vdev->info != vdev->inline_info) {
        mem_free_nonpaged_block(vdev, vdev->info);
        vdev->info = NULL;
    }
}

u8 virtio_get_status(VirtIODevice *vdev)
{
    return vdev->device->get_status(vdev);
}

void virtio_set_status(VirtIODevice *vdev, u8 status)
{
    vdev->device->set_status(vdev, status);
}

void virtio_add_status(VirtIODevice *vdev, u8 status)
{
    vdev->device->set_status(vdev, (u8)(vdev->device->get_status(vdev) | status));
}

void virtio_device_reset(VirtIODevice *vdev)
{
    vdev->device->reset(vdev);
}

void virtio_device_ready(VirtIODevice *vdev)
{
    unsigned status = vdev->device->get_status(vdev);

    ASSERT(!(status & VIRTIO_CONFIG_S_DRIVER_OK));
    vdev->device->set_status(vdev, (u8)(status | VIRTIO_CONFIG_S_DRIVER_OK));
}

u64 virtio_get_features(VirtIODevice *vdev)
{
    return vdev->device->get_features(vdev);
}

NTSTATUS virtio_set_features(VirtIODevice *vdev, u64 features)
{
    unsigned char dev_status;
    NTSTATUS status;

    vdev->event_suppression_enabled = virtio_is_feature_enabled(features, VIRTIO_RING_F_EVENT_IDX);
    vdev->packed_ring = virtio_is_feature_enabled(features, VIRTIO_F_RING_PACKED);

    status = vdev->device->set_features(vdev, features);
    if (!NT_SUCCESS(status)) {
        return status;
    }

    if (!virtio_is_feature_enabled(features, VIRTIO_F_VERSION_1)) {
        return status;
    }

    virtio_add_status(vdev, VIRTIO_CONFIG_S_FEATURES_OK);
    dev_status = vdev->device->get_status(vdev);
    if (!(dev_status & VIRTIO_CONFIG_S_FEATURES_OK)) {
        DPrintf(0, "virtio: device refuses features: %x\n", dev_status);
        status = STATUS_INVALID_PARAMETER;
    }
    return status;
}

/* Read @count fields, @bytes each. */
static void virtio_cread_many(VirtIODevice *vdev,
                              unsigned int offset,
                              void *buf, size_t count, size_t bytes)
{
    u32 old, gen = vdev->device->get_config_generation ?
        vdev->device->get_config_generation(vdev) : 0;
    size_t i;

    do {
        old = gen;

        for (i = 0; i < count; i++) {
            vdev->device->get_config(vdev, (unsigned)(offset + bytes * i),
                                     (char *)buf + i * bytes, (unsigned)bytes);
        }

        gen = vdev->device->get_config_generation ?
            vdev->device->get_config_generation(vdev) : 0;
    } while (gen != old);
}

void virtio_get_config(VirtIODevice *vdev, unsigned offset,
                       void *buf, unsigned len)
{
    switch (len) {
    case 1:
    case 2:
    case 4:
        vdev->device->get_config(vdev, offset, buf, len);
        break;
    case 8:
        virtio_cread_many(vdev, offset, buf, 2, sizeof(u32));
        break;
    default:
        virtio_cread_many(vdev, offset, buf, len, 1);
        break;
    }
}

/* Write @count fields, @bytes each. */
static void virtio_cwrite_many(VirtIODevice *vdev,
                               unsigned int offset,
                               void *buf, size_t count, size_t bytes)
{
    size_t i;
    for (i = 0; i < count; i++) {
        vdev->device->set_config(vdev, (unsigned)(offset + bytes * i),
                                 (char *)buf + i * bytes, (unsigned)bytes);
    }
}

void virtio_set_config(VirtIODevice *vdev, unsigned offset,
                       void *buf, unsigned len)
{
    switch (len) {
    case 1:
    case 2:
    case 4:
        vdev->device->set_config(vdev, offset, buf, len);
        break;
    case 8:
        virtio_cwrite_many(vdev, offset, buf, 2, sizeof(u32));
        break;
    default:
        virtio_cwrite_many(vdev, offset, buf, len, 1);
        break;
    }
}

NTSTATUS virtio_query_queue_allocation(VirtIODevice *vdev,
                                       unsigned index,
                                       unsigned short *pNumEntries,
                                       unsigned long *pRingSize,
                                       unsigned long *pHeapSize)
{
    return vdev->device->query_queue_alloc(vdev, index, pNumEntries, pRingSize, pHeapSize);
}

NTSTATUS virtio_reserve_queue_memory(VirtIODevice *vdev, unsigned nvqs)
{
    if (nvqs > vdev->maxQueues) {
        /* allocate new space for queue infos */
        void *new_info = mem_alloc_nonpaged_block(vdev, nvqs * virtio_get_queue_descriptor_size());
        if (!new_info) {
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        if (vdev->info && vdev->info != vdev->inline_info) {
            mem_free_nonpaged_block(vdev, vdev->info);
        }
        vdev->info = new_info;
        vdev->maxQueues = nvqs;
    }
    return STATUS_SUCCESS;
}

static NTSTATUS vp_setup_vq(struct virtqueue **queue,
                            VirtIODevice *vdev, unsigned index,
                            u16 msix_vec)
{
    VirtIOQueueInfo *info = &vdev->info[index];

    NTSTATUS status = vdev->device->setup_queue(queue, vdev, info, index, msix_vec);
    if (NT_SUCCESS(status)) {
        info->vq = *queue;
    }

    return status;
}

NTSTATUS virtio_find_queue(VirtIODevice *vdev, unsigned index,
                           struct virtqueue **vq)
{
    u16 msix_vec = vdev_get_msix_vector(vdev, index);
    return vp_setup_vq(
        vq,
        vdev,
        index,
        msix_vec);
}

NTSTATUS virtio_find_queues(VirtIODevice *vdev,
                            unsigned nvqs,
                            struct virtqueue *vqs[])
{
    unsigned i;
    NTSTATUS status;
    u16 msix_vec;

    status = virtio_reserve_queue_memory(vdev, nvqs);
    if (!NT_SUCCESS(status)) {
        return status;
    }

    /* set up the device config interrupt */
    msix_vec = vdev_get_msix_vector(vdev, -1);

    if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
        msix_vec = vdev->device->set_config_vector(vdev, msix_vec);
        /* Verify we had enough resources to assign the vector */
        if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
            status = STATUS_DEVICE_BUSY;
            goto error_find;
        }
    }

    /* set up queue interrupts */
    for (i = 0; i < nvqs; i++) {
        msix_vec = vdev_get_msix_vector(vdev, i);
        status = vp_setup_vq(
            &vqs[i],
            vdev,
            i,
            msix_vec);
        if (!NT_SUCCESS(status)) {
            goto error_find;
        }
    }
    return STATUS_SUCCESS;

error_find:
    virtio_delete_queues(vdev);
    return status;
}

void virtio_delete_queue(struct virtqueue *vq)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned i = vq->index;

    vdev->device->delete_queue(&vdev->info[i]);
    vdev->info[i].vq = NULL;
}

void virtio_delete_queues(VirtIODevice *vdev)
{
    struct virtqueue *vq;
    unsigned i;

    if (vdev->info == NULL)
        return;

    for (i = 0; i < vdev->maxQueues; i++) {
        vq = vdev->info[i].vq;
        if (vq != NULL) {
            vdev->device->delete_queue(&vdev->info[i]);
            vdev->info[i].vq = NULL;
        }
    }
}

u32 virtio_get_queue_size(struct virtqueue *vq)
{
    return vq->vdev->info[vq->index].num;
}

u16 virtio_set_config_vector(VirtIODevice *vdev, u16 vector)
{
    return vdev->device->set_config_vector(vdev, vector);
}

u16 virtio_set_queue_vector(struct virtqueue *vq, u16 vector)
{
    return vq->vdev->device->set_queue_vector(vq, vector);
}

u8 virtio_read_isr_status(VirtIODevice *vdev)
{
    return ioread8(vdev, vdev->isr);
}

int virtio_get_bar_index(PPCI_COMMON_HEADER pPCIHeader, PHYSICAL_ADDRESS BasePA)
{
    int iBar, i;

    /* no point in supporting PCI and CardBus bridges */
    ASSERT((pPCIHeader->HeaderType & ~PCI_MULTIFUNCTION) == PCI_DEVICE_TYPE);

    for (i = 0; i < PCI_TYPE0_ADDRESSES; i++) {
        PHYSICAL_ADDRESS BAR;
        BAR.LowPart = pPCIHeader->u.type0.BaseAddresses[i];

        iBar = i;
        if (BAR.LowPart & PCI_ADDRESS_IO_SPACE) {
            /* I/O space */
            BAR.LowPart &= PCI_ADDRESS_IO_ADDRESS_MASK;
            BAR.HighPart = 0;
        } else if ((BAR.LowPart & PCI_ADDRESS_MEMORY_TYPE_MASK) == PCI_TYPE_64BIT) {
            /* memory space 64-bit */
            BAR.LowPart &= PCI_ADDRESS_MEMORY_ADDRESS_MASK;
            BAR.HighPart = pPCIHeader->u.type0.BaseAddresses[++i];
        } else {
            /* memory space 32-bit */
            BAR.LowPart &= PCI_ADDRESS_MEMORY_ADDRESS_MASK;
            BAR.HighPart = 0;
        }

        if (BAR.QuadPart == BasePA.QuadPart) {
            return iBar;
        }
    }
    return -1;
}

/* The notify function used when creating a virt queue, common to both modern
 * and legacy (the difference is in how vq->notification_addr is set up).
 */
void vp_notify(struct virtqueue *vq)
{
    /* we write the queue's selector into the notification register to
     * signal the other end */
    iowrite16(vq->vdev, (unsigned short)vq->index, vq->notification_addr);
    DPrintf(6, "virtio: vp_notify vq->index = %x\n", vq->index);
}

void virtqueue_notify(struct virtqueue *vq)
{
    vq->notification_cb(vq);
}

void virtqueue_kick(struct virtqueue *vq)
{
    if (virtqueue_kick_prepare(vq)) {
        virtqueue_notify(vq);
    }
}
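VirtIOPCICommon.c also fixes the bring-up order a driver is expected to follow: initialize (which probes the modern transport first and falls back to legacy), negotiate features, create the queues, then declare the driver ready. A condensed sketch using only the functions above; the queue count, context, and feature handling are driver-specific assumptions:

static NTSTATUS example_start(VirtIODevice *vdev, const VirtIOSystemOps *pSystemOps,
                              PVOID ctx, struct virtqueue *vqs[2])
{
    NTSTATUS status;
    u64 features;

    /* resets the device and sets ACKNOWLEDGE | DRIVER on success */
    status = virtio_device_initialize(vdev, pSystemOps, ctx, false /* msix_used */);
    if (!NT_SUCCESS(status)) {
        return status;
    }

    /* a real driver would mask out features it does not implement;
     * FEATURES_OK is verified inside for virtio 1.0 devices */
    features = virtio_get_features(vdev);
    status = virtio_set_features(vdev, features);
    if (!NT_SUCCESS(status)) {
        return status;
    }

    /* allocate and activate the queues, then set DRIVER_OK */
    status = virtio_find_queues(vdev, 2, vqs);
    if (NT_SUCCESS(status)) {
        virtio_device_ready(vdev);
    }
    return status;
}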
virtio/VirtIOPCILegacy.c (deleted)
@@ -1,283 +0,0 @@
/*
 * Virtio PCI driver - legacy (virtio 0.9) device support
 *
 * Copyright IBM Corp. 2007
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Windows porting - Yan Vugenfirer <yvugenfi@redhat.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met :
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and / or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of their contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "osdep.h"
#include "virtio_pci.h"
#include "VirtIO.h"
#include "kdebugprint.h"
#include "virtio_ring.h"
#include "virtio_pci_common.h"
#include "windows/virtio_ring_allocation.h"

#ifdef WPP_EVENT_TRACING
#include "VirtIOPCILegacy.tmh"
#endif

/////////////////////////////////////////////////////////////////////////////////////
//
// vio_legacy_dump_registers - Dump HW registers of the device
//
/////////////////////////////////////////////////////////////////////////////////////
void vio_legacy_dump_registers(VirtIODevice *vdev)
{
    DPrintf(5, "%s\n", __FUNCTION__);

    DPrintf(0, "[VIRTIO_PCI_HOST_FEATURES] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_HOST_FEATURES));
    DPrintf(0, "[VIRTIO_PCI_GUEST_FEATURES] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_GUEST_FEATURES));
    DPrintf(0, "[VIRTIO_PCI_QUEUE_PFN] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_QUEUE_PFN));
    DPrintf(0, "[VIRTIO_PCI_QUEUE_NUM] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_QUEUE_NUM));
    DPrintf(0, "[VIRTIO_PCI_QUEUE_SEL] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_QUEUE_SEL));
    DPrintf(0, "[VIRTIO_PCI_QUEUE_NOTIFY] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_QUEUE_NOTIFY));
    DPrintf(0, "[VIRTIO_PCI_STATUS] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_STATUS));
    DPrintf(0, "[VIRTIO_PCI_ISR] = %x\n", ioread32(vdev, vdev->addr + VIRTIO_PCI_ISR));
}

static void vio_legacy_get_config(VirtIODevice *vdev,
                                  unsigned offset,
                                  void *buf,
                                  unsigned len)
{
    ULONG_PTR ioaddr = vdev->addr + VIRTIO_PCI_CONFIG(vdev->msix_used) + offset;
    u8 *ptr = buf;
    unsigned i;

    DPrintf(5, "%s\n", __FUNCTION__);

    for (i = 0; i < len; i++) {
        ptr[i] = ioread8(vdev, ioaddr + i);
    }
}

static void vio_legacy_set_config(VirtIODevice *vdev,
                                  unsigned offset,
                                  const void *buf,
                                  unsigned len)
{
    ULONG_PTR ioaddr = vdev->addr + VIRTIO_PCI_CONFIG(vdev->msix_used) + offset;
    const u8 *ptr = buf;
    unsigned i;

    DPrintf(5, "%s\n", __FUNCTION__);

    for (i = 0; i < len; i++) {
        iowrite8(vdev, ptr[i], ioaddr + i);
    }
}

static u8 vio_legacy_get_status(VirtIODevice *vdev)
{
    DPrintf(6, "%s\n", __FUNCTION__);
    return ioread8(vdev, vdev->addr + VIRTIO_PCI_STATUS);
}

static void vio_legacy_set_status(VirtIODevice *vdev, u8 status)
{
    DPrintf(6, "%s>>> %x\n", __FUNCTION__, status);
    iowrite8(vdev, status, vdev->addr + VIRTIO_PCI_STATUS);
}

static void vio_legacy_reset(VirtIODevice *vdev)
{
    /* 0 status means a reset. */
    iowrite8(vdev, 0, vdev->addr + VIRTIO_PCI_STATUS);
}

static u64 vio_legacy_get_features(VirtIODevice *vdev)
{
    return ioread32(vdev, vdev->addr + VIRTIO_PCI_HOST_FEATURES);
}

static NTSTATUS vio_legacy_set_features(VirtIODevice *vdev, u64 features)
{
    /* Give virtio_ring a chance to accept features. */
    vring_transport_features(vdev, &features);

    /* Make sure we don't have any features > 32 bits! */
    ASSERT((u32)features == features);
    iowrite32(vdev, (u32)features, vdev->addr + VIRTIO_PCI_GUEST_FEATURES);

    return STATUS_SUCCESS;
}

static u16 vio_legacy_set_config_vector(VirtIODevice *vdev, u16 vector)
{
    /* Setup the vector used for configuration events */
    iowrite16(vdev, vector, vdev->addr + VIRTIO_MSI_CONFIG_VECTOR);
    /* Verify we had enough resources to assign the vector */
    /* Will also flush the write out to device */
    return ioread16(vdev, vdev->addr + VIRTIO_MSI_CONFIG_VECTOR);
}

static u16 vio_legacy_set_queue_vector(struct virtqueue *vq, u16 vector)
{
    VirtIODevice *vdev = vq->vdev;

    iowrite16(vdev, (u16)vq->index, vdev->addr + VIRTIO_PCI_QUEUE_SEL);
    iowrite16(vdev, vector, vdev->addr + VIRTIO_MSI_QUEUE_VECTOR);
    return ioread16(vdev, vdev->addr + VIRTIO_MSI_QUEUE_VECTOR);
}

static NTSTATUS vio_legacy_query_vq_alloc(VirtIODevice *vdev,
                                          unsigned index,
                                          unsigned short *pNumEntries,
                                          unsigned long *pRingSize,
                                          unsigned long *pHeapSize)
{
    unsigned long ring_size, data_size;
    u16 num;

    /* Select the queue we're interested in */
    iowrite16(vdev, (u16)index, vdev->addr + VIRTIO_PCI_QUEUE_SEL);

    /* Check if queue is either not available or already active. */
    num = ioread16(vdev, vdev->addr + VIRTIO_PCI_QUEUE_NUM);
    if (!num || ioread32(vdev, vdev->addr + VIRTIO_PCI_QUEUE_PFN)) {
        return STATUS_NOT_FOUND;
    }

    ring_size = ROUND_TO_PAGES(vring_size(num, VIRTIO_PCI_VRING_ALIGN, false));
    data_size = ROUND_TO_PAGES(vring_control_block_size(num, false));

    *pNumEntries = num;
    *pRingSize = ring_size + data_size;
    *pHeapSize = 0;

    return STATUS_SUCCESS;
}

static NTSTATUS vio_legacy_setup_vq(struct virtqueue **queue,
                                    VirtIODevice *vdev,
                                    VirtIOQueueInfo *info,
                                    unsigned index,
                                    u16 msix_vec)
{
    struct virtqueue *vq;
    unsigned long ring_size, heap_size;
    NTSTATUS status;

    /* Select the queue and query allocation parameters */
    status = vio_legacy_query_vq_alloc(vdev, index, &info->num, &ring_size, &heap_size);
    if (!NT_SUCCESS(status)) {
        return status;
    }

    info->queue = mem_alloc_contiguous_pages(vdev, ring_size);
    if (info->queue == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    /* activate the queue */
    iowrite32(vdev, (u32)(mem_get_physical_address(vdev, info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT),
              vdev->addr + VIRTIO_PCI_QUEUE_PFN);

    /* create the vring */
    vq = vring_new_virtqueue_split(index, info->num,
                                   VIRTIO_PCI_VRING_ALIGN, vdev,
                                   info->queue, vp_notify,
                                   (u8 *)info->queue + ROUND_TO_PAGES(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN, false)));
    if (!vq) {
        status = STATUS_INSUFFICIENT_RESOURCES;
        goto err_activate_queue;
    }

    vq->notification_addr = (void *)(vdev->addr + VIRTIO_PCI_QUEUE_NOTIFY);

    if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
        msix_vec = vdev->device->set_queue_vector(vq, msix_vec);
        if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
            status = STATUS_DEVICE_BUSY;
            goto err_assign;
        }
    }

    *queue = vq;
    return STATUS_SUCCESS;

err_assign:
err_activate_queue:
    iowrite32(vdev, 0, vdev->addr + VIRTIO_PCI_QUEUE_PFN);
    mem_free_contiguous_pages(vdev, info->queue);
    return status;
}

static void vio_legacy_del_vq(VirtIOQueueInfo *info)
{
    struct virtqueue *vq = info->vq;
    VirtIODevice *vdev = vq->vdev;

    iowrite16(vdev, (u16)vq->index, vdev->addr + VIRTIO_PCI_QUEUE_SEL);

    if (vdev->msix_used) {
        iowrite16(vdev, VIRTIO_MSI_NO_VECTOR,
                  vdev->addr + VIRTIO_MSI_QUEUE_VECTOR);
        /* Flush the write out to device */
        ioread8(vdev, vdev->addr + VIRTIO_PCI_ISR);
    }

    /* Select and deactivate the queue */
    iowrite32(vdev, 0, vdev->addr + VIRTIO_PCI_QUEUE_PFN);

    mem_free_contiguous_pages(vdev, info->queue);
}

static const struct virtio_device_ops virtio_pci_device_ops = {
    /* .get_config = */ vio_legacy_get_config,
    /* .set_config = */ vio_legacy_set_config,
    /* .get_config_generation = */ NULL,
    /* .get_status = */ vio_legacy_get_status,
    /* .set_status = */ vio_legacy_set_status,
    /* .reset = */ vio_legacy_reset,
    /* .get_features = */ vio_legacy_get_features,
    /* .set_features = */ vio_legacy_set_features,
    /* .set_config_vector = */ vio_legacy_set_config_vector,
    /* .set_queue_vector = */ vio_legacy_set_queue_vector,
    /* .query_queue_alloc = */ vio_legacy_query_vq_alloc,
    /* .setup_queue = */ vio_legacy_setup_vq,
    /* .delete_queue = */ vio_legacy_del_vq,
};

/* Legacy device initialization */
NTSTATUS vio_legacy_initialize(VirtIODevice *vdev)
{
    size_t length = pci_get_resource_len(vdev, 0);
    vdev->addr = (ULONG_PTR)pci_map_address_range(vdev, 0, 0, length);

    if (!vdev->addr) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    vdev->isr = (u8 *)vdev->addr + VIRTIO_PCI_ISR;

    vdev->device = &virtio_pci_device_ops;

    return STATUS_SUCCESS;
}
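In the legacy transport the ring is published by physical page frame number rather than by full address. A worked example of the activation write in vio_legacy_setup_vq, assuming the conventional legacy value VIRTIO_PCI_QUEUE_ADDR_SHIFT == 12 (4 KiB frames) and an illustrative ring address:

/* A ring allocated at physical 0x12345000 is activated by writing
 * 0x12345000 >> 12 = 0x12345 to VIRTIO_PCI_QUEUE_PFN; writing 0 back
 * (as vio_legacy_del_vq does) deactivates the queue. */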
virtio/VirtIOPCIModern.c (deleted)
@@ -1,597 +0,0 @@
/*
 * Virtio PCI driver - modern (virtio 1.0) device support
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met :
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and / or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of their contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "osdep.h"
#define VIRTIO_PCI_NO_LEGACY
#include "virtio_pci.h"
#include "VirtIO.h"
#include "kdebugprint.h"
#include "virtio_ring.h"
#include "virtio_pci_common.h"
#include "windows/virtio_ring_allocation.h"
#include <stddef.h>

#ifdef WPP_EVENT_TRACING
#include "VirtIOPCIModern.tmh"
#endif

static void *vio_modern_map_capability(VirtIODevice *vdev, int cap_offset,
                                       size_t minlen, u32 alignment,
                                       u32 start, u32 size, size_t *len)
{
    u8 bar;
    u32 bar_offset, bar_length;
    void *addr;

    pci_read_config_byte(vdev, cap_offset + offsetof(struct virtio_pci_cap, bar), &bar);
    pci_read_config_dword(vdev, cap_offset + offsetof(struct virtio_pci_cap, offset), &bar_offset);
    pci_read_config_dword(vdev, cap_offset + offsetof(struct virtio_pci_cap, length), &bar_length);

    if (start + minlen > bar_length) {
        DPrintf(0, "bar %i cap is not large enough to map %zu bytes at offset %u\n", bar, minlen, start);
        return NULL;
    }

    bar_length -= start;
    bar_offset += start;

    if (bar_offset & (alignment - 1)) {
        DPrintf(0, "bar %i offset %u not aligned to %u\n", bar, bar_offset, alignment);
        return NULL;
    }

    if (bar_length > size) {
        bar_length = size;
    }

    if (len) {
        *len = bar_length;
    }

    if (bar_offset + minlen > pci_get_resource_len(vdev, bar)) {
        DPrintf(0, "bar %i is not large enough to map %zu bytes at offset %u\n", bar, minlen, bar_offset);
        return NULL;
    }

    addr = pci_map_address_range(vdev, bar, bar_offset, bar_length);
    if (!addr) {
        DPrintf(0, "unable to map %u bytes at bar %i offset %u\n", bar_length, bar, bar_offset);
    }
    return addr;
}

static void *vio_modern_map_simple_capability(VirtIODevice *vdev, int cap_offset, size_t length, u32 alignment)
{
    return vio_modern_map_capability(
        vdev,
        cap_offset,
        length,       // minlen
        alignment,
        0,            // offset
        (u32)length,  // size is equal to minlen
        NULL);        // not interested in the full length
}

static void vio_modern_get_config(VirtIODevice *vdev, unsigned offset,
                                  void *buf, unsigned len)
{
    if (!vdev->config) {
        ASSERT(!"Device has no config to read");
        return;
    }
    if (offset + len > vdev->config_len) {
        ASSERT(!"Can't read beyond the config length");
        return;
    }

    switch (len) {
    case 1:
        *(u8 *)buf = ioread8(vdev, vdev->config + offset);
        break;
    case 2:
        *(u16 *)buf = ioread16(vdev, vdev->config + offset);
        break;
    case 4:
        *(u32 *)buf = ioread32(vdev, vdev->config + offset);
        break;
    default:
        ASSERT(!"Only 1, 2, 4 byte config reads are supported");
    }
}

static void vio_modern_set_config(VirtIODevice *vdev, unsigned offset,
                                  const void *buf, unsigned len)
{
    if (!vdev->config) {
        ASSERT(!"Device has no config to write");
        return;
    }
    if (offset + len > vdev->config_len) {
        ASSERT(!"Can't write beyond the config length");
        return;
    }

    switch (len) {
    case 1:
        iowrite8(vdev, *(u8 *)buf, vdev->config + offset);
        break;
    case 2:
        iowrite16(vdev, *(u16 *)buf, vdev->config + offset);
        break;
    case 4:
        iowrite32(vdev, *(u32 *)buf, vdev->config + offset);
        break;
    default:
        ASSERT(!"Only 1, 2, 4 byte config writes are supported");
    }
}

static u32 vio_modern_get_generation(VirtIODevice *vdev)
{
    return ioread8(vdev, &vdev->common->config_generation);
}

static u8 vio_modern_get_status(VirtIODevice *vdev)
{
    return ioread8(vdev, &vdev->common->device_status);
}

static void vio_modern_set_status(VirtIODevice *vdev, u8 status)
{
    /* We should never be setting status to 0. */
    ASSERT(status != 0);
    iowrite8(vdev, status, &vdev->common->device_status);
}

static void vio_modern_reset(VirtIODevice *vdev)
{
    /* 0 status means a reset. */
    iowrite8(vdev, 0, &vdev->common->device_status);
    /* After writing 0 to device_status, the driver MUST wait for a read of
     * device_status to return 0 before reinitializing the device.
     * This will flush out the status write, and flush in device writes,
     * including MSI-X interrupts, if any.
     */
    while (ioread8(vdev, &vdev->common->device_status)) {
        vdev_sleep(vdev, 1);
    }
}

static u64 vio_modern_get_features(VirtIODevice *vdev)
{
    u64 features;

    iowrite32(vdev, 0, &vdev->common->device_feature_select);
    features = ioread32(vdev, &vdev->common->device_feature);
    iowrite32(vdev, 1, &vdev->common->device_feature_select);
    features |= ((u64)ioread32(vdev, &vdev->common->device_feature) << 32);

    return features;
}

static NTSTATUS vio_modern_set_features(VirtIODevice *vdev, u64 features)
{
    /* Give virtio_ring a chance to accept features. */
    vring_transport_features(vdev, &features);

    if (!virtio_is_feature_enabled(features, VIRTIO_F_VERSION_1)) {
        DPrintf(0, "virtio: device uses modern interface but does not have VIRTIO_F_VERSION_1\n", 0);
        return STATUS_INVALID_PARAMETER;
    }

    iowrite32(vdev, 0, &vdev->common->guest_feature_select);
    iowrite32(vdev, (u32)features, &vdev->common->guest_feature);
    iowrite32(vdev, 1, &vdev->common->guest_feature_select);
    iowrite32(vdev, features >> 32, &vdev->common->guest_feature);

    return STATUS_SUCCESS;
}

static u16 vio_modern_set_config_vector(VirtIODevice *vdev, u16 vector)
{
    /* Setup the vector used for configuration events */
    iowrite16(vdev, vector, &vdev->common->msix_config);
    /* Verify we had enough resources to assign the vector */
    /* Will also flush the write out to device */
    return ioread16(vdev, &vdev->common->msix_config);
}

static u16 vio_modern_set_queue_vector(struct virtqueue *vq, u16 vector)
{
    VirtIODevice *vdev = vq->vdev;
    volatile struct virtio_pci_common_cfg *cfg = vdev->common;

    iowrite16(vdev, (u16)vq->index, &cfg->queue_select);
    iowrite16(vdev, vector, &cfg->queue_msix_vector);
    return ioread16(vdev, &cfg->queue_msix_vector);
}

static size_t vring_pci_size(u16 num, bool packed)
{
    /* We only need a cacheline separation. */
    return (size_t)ROUND_TO_PAGES(vring_size(num, SMP_CACHE_BYTES, packed));
}

static NTSTATUS vio_modern_query_vq_alloc(VirtIODevice *vdev,
                                          unsigned index,
                                          unsigned short *pNumEntries,
                                          unsigned long *pRingSize,
                                          unsigned long *pHeapSize)
{
    volatile struct virtio_pci_common_cfg *cfg = vdev->common;
    u16 num;

    if (index >= ioread16(vdev, &cfg->num_queues)) {
        return STATUS_NOT_FOUND;
    }

    /* Select the queue we're interested in */
    iowrite16(vdev, (u16)index, &cfg->queue_select);

    /* Check if queue is either not available or already active. */
    num = ioread16(vdev, &cfg->queue_size);
    /* QEMU has a bug where queues don't revert to inactive on device
     * reset. Skip checking the queue_enable field until it is fixed.
     */
    if (!num /*|| ioread16(vdev, &cfg->queue_enable)*/) {
        return STATUS_NOT_FOUND;
    }

    if (num & (num - 1)) {
        DPrintf(0, "%p: bad queue size %u", vdev, num);
        return STATUS_INVALID_PARAMETER;
    }

    *pNumEntries = num;
    *pRingSize = (unsigned long)vring_pci_size(num, vdev->packed_ring);
    *pHeapSize = vring_control_block_size(num, vdev->packed_ring);

    return STATUS_SUCCESS;
}

static NTSTATUS vio_modern_setup_vq(struct virtqueue **queue,
                                    VirtIODevice *vdev,
                                    VirtIOQueueInfo *info,
                                    unsigned index,
                                    u16 msix_vec)
{
    volatile struct virtio_pci_common_cfg *cfg = vdev->common;
    struct virtqueue *vq;
    void *vq_addr;
    u16 off;
    unsigned long ring_size, heap_size;
    NTSTATUS status;

    /* select the queue and query allocation parameters */
    status = vio_modern_query_vq_alloc(vdev, index, &info->num, &ring_size, &heap_size);
    if (!NT_SUCCESS(status)) {
        return status;
    }

    /* get offset of notification word for this vq */
    off = ioread16(vdev, &cfg->queue_notify_off);

    /* try to allocate contiguous pages, scale down on failure */
    while (!(info->queue = mem_alloc_contiguous_pages(vdev, vring_pci_size(info->num, vdev->packed_ring)))) {
        if (info->num > 0) {
            info->num /= 2;
        } else {
            return STATUS_INSUFFICIENT_RESOURCES;
        }
    }

    vq_addr = mem_alloc_nonpaged_block(vdev, heap_size);
    if (vq_addr == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    /* create the vring */
    if (vdev->packed_ring) {
        vq = vring_new_virtqueue_packed(index, info->num,
                                        SMP_CACHE_BYTES, vdev,
                                        info->queue, vp_notify, vq_addr);
    } else {
        vq = vring_new_virtqueue_split(index, info->num,
                                       SMP_CACHE_BYTES, vdev,
                                       info->queue, vp_notify, vq_addr);
    }

    if (!vq) {
        status = STATUS_INSUFFICIENT_RESOURCES;
        goto err_new_queue;
    }

    /* activate the queue */
    iowrite16(vdev, info->num, &cfg->queue_size);
    iowrite64_twopart(vdev, mem_get_physical_address(vdev, info->queue),
                      &cfg->queue_desc_lo, &cfg->queue_desc_hi);
    iowrite64_twopart(vdev, mem_get_physical_address(vdev, vq->avail_va),
                      &cfg->queue_avail_lo, &cfg->queue_avail_hi);
    iowrite64_twopart(vdev, mem_get_physical_address(vdev, vq->used_va),
                      &cfg->queue_used_lo, &cfg->queue_used_hi);

    if (vdev->notify_base) {
        /* offset should not wrap */
        if ((u64)off * vdev->notify_offset_multiplier + 2
            > vdev->notify_len) {
            DPrintf(0,
                    "%p: bad notification offset %u (x %u) "
                    "for queue %u > %zd",
                    vdev,
                    off, vdev->notify_offset_multiplier,
                    index, vdev->notify_len);
            status = STATUS_INVALID_PARAMETER;
            goto err_map_notify;
        }
        vq->notification_addr = (void *)(vdev->notify_base +
                                         off * vdev->notify_offset_multiplier);
    } else {
        vq->notification_addr = vio_modern_map_capability(vdev,
            vdev->notify_map_cap, 2, 2,
            off * vdev->notify_offset_multiplier, 2,
            NULL);
    }

    if (!vq->notification_addr) {
        status = STATUS_INSUFFICIENT_RESOURCES;
        goto err_map_notify;
    }

    if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
        msix_vec = vdev->device->set_queue_vector(vq, msix_vec);
        if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
            status = STATUS_DEVICE_BUSY;
            goto err_assign_vector;
        }
    }

    /* enable the queue */
    iowrite16(vdev, 1, &vdev->common->queue_enable);

    *queue = vq;
    return STATUS_SUCCESS;

err_assign_vector:
err_map_notify:
    virtqueue_shutdown(vq);
err_new_queue:
    mem_free_nonpaged_block(vdev, vq_addr);
    mem_free_contiguous_pages(vdev, info->queue);
    return status;
}

static void vio_modern_del_vq(VirtIOQueueInfo *info)
{
    struct virtqueue *vq = info->vq;
    VirtIODevice *vdev = vq->vdev;

    iowrite16(vdev, (u16)vq->index, &vdev->common->queue_select);

    if (vdev->msix_used) {
        iowrite16(vdev, VIRTIO_MSI_NO_VECTOR, &vdev->common->queue_msix_vector);
        /* Flush the write out to device */
        ioread16(vdev, &vdev->common->queue_msix_vector);
    }

    virtqueue_shutdown(vq);

    mem_free_nonpaged_block(vdev, vq);
    mem_free_contiguous_pages(vdev, info->queue);
}

static const struct virtio_device_ops virtio_pci_device_ops = {
    /* .get_config = */ vio_modern_get_config,
    /* .set_config = */ vio_modern_set_config,
    /* .get_config_generation = */ vio_modern_get_generation,
    /* .get_status = */ vio_modern_get_status,
    /* .set_status = */ vio_modern_set_status,
    /* .reset = */ vio_modern_reset,
    /* .get_features = */ vio_modern_get_features,
    /* .set_features = */ vio_modern_set_features,
    /* .set_config_vector = */ vio_modern_set_config_vector,
    /* .set_queue_vector = */ vio_modern_set_queue_vector,
    /* .query_queue_alloc = */ vio_modern_query_vq_alloc,
    /* .setup_queue = */ vio_modern_setup_vq,
    /* .delete_queue = */ vio_modern_del_vq,
};

static u8 find_next_pci_vendor_capability(VirtIODevice *vdev, u8 offset)
{
    u8 id = 0;
    int iterations = 48;

    if (pci_read_config_byte(vdev, offset, &offset) != 0) {
        return 0;
    }

    while (iterations-- && offset >= 0x40) {
        offset &= ~3;
        if (pci_read_config_byte(vdev, offset + offsetof(PCI_CAPABILITIES_HEADER,
                                                         CapabilityID), &id) != 0) {
            break;
        }
        if (id == 0xFF) {
            break;
        }
        if (id == PCI_CAPABILITY_ID_VENDOR_SPECIFIC) {
            return offset;
        }
        if (pci_read_config_byte(vdev, offset + offsetof(PCI_CAPABILITIES_HEADER,
                                                         Next), &offset) != 0) {
            break;
        }
    }
    return 0;
}

static u8 find_first_pci_vendor_capability(VirtIODevice *vdev)
{
    u8 hdr_type, offset;
    u16 status;

    if (pci_read_config_byte(vdev, offsetof(PCI_COMMON_HEADER, HeaderType), &hdr_type) != 0) {
        return 0;
    }
    if (pci_read_config_word(vdev, offsetof(PCI_COMMON_HEADER, Status), &status) != 0) {
        return 0;
    }
    if ((status & PCI_STATUS_CAPABILITIES_LIST) == 0) {
        return 0;
    }

    switch (hdr_type & ~PCI_MULTIFUNCTION) {
    case PCI_BRIDGE_TYPE:
        offset = offsetof(PCI_COMMON_HEADER, u.type1.CapabilitiesPtr);
        break;
    case PCI_CARDBUS_BRIDGE_TYPE:
        offset = offsetof(PCI_COMMON_HEADER, u.type2.CapabilitiesPtr);
        break;
    default:
        offset = offsetof(PCI_COMMON_HEADER, u.type0.CapabilitiesPtr);
        break;
    }

    if (offset != 0) {
        offset = find_next_pci_vendor_capability(vdev, offset);
    }
    return offset;
}

/* Populate Offsets with virtio vendor capability offsets within the PCI config space */
static void find_pci_vendor_capabilities(VirtIODevice *vdev, int *Offsets, size_t nOffsets)
{
    u8 offset = find_first_pci_vendor_capability(vdev);
    while (offset > 0) {
        u8 cfg_type, bar;
        pci_read_config_byte(vdev, offset + offsetof(struct virtio_pci_cap, cfg_type), &cfg_type);
        pci_read_config_byte(vdev, offset + offsetof(struct virtio_pci_cap, bar), &bar);

        if (bar < PCI_TYPE0_ADDRESSES &&
            cfg_type < nOffsets &&
            pci_get_resource_len(vdev, bar) > 0) {
            Offsets[cfg_type] = offset;
        }

        offset = find_next_pci_vendor_capability(vdev, offset + offsetof(PCI_CAPABILITIES_HEADER, Next));
    }
}

/* Modern device initialization */
NTSTATUS vio_modern_initialize(VirtIODevice *vdev)
{
    int capabilities[VIRTIO_PCI_CAP_PCI_CFG];

    u32 notify_length;
    u32 notify_offset;

    RtlZeroMemory(capabilities, sizeof(capabilities));
    find_pci_vendor_capabilities(vdev, capabilities, VIRTIO_PCI_CAP_PCI_CFG);

    /* Check for a common config, if not found use legacy mode */
    if (!capabilities[VIRTIO_PCI_CAP_COMMON_CFG]) {
        DPrintf(0, "%s(%p): device not found\n", __FUNCTION__, vdev);
        return STATUS_DEVICE_NOT_CONNECTED;
    }

    /* Check isr and notify caps, if not found fail */
    if (!capabilities[VIRTIO_PCI_CAP_ISR_CFG] || !capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG]) {
        DPrintf(0, "%s(%p): missing capabilities %i/%i/%i\n",
                __FUNCTION__, vdev,
                capabilities[VIRTIO_PCI_CAP_COMMON_CFG],
                capabilities[VIRTIO_PCI_CAP_ISR_CFG],
                capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG]);
        return STATUS_INVALID_PARAMETER;
    }

    /* Map bars according to the capabilities */
    vdev->common = vio_modern_map_simple_capability(vdev,
        capabilities[VIRTIO_PCI_CAP_COMMON_CFG],
        sizeof(struct virtio_pci_common_cfg), 4);
    if (!vdev->common) {
        return STATUS_INVALID_PARAMETER;
    }

    vdev->isr = vio_modern_map_simple_capability(vdev,
        capabilities[VIRTIO_PCI_CAP_ISR_CFG],
        sizeof(u8), 1);
    if (!vdev->isr) {
        return STATUS_INVALID_PARAMETER;
    }

    /* Read notify_off_multiplier from config space. */
    pci_read_config_dword(vdev,
        capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG] + offsetof(struct virtio_pci_notify_cap,
                                                           notify_off_multiplier),
        &vdev->notify_offset_multiplier);

    /* Read notify length and offset from config space. */
    pci_read_config_dword(vdev,
        capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG] + offsetof(struct virtio_pci_notify_cap,
                                                           cap.length),
        &notify_length);
    pci_read_config_dword(vdev,
        capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG] + offsetof(struct virtio_pci_notify_cap,
                                                           cap.offset),
        &notify_offset);

    /* Map the notify capability if it's small enough.
     * Otherwise, map each VQ individually later.
     */
    if (notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
        vdev->notify_base = vio_modern_map_capability(vdev,
            capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG], 2, 2,
            0, notify_length,
            &vdev->notify_len);
        if (!vdev->notify_base) {
            return STATUS_INVALID_PARAMETER;
        }
    } else {
        vdev->notify_map_cap = capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG];
    }

    /* Map the device config capability, the PAGE_SIZE size is a guess */
    if (capabilities[VIRTIO_PCI_CAP_DEVICE_CFG]) {
        vdev->config = vio_modern_map_capability(vdev,
            capabilities[VIRTIO_PCI_CAP_DEVICE_CFG], 0, 4,
            0, PAGE_SIZE,
            &vdev->config_len);
        if (!vdev->config) {
            return STATUS_INVALID_PARAMETER;
        }
    }

    vdev->device = &virtio_pci_device_ops;

    return STATUS_SUCCESS;
}
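The modern transport computes each queue's doorbell from the notify capability instead of using one fixed register. A worked example of the address math in vio_modern_setup_vq, with illustrative values:

/* With queue_notify_off == 3 for this queue and notify_off_multiplier == 4,
 * the doorbell is notify_base + 3 * 4 = notify_base + 12; vp_notify() then
 * iowrite16()s the queue index to that address. The "+ 2" in the bounds
 * check above is the size of that 16-bit write. */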
@ -1,651 +0,0 @@
|
|||
/*
|
||||
* Packed virtio ring manipulation routines
|
||||
*
|
||||
* Copyright 2019 Red Hat, Inc.
|
||||
*
|
||||
* Authors:
|
||||
* Yuri Benditovich <ybendito@redhat.com>
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met :
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and / or other materials provided with the distribution.
|
||||
* 3. Neither the names of the copyright holders nor the names of their contributors
|
||||
* may be used to endorse or promote products derived from this software
|
||||
* without specific prior written permission.
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include "osdep.h"
|
||||
#include "virtio_pci.h"
|
||||
#include "VirtIO.h"
|
||||
#include "kdebugprint.h"
|
||||
#include "virtio_ring.h"
|
||||
#include "windows/virtio_ring_allocation.h"
|
||||
|
||||
#include <pshpack1.h>
|
||||
|
||||
struct vring_packed_desc_event {
|
||||
/* Descriptor Ring Change Event Offset/Wrap Counter. */
|
||||
__le16 off_wrap;
|
||||
/* Descriptor Ring Change Event Flags. */
|
||||
__le16 flags;
|
||||
};
|
||||
|
||||
struct vring_packed_desc {
|
||||
/* Buffer Address. */
|
||||
__virtio64 addr;
|
||||
/* Buffer Length. */
|
||||
__le32 len;
|
||||
/* Buffer ID. */
|
||||
__le16 id;
|
||||
/* The flags depending on descriptor type. */
|
||||
__le16 flags;
|
||||
};
|
||||
|
||||
#include <poppack.h>
|
||||
|
||||
#define BUG_ON(condition) { if (condition) { KeBugCheck(0xE0E1E2E3); }}
|
||||
#define BAD_RING(vq, fmt, ...) DPrintf(0, "%s: queue %d: " fmt, __FUNCTION__, vq->vq.index, __VA_ARGS__); BUG_ON(true)
|
||||
|
||||
/* This marks a buffer as continuing via the next field. */
|
||||
#define VRING_DESC_F_NEXT 1
|
||||
/* This marks a buffer as write-only (otherwise read-only). */
|
||||
#define VRING_DESC_F_WRITE 2
|
||||
/* This means the buffer contains a list of buffer descriptors. */
|
||||
#define VRING_DESC_F_INDIRECT 4
|
||||
|
||||
/*
|
||||
* Mark a descriptor as available or used in packed ring.
|
||||
* Notice: they are defined as shifts instead of shifted values.
|
||||
*/
|
||||
#define VRING_PACKED_DESC_F_AVAIL 7
|
||||
#define VRING_PACKED_DESC_F_USED 15

/* Enable events in packed ring. */
#define VRING_PACKED_EVENT_FLAG_ENABLE 0x0
/* Disable events in packed ring. */
#define VRING_PACKED_EVENT_FLAG_DISABLE 0x1

/*
 * Enable events for a specific descriptor in packed ring.
 * (as specified by Descriptor Ring Change Event Offset/Wrap Counter).
 * Only valid if VIRTIO_RING_F_EVENT_IDX has been negotiated.
 */
#define VRING_PACKED_EVENT_FLAG_DESC 0x2
/*
 * Wrap counter bit shift in event suppression structure
 * of packed ring.
 */
#define VRING_PACKED_EVENT_F_WRAP_CTR 15

/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event?
 */
static inline bool vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
    /* Note: Xen has similar logic for notification hold-off
     * in include/xen/interface/io/ring.h with req_event and req_prod
     * corresponding to event_idx + 1 and new_idx respectively.
     * Note also that req_event and req_prod in Xen start at 1,
     * event indexes in virtio start at 0. */
    return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}
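
/*
 * Worked example (illustrative addition): suppose the device published
 * event_idx = 1 and the driver has just advanced its index from old = 1 to
 * new_idx = 3. Then (__u16)(3 - 1 - 1) = 1 < (__u16)(3 - 1) = 2, so the
 * driver crossed the event index and must notify. The unsigned arithmetic
 * keeps the comparison correct across 16-bit index wraparound.
 */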

struct vring_desc_state_packed {
    void *data; /* Data for callback. */
    u16 num;    /* Descriptor list length. */
    u16 next;   /* The next desc state in a list. */
    u16 last;   /* The last desc state in a list. */
};

struct virtqueue_packed {
    struct virtqueue vq;
    /* Number we've added since last sync. */
    unsigned int num_added;
    /* Head of free buffer list. */
    unsigned int free_head;
    /* Number of free descriptors */
    unsigned int num_free;
    /* Last used index we've seen. */
    u16 last_used_idx;
    /* Avail used flags. */
    u16 avail_used_flags;
    struct
    {
        /* Driver ring wrap counter. */
        bool avail_wrap_counter;
        /* Device ring wrap counter. */
        bool used_wrap_counter;
        /* Index of the next avail descriptor. */
        u16 next_avail_idx;
        /*
         * Last written value to driver->flags in
         * guest byte order.
         */
        u16 event_flags_shadow;
        struct {
            unsigned int num;
            struct vring_packed_desc *desc;
            struct vring_packed_desc_event *driver;
            struct vring_packed_desc_event *device;
        } vring;
        /* Per-descriptor state. */
        struct vring_desc_state_packed *desc_state;
    } packed;
    struct vring_desc_state_packed desc_states[];
};

#define packedvq(vq) ((struct virtqueue_packed *)vq)

unsigned int vring_control_block_size_packed(u16 qsize)
{
    return sizeof(struct virtqueue_packed) + sizeof(struct vring_desc_state_packed) * qsize;
}

unsigned long vring_size_packed(unsigned int num, unsigned long align)
{
    /* array of descriptors */
    unsigned long res = num * sizeof(struct vring_packed_desc);
    /* driver and device event */
    res += 2 * sizeof(struct vring_packed_desc_event);
    return res;
}
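
/*
 * Worked example (illustrative addition): for a 256-entry queue the packed
 * ring needs 256 * 16 = 4096 bytes of descriptors plus two 4-byte event
 * suppression structures, i.e. 4104 bytes in total; the align argument is
 * unused here because the packed layout has no split avail/used sections
 * that need padding.
 */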

static int virtqueue_add_buf_packed(
    struct virtqueue *_vq,   /* the queue */
    struct scatterlist sg[], /* sg array of length out + in */
    unsigned int out,        /* number of driver->device buffer descriptors in sg */
    unsigned int in,         /* number of device->driver buffer descriptors in sg */
    void *opaque,            /* later returned from virtqueue_get_buf */
    void *va_indirect,       /* VA of the indirect page or NULL */
    ULONGLONG phys_indirect) /* PA of the indirect page or 0 */
{
    struct virtqueue_packed *vq = packedvq(_vq);
    unsigned int descs_used;
    struct vring_packed_desc *desc;
    u16 head, id, i;

    descs_used = out + in;
    head = vq->packed.next_avail_idx;
    id = (u16)vq->free_head;

    BUG_ON(descs_used == 0);
    BUG_ON(id >= vq->packed.vring.num);

    if (va_indirect && vq->num_free > 0) {
        desc = va_indirect;
        for (i = 0; i < descs_used; i++) {
            desc[i].flags = i < out ? 0 : VRING_DESC_F_WRITE;
            desc[i].addr = sg[i].physAddr.QuadPart;
            desc[i].len = sg[i].length;
        }
        vq->packed.vring.desc[head].addr = phys_indirect;
        vq->packed.vring.desc[head].len = descs_used * sizeof(struct vring_packed_desc);
        vq->packed.vring.desc[head].id = id;

        KeMemoryBarrier();
        vq->packed.vring.desc[head].flags = VRING_DESC_F_INDIRECT | vq->avail_used_flags;

        DPrintf(5, "Added buffer head %i to Q%d\n", head, vq->vq.index);
        head++;
        if (head >= vq->packed.vring.num) {
            head = 0;
            vq->packed.avail_wrap_counter ^= 1;
            vq->avail_used_flags ^=
                1 << VRING_PACKED_DESC_F_AVAIL |
                1 << VRING_PACKED_DESC_F_USED;
        }
        vq->packed.next_avail_idx = head;
        /* We're using some buffers from the free list. */
        vq->num_free -= 1;
        vq->num_added += 1;

        vq->free_head = vq->packed.desc_state[id].next;

        /* Store token and indirect buffer state. */
        vq->packed.desc_state[id].num = 1;
        vq->packed.desc_state[id].data = opaque;
        vq->packed.desc_state[id].last = id;

    } else {
        unsigned int n;
        u16 curr, prev, head_flags;
        if (vq->num_free < descs_used) {
            DPrintf(6, "Can't add buffer to Q%d\n", vq->vq.index);
            return -ENOSPC;
        }
        desc = vq->packed.vring.desc;
        i = head;
        curr = id;
        for (n = 0; n < descs_used; n++) {
            u16 flags = vq->avail_used_flags;
            flags |= n < out ? 0 : VRING_DESC_F_WRITE;
            if (n != descs_used - 1) {
                flags |= VRING_DESC_F_NEXT;
            }
            desc[i].addr = sg[n].physAddr.QuadPart;
            desc[i].len = sg[n].length;
            desc[i].id = id;
            if (n == 0) {
                head_flags = flags;
            }
            else {
                desc[i].flags = flags;
            }

            prev = curr;
            curr = vq->packed.desc_state[curr].next;

            if (++i >= vq->packed.vring.num) {
                i = 0;
                vq->avail_used_flags ^=
                    1 << VRING_PACKED_DESC_F_AVAIL |
                    1 << VRING_PACKED_DESC_F_USED;
            }
        }

        if (i < head)
            vq->packed.avail_wrap_counter ^= 1;

        /* We're using some buffers from the free list. */
        vq->num_free -= descs_used;

        /* Update free pointer */
        vq->packed.next_avail_idx = i;
        vq->free_head = curr;

        /* Store token. */
        vq->packed.desc_state[id].num = (u16)descs_used;
        vq->packed.desc_state[id].data = opaque;
        vq->packed.desc_state[id].last = prev;

        /*
         * A driver MUST NOT make the first descriptor in the list
         * available before all subsequent descriptors comprising
         * the list are made available.
         */
        KeMemoryBarrier();
        vq->packed.vring.desc[head].flags = head_flags;
        vq->num_added += descs_used;

        DPrintf(5, "Added buffer head @%i+%d to Q%d\n", head, descs_used, vq->vq.index);
    }

    return 0;
}
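
/*
 * Usage sketch (illustrative addition, assuming the virtqueue_add_buf and
 * virtqueue_kick_prepare wrappers declared in VirtIO.h; the example_ name is
 * hypothetical): queue one driver->device buffer followed by one
 * device->driver buffer, then notify the device only when it asked for it.
 */
static void example_push_and_kick(struct virtqueue *vq,
                                  struct scatterlist sg[2],
                                  void *token)
{
    if (virtqueue_add_buf(vq, sg, 1, 1, token, NULL, 0) == 0 &&
        virtqueue_kick_prepare(vq)) {
        virtqueue_notify(vq);
    }
}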

static void detach_buf_packed(struct virtqueue_packed *vq, unsigned int id)
{
    struct vring_desc_state_packed *state = &vq->packed.desc_state[id];

    /* Clear data ptr. */
    state->data = NULL;

    vq->packed.desc_state[state->last].next = (u16)vq->free_head;
    vq->free_head = id;
    vq->num_free += state->num;
}

static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    unsigned int i;
    void *buf;

    for (i = 0; i < vq->packed.vring.num; i++) {
        if (!vq->packed.desc_state[i].data)
            continue;
        /* detach_buf clears data, so grab it now. */
        buf = vq->packed.desc_state[i].data;
        detach_buf_packed(vq, i);
        return buf;
    }
    /* That should have freed everything. */
    BUG_ON(vq->num_free != vq->packed.vring.num);

    return NULL;
}

static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);

    if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
        vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
        vq->packed.vring.driver->flags = vq->packed.event_flags_shadow;
    }
}

static inline bool is_used_desc_packed(const struct virtqueue_packed *vq,
                                       u16 idx, bool used_wrap_counter)
{
    bool avail, used;
    u16 flags;

    flags = vq->packed.vring.desc[idx].flags;
    avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
    used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

    return avail == used && used == used_wrap_counter;
}

static inline bool virtqueue_poll_packed(struct virtqueue_packed *vq, u16 off_wrap)
{
    bool wrap_counter;
    u16 used_idx;
    KeMemoryBarrier();

    wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
    used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);

    return is_used_desc_packed(vq, used_idx, wrap_counter);
}

static inline unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue_packed *vq)
{
    bool event_suppression_enabled = vq->vq.vdev->event_suppression_enabled;
    /*
     * We optimistically turn back on interrupts, then check if there was
     * more to do.
     */

    if (event_suppression_enabled) {
        vq->packed.vring.driver->off_wrap =
            vq->last_used_idx |
            (vq->packed.used_wrap_counter <<
             VRING_PACKED_EVENT_F_WRAP_CTR);
        /*
         * We need to update event offset and event wrap
         * counter first before updating event flags.
         */
        KeMemoryBarrier();
    }

    if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
        vq->packed.event_flags_shadow = event_suppression_enabled ?
            VRING_PACKED_EVENT_FLAG_DESC :
            VRING_PACKED_EVENT_FLAG_ENABLE;
        vq->packed.vring.driver->flags = vq->packed.event_flags_shadow;
    }

    return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
        VRING_PACKED_EVENT_F_WRAP_CTR);
}

static bool virtqueue_enable_cb_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    unsigned last_used_idx = virtqueue_enable_cb_prepare_packed(vq);

    return !virtqueue_poll_packed(vq, (u16)last_used_idx);
}

static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    bool event_suppression_enabled = vq->vq.vdev->event_suppression_enabled;
    u16 used_idx, wrap_counter;
    u16 bufs;

    /*
     * We optimistically turn back on interrupts, then check if there was
     * more to do.
     */

    if (event_suppression_enabled) {
        /* TODO: tune this threshold */
        bufs = (vq->packed.vring.num - vq->num_free) * 3 / 4;
        wrap_counter = vq->packed.used_wrap_counter;

        used_idx = vq->last_used_idx + bufs;
        if (used_idx >= vq->packed.vring.num) {
            used_idx -= (u16)vq->packed.vring.num;
            wrap_counter ^= 1;
        }

        vq->packed.vring.driver->off_wrap = used_idx |
            (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR);

        /*
         * We need to update event offset and event wrap
         * counter first before updating event flags.
         */
        KeMemoryBarrier();
    }

    if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
        vq->packed.event_flags_shadow = event_suppression_enabled ?
            VRING_PACKED_EVENT_FLAG_DESC :
            VRING_PACKED_EVENT_FLAG_ENABLE;
        vq->packed.vring.driver->flags = vq->packed.event_flags_shadow;
    }

    /*
     * We need to update event suppression structure first
     * before re-checking for more used buffers.
     */
    KeMemoryBarrier();

    if (is_used_desc_packed(vq,
                            vq->last_used_idx,
                            vq->packed.used_wrap_counter)) {
        return false;
    }

    return true;
}

static BOOLEAN virtqueue_is_interrupt_enabled_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    return vq->packed.event_flags_shadow & VRING_PACKED_EVENT_FLAG_DISABLE;
}

static void virtqueue_shutdown_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    unsigned int num = vq->packed.vring.num;
    void *pages = vq->packed.vring.desc;
    unsigned int vring_align = _vq->vdev->addr ? PAGE_SIZE : SMP_CACHE_BYTES;

    RtlZeroMemory(pages, vring_size_packed(num, vring_align));
    vring_new_virtqueue_packed(
        _vq->index,
        num,
        vring_align,
        _vq->vdev,
        pages,
        _vq->notification_cb,
        _vq);
}

static inline bool more_used_packed(const struct virtqueue_packed *vq)
{
    return is_used_desc_packed(vq, vq->last_used_idx,
        vq->packed.used_wrap_counter);
}

static void *virtqueue_get_buf_packed(
    struct virtqueue *_vq, /* the queue */
    unsigned int *len)     /* number of bytes returned by the device */
{
    struct virtqueue_packed *vq = packedvq(_vq);
    u16 last_used, id;
    void *ret;

    if (!more_used_packed(vq)) {
        DPrintf(6, "%s: No more buffers in queue\n", __FUNCTION__);
        return NULL;
    }

    /* Only get used elements after they have been exposed by host. */
    KeMemoryBarrier();

    last_used = vq->last_used_idx;
    id = vq->packed.vring.desc[last_used].id;
    *len = vq->packed.vring.desc[last_used].len;

    if (id >= vq->packed.vring.num) {
        BAD_RING(vq, "id %u out of range\n", id);
        return NULL;
    }
    if (!vq->packed.desc_state[id].data) {
        BAD_RING(vq, "id %u is not a head!\n", id);
        return NULL;
    }

    /* detach_buf_packed clears data, so grab it now. */
    ret = vq->packed.desc_state[id].data;
    detach_buf_packed(vq, id);

    vq->last_used_idx += vq->packed.desc_state[id].num;
    if (vq->last_used_idx >= vq->packed.vring.num) {
        vq->last_used_idx -= (u16)vq->packed.vring.num;
        vq->packed.used_wrap_counter ^= 1;
    }

    /*
     * If we expect an interrupt for the next entry, tell host
     * by writing event index and flush out the write before
     * the read in the next get_buf call.
     */
    if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC) {
        vq->packed.vring.driver->off_wrap = vq->last_used_idx |
            ((u16)vq->packed.used_wrap_counter <<
             VRING_PACKED_EVENT_F_WRAP_CTR);
        KeMemoryBarrier();
    }

    return ret;
}
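
/*
 * Usage sketch (illustrative addition, assuming the virtqueue_get_buf wrapper
 * declared in VirtIO.h): a DPC typically drains every completed buffer in a
 * loop, completing the request identified by the returned token.
 */
static void example_drain_completions(struct virtqueue *vq)
{
    unsigned int len;
    void *token;

    while ((token = virtqueue_get_buf(vq, &len)) != NULL) {
        /* complete the request behind 'token'; the device wrote 'len' bytes */
    }
}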

static BOOLEAN virtqueue_has_buf_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    return more_used_packed(vq);
}

static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    u16 new, old, off_wrap, flags, wrap_counter, event_idx;
    bool needs_kick;
    union {
        struct {
            __le16 off_wrap;
            __le16 flags;
        };
        u32 value32;
    } snapshot;

    /*
     * We need to expose the new flags value before checking notification
     * suppressions.
     */
    KeMemoryBarrier();

    old = vq->packed.next_avail_idx - vq->num_added;
    new = vq->packed.next_avail_idx;
    vq->num_added = 0;

    snapshot.value32 = *(u32 *)vq->packed.vring.device;
    flags = snapshot.flags;

    if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
        needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
        goto out;
    }

    off_wrap = snapshot.off_wrap;

    wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
    event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
    if (wrap_counter != vq->packed.avail_wrap_counter)
        event_idx -= (u16)vq->packed.vring.num;

    needs_kick = vring_need_event(event_idx, new, old);
out:
    return needs_kick;
}

static void virtqueue_kick_always_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    KeMemoryBarrier();
    vq->num_added = 0;
    virtqueue_notify(_vq);
}

/* Initializes a new virtqueue using already allocated memory */
struct virtqueue *vring_new_virtqueue_packed(
    unsigned int index,                 /* virtqueue index */
    unsigned int num,                   /* virtqueue size (always a power of 2) */
    unsigned int vring_align,           /* vring alignment requirement */
    VirtIODevice *vdev,                 /* the virtio device owning the queue */
    void *pages,                        /* vring memory */
    void (*notify)(struct virtqueue *), /* notification callback */
    void *control)                      /* virtqueue memory */
{
    struct virtqueue_packed *vq = packedvq(control);
    unsigned int i;

    vq->vq.vdev = vdev;
    vq->vq.notification_cb = notify;
    vq->vq.index = index;

    vq->vq.avail_va = (u8 *)pages + num * sizeof(struct vring_packed_desc);
    vq->vq.used_va = (u8 *)vq->vq.avail_va + sizeof(struct vring_packed_desc_event);

    /* initialize the ring */
    vq->packed.vring.num = num;
    vq->packed.vring.desc = pages;
    vq->packed.vring.driver = vq->vq.avail_va;
    vq->packed.vring.device = vq->vq.used_va;

    vq->num_free = num;
    vq->free_head = 0;
    vq->num_added = 0;
    vq->packed.avail_wrap_counter = 1;
    vq->packed.used_wrap_counter = 1;
    vq->last_used_idx = 0;
    vq->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
    vq->packed.next_avail_idx = 0;
    vq->packed.event_flags_shadow = 0;
    vq->packed.desc_state = vq->desc_states;

    RtlZeroMemory(vq->packed.desc_state, num * sizeof(*vq->packed.desc_state));
    for (i = 0; i < num - 1; i++) {
        vq->packed.desc_state[i].next = i + 1;
    }

    vq->vq.add_buf = virtqueue_add_buf_packed;
    vq->vq.detach_unused_buf = virtqueue_detach_unused_buf_packed;
    vq->vq.disable_cb = virtqueue_disable_cb_packed;
    vq->vq.enable_cb = virtqueue_enable_cb_packed;
    vq->vq.enable_cb_delayed = virtqueue_enable_cb_delayed_packed;
    vq->vq.get_buf = virtqueue_get_buf_packed;
    vq->vq.has_buf = virtqueue_has_buf_packed;
    vq->vq.is_interrupt_enabled = virtqueue_is_interrupt_enabled_packed;
    vq->vq.kick_always = virtqueue_kick_always_packed;
    vq->vq.kick_prepare = virtqueue_kick_prepare_packed;
    vq->vq.shutdown = virtqueue_shutdown_packed;
    return &vq->vq;
}
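
/*
 * Setup sketch (illustrative addition; the example_ names are hypothetical):
 * the caller sizes the control block and the ring with the two helpers above,
 * hands over zeroed ring memory, and receives an operational queue.
 */
static void example_notify(struct virtqueue *vq)
{
    UNREFERENCED_PARAMETER(vq); /* a real callback writes the device's queue notify register */
}

static struct virtqueue *example_create_packed_queue(
    VirtIODevice *vdev,
    unsigned int index,
    u16 qsize,
    void *ring_pages, /* vring_size_packed(qsize, PAGE_SIZE) zeroed bytes */
    void *control)    /* vring_control_block_size_packed(qsize) bytes */
{
    return vring_new_virtqueue_packed(index, qsize, PAGE_SIZE, vdev,
                                      ring_pages, example_notify, control);
}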

@ -1,562 +0,0 @@
/*
 * Virtio ring manipulation routines
 *
 * Copyright 2017 Red Hat, Inc.
 *
 * Authors:
 *  Ladi Prosek <lprosek@redhat.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of their contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "osdep.h"
#include "virtio_pci.h"
#include "VirtIO.h"
#include "kdebugprint.h"
#include "virtio_ring.h"
#include "windows/virtio_ring_allocation.h"

#define DESC_INDEX(num, i) ((i) & ((num) - 1))

/* This marks a buffer as continuing via the next field. */
#define VIRTQ_DESC_F_NEXT 1
/* This marks a buffer as write-only (otherwise read-only). */
#define VIRTQ_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
#define VIRTQ_DESC_F_INDIRECT 4

/* The Host uses this in used->flags to advise the Guest: don't kick me when
 * you add a buffer. It's unreliable, so it's simply an optimization. Guest
 * will still kick if it's out of buffers. */
#define VIRTQ_USED_F_NO_NOTIFY 1
/* The Guest uses this in avail->flags to advise the Host: don't interrupt me
 * when you consume a buffer. It's unreliable, so it's simply an
 * optimization. */
#define VIRTQ_AVAIL_F_NO_INTERRUPT 1

#pragma warning (push)
#pragma warning (disable:4200)

#include <pshpack1.h>

/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
struct vring_desc {
    /* Address (guest-physical). */
    __virtio64 addr;
    /* Length. */
    __virtio32 len;
    /* The flags as indicated above. */
    __virtio16 flags;
    /* We chain unused descriptors via this, too */
    __virtio16 next;
};

struct vring_avail {
    __virtio16 flags;
    __virtio16 idx;
    __virtio16 ring[];
};

/* u32 is used here for ids for padding reasons. */
struct vring_used_elem {
    /* Index of start of used descriptor chain. */
    __virtio32 id;
    /* Total length of the descriptor chain which was used (written to) */
    __virtio32 len;
};

struct vring_used {
    __virtio16 flags;
    __virtio16 idx;
    struct vring_used_elem ring[];
};

#include <poppack.h>

/* Alignment requirements for vring elements.
 * When using pre-virtio 1.0 layout, these fall out naturally.
 */
#define VRING_AVAIL_ALIGN_SIZE 2
#define VRING_USED_ALIGN_SIZE 4
#define VRING_DESC_ALIGN_SIZE 16

/* The standard layout for the ring is a continuous chunk of memory which looks
 * like this. We assume num is a power of 2.
 *
 * struct vring
 * {
 *     // The actual descriptors (16 bytes each)
 *     struct vring_desc desc[num];
 *
 *     // A ring of available descriptor heads with free-running index.
 *     __virtio16 avail_flags;
 *     __virtio16 avail_idx;
 *     __virtio16 available[num];
 *     __virtio16 used_event_idx;
 *
 *     // Padding to the next align boundary.
 *     char pad[];
 *
 *     // A ring of used descriptor heads with free-running index.
 *     __virtio16 used_flags;
 *     __virtio16 used_idx;
 *     struct vring_used_elem used[num];
 *     __virtio16 avail_event_idx;
 * };
 */
/* We publish the used event index at the end of the available ring, and vice
 * versa. They are at the end for backwards compatibility. */

struct vring {
    unsigned int num;

    struct vring_desc *desc;

    struct vring_avail *avail;

    struct vring_used *used;
};

#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr) (*(__virtio16 *)&(vr)->used->ring[(vr)->num])

static inline void vring_init(struct vring *vr, unsigned int num, void *p,
                              unsigned long align)
{
    vr->num = num;
    vr->desc = (struct vring_desc *)p;
    vr->avail = (struct vring_avail *)((__u8 *)p + num * sizeof(struct vring_desc));
    vr->used = (struct vring_used *)(((ULONG_PTR)&vr->avail->ring[num] + sizeof(__virtio16)
        + align - 1) & ~((ULONG_PTR)align - 1));
}

static inline unsigned vring_size_split(unsigned int num, unsigned long align)
{
#pragma warning (push)
#pragma warning (disable:4319)
    return ((sizeof(struct vring_desc) * num + sizeof(__virtio16) * (3 + num)
        + align - 1) & ~(align - 1))
        + sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num;
#pragma warning(pop)
}
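
/*
 * Worked example (illustrative addition): for num = 256 and align = 4096 the
 * descriptor table takes 256 * 16 = 4096 bytes and the available ring takes
 * 2 + 2 + 256 * 2 + 2 = 518 bytes, so the used ring starts at the next page
 * boundary (8192) and adds 2 + 2 + 256 * 8 + 2 = 2054 bytes, giving
 * vring_size_split(256, 4096) == 10246 bytes overall.
 */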

/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event? */
static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
    /* Note: Xen has similar logic for notification hold-off
     * in include/xen/interface/io/ring.h with req_event and req_prod
     * corresponding to event_idx + 1 and new_idx respectively.
     * Note also that req_event and req_prod in Xen start at 1,
     * event indexes in virtio start at 0. */
    return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}

struct virtqueue_split {
    struct virtqueue vq;
    struct vring vring;
    struct {
        u16 flags;
        u16 idx;
    } master_vring_avail;
    unsigned int num_unused;
    unsigned int num_added_since_kick;
    u16 first_unused;
    u16 last_used;
    void *opaque[];
};

#define splitvq(vq) ((struct virtqueue_split *)vq)

#pragma warning (pop)

/* Returns the index of the first unused descriptor */
static inline u16 get_unused_desc(struct virtqueue_split *vq)
{
    u16 idx = vq->first_unused;
    ASSERT(vq->num_unused > 0);

    vq->first_unused = vq->vring.desc[idx].next;
    vq->num_unused--;
    return idx;
}

/* Marks the descriptor chain starting at index idx as unused */
static inline void put_unused_desc_chain(struct virtqueue_split *vq, u16 idx)
{
    u16 start = idx;

    vq->opaque[idx] = NULL;
    while (vq->vring.desc[idx].flags & VIRTQ_DESC_F_NEXT) {
        idx = vq->vring.desc[idx].next;
        vq->num_unused++;
    }

    vq->vring.desc[idx].flags = VIRTQ_DESC_F_NEXT;
    vq->vring.desc[idx].next = vq->first_unused;
    vq->num_unused++;

    vq->first_unused = start;
}

/* Adds a buffer to a virtqueue, returns 0 on success, negative number on error */
static int virtqueue_add_buf_split(
    struct virtqueue *_vq,   /* the queue */
    struct scatterlist sg[], /* sg array of length out + in */
    unsigned int out,        /* number of driver->device buffer descriptors in sg */
    unsigned int in,         /* number of device->driver buffer descriptors in sg */
    void *opaque,            /* later returned from virtqueue_get_buf */
    void *va_indirect,       /* VA of the indirect page or NULL */
    ULONGLONG phys_indirect) /* PA of the indirect page or 0 */
{
    struct virtqueue_split *vq = splitvq(_vq);
    struct vring *vring = &vq->vring;
    unsigned int i;
    u16 idx;

    if (va_indirect && (out + in) > 1 && vq->num_unused > 0) {
        /* Use one indirect descriptor */
        struct vring_desc *desc = (struct vring_desc *)va_indirect;

        for (i = 0; i < out + in; i++) {
            desc[i].flags = (i < out ? 0 : VIRTQ_DESC_F_WRITE);
            desc[i].flags |= VIRTQ_DESC_F_NEXT;
            desc[i].addr = sg[i].physAddr.QuadPart;
            desc[i].len = sg[i].length;
            desc[i].next = (u16)i + 1;
        }
        desc[i - 1].flags &= ~VIRTQ_DESC_F_NEXT;

        idx = get_unused_desc(vq);
        vq->vring.desc[idx].flags = VIRTQ_DESC_F_INDIRECT;
        vq->vring.desc[idx].addr = phys_indirect;
        vq->vring.desc[idx].len = i * sizeof(struct vring_desc);

        vq->opaque[idx] = opaque;
    } else {
        u16 last_idx;

        /* Use out + in regular descriptors */
        if (out + in > vq->num_unused) {
            return -ENOSPC;
        }

        /* First descriptor */
        idx = last_idx = get_unused_desc(vq);
        vq->opaque[idx] = opaque;

        vring->desc[idx].addr = sg[0].physAddr.QuadPart;
        vring->desc[idx].len = sg[0].length;
        vring->desc[idx].flags = VIRTQ_DESC_F_NEXT;
        if (out == 0) {
            vring->desc[idx].flags |= VIRTQ_DESC_F_WRITE;
        }
        vring->desc[idx].next = vq->first_unused;

        /* The rest of descriptors */
        for (i = 1; i < out + in; i++) {
            last_idx = get_unused_desc(vq);

            vring->desc[last_idx].addr = sg[i].physAddr.QuadPart;
            vring->desc[last_idx].len = sg[i].length;
            vring->desc[last_idx].flags = VIRTQ_DESC_F_NEXT;
            if (i >= out) {
                vring->desc[last_idx].flags |= VIRTQ_DESC_F_WRITE;
            }
            vring->desc[last_idx].next = vq->first_unused;
        }
        vring->desc[last_idx].flags &= ~VIRTQ_DESC_F_NEXT;
    }

    /* Write the first descriptor into the available ring */
    vring->avail->ring[DESC_INDEX(vring->num, vq->master_vring_avail.idx)] = idx;
    KeMemoryBarrier();
    vring->avail->idx = ++vq->master_vring_avail.idx;
    vq->num_added_since_kick++;

    return 0;
}

/* Gets the opaque pointer associated with a returned buffer, or NULL if no buffer is available */
static void *virtqueue_get_buf_split(
    struct virtqueue *_vq, /* the queue */
    unsigned int *len)     /* number of bytes returned by the device */
{
    struct virtqueue_split *vq = splitvq(_vq);
    void *opaque;
    u16 idx;

    if (vq->last_used == (int)vq->vring.used->idx) {
        /* No descriptor index in the used ring */
        return NULL;
    }
    KeMemoryBarrier();

    idx = DESC_INDEX(vq->vring.num, vq->last_used);
    *len = vq->vring.used->ring[idx].len;

    /* Get the first used descriptor */
    idx = (u16)vq->vring.used->ring[idx].id;
    opaque = vq->opaque[idx];

    /* Put all descriptors back to the free list */
    put_unused_desc_chain(vq, idx);

    vq->last_used++;
    if (_vq->vdev->event_suppression_enabled && virtqueue_is_interrupt_enabled(_vq)) {
        vring_used_event(&vq->vring) = vq->last_used;
        KeMemoryBarrier();
    }

    ASSERT(opaque != NULL);
    return opaque;
}

/* Returns true if at least one returned buffer is available, false otherwise */
static BOOLEAN virtqueue_has_buf_split(struct virtqueue *_vq)
{
    struct virtqueue_split *vq = splitvq(_vq);
    return (vq->last_used != vq->vring.used->idx);
}

/* Returns true if the device should be notified, false otherwise */
static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
    struct virtqueue_split *vq = splitvq(_vq);
    bool wrap_around;
    u16 old, new;
    KeMemoryBarrier();

    wrap_around = (vq->num_added_since_kick >= (1 << 16));

    old = (u16)(vq->master_vring_avail.idx - vq->num_added_since_kick);
    new = vq->master_vring_avail.idx;
    vq->num_added_since_kick = 0;

    if (_vq->vdev->event_suppression_enabled) {
        return wrap_around || (bool)vring_need_event(vring_avail_event(&vq->vring), new, old);
    } else {
        return !(vq->vring.used->flags & VIRTQ_USED_F_NO_NOTIFY);
    }
}

/* Notifies the device even if it's not necessary according to the event suppression logic */
static void virtqueue_kick_always_split(struct virtqueue *_vq)
{
    struct virtqueue_split *vq = splitvq(_vq);
    KeMemoryBarrier();
    vq->num_added_since_kick = 0;
    virtqueue_notify(_vq);
}

/* Enables interrupts on a virtqueue and returns false if the queue has at least one returned
 * buffer available to be fetched by virtqueue_get_buf, true otherwise */
static bool virtqueue_enable_cb_split(struct virtqueue *_vq)
{
    struct virtqueue_split *vq = splitvq(_vq);
    if (!virtqueue_is_interrupt_enabled(_vq)) {
        vq->master_vring_avail.flags &= ~VIRTQ_AVAIL_F_NO_INTERRUPT;
        if (!_vq->vdev->event_suppression_enabled)
        {
            vq->vring.avail->flags = vq->master_vring_avail.flags;
        }
    }

    vring_used_event(&vq->vring) = vq->last_used;
    KeMemoryBarrier();
    return (vq->last_used == vq->vring.used->idx);
}

/* Enables interrupts on a virtqueue after ~3/4 of the currently pushed buffers have been
 * returned, returns false if this condition currently holds, true otherwise */
static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
    struct virtqueue_split *vq = splitvq(_vq);
    u16 bufs;

    if (!virtqueue_is_interrupt_enabled(_vq)) {
        vq->master_vring_avail.flags &= ~VIRTQ_AVAIL_F_NO_INTERRUPT;
        if (!_vq->vdev->event_suppression_enabled)
        {
            vq->vring.avail->flags = vq->master_vring_avail.flags;
        }
    }

    /* Note that 3/4 is an arbitrary threshold */
    bufs = (u16)(vq->master_vring_avail.idx - vq->last_used) * 3 / 4;
    vring_used_event(&vq->vring) = vq->last_used + bufs;
    KeMemoryBarrier();
    return ((vq->vring.used->idx - vq->last_used) <= bufs);
}

/* Disables interrupts on a virtqueue */
static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
    struct virtqueue_split *vq = splitvq(_vq);
    if (virtqueue_is_interrupt_enabled(_vq)) {
        vq->master_vring_avail.flags |= VIRTQ_AVAIL_F_NO_INTERRUPT;
        if (!_vq->vdev->event_suppression_enabled)
        {
            vq->vring.avail->flags = vq->master_vring_avail.flags;
        }
    }
}

/* Returns true if interrupts are enabled on a virtqueue, false otherwise */
static BOOLEAN virtqueue_is_interrupt_enabled_split(struct virtqueue *_vq)
{
    struct virtqueue_split *vq = splitvq(_vq);
    return !(vq->master_vring_avail.flags & VIRTQ_AVAIL_F_NO_INTERRUPT);
}

/* Re-initializes an already initialized virtqueue */
static void virtqueue_shutdown_split(struct virtqueue *_vq)
{
    struct virtqueue_split *vq = splitvq(_vq);
    unsigned int num = vq->vring.num;
    void *pages = vq->vring.desc;
    unsigned int vring_align = _vq->vdev->addr ? PAGE_SIZE : SMP_CACHE_BYTES;

    RtlZeroMemory(pages, vring_size_split(num, vring_align));
    (void)vring_new_virtqueue_split(
        _vq->index,
        vq->vring.num,
        vring_align,
        _vq->vdev,
        pages,
        _vq->notification_cb,
        vq);
}

/* Gets the opaque pointer associated with a not-yet-returned buffer, or NULL if no buffer is
 * available, to aid drivers with cleaning up all data on virtqueue shutdown */
static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
    struct virtqueue_split *vq = splitvq(_vq);
    u16 idx;
    void *opaque = NULL;

    for (idx = 0; idx < (u16)vq->vring.num; idx++) {
        opaque = vq->opaque[idx];
        if (opaque) {
            put_unused_desc_chain(vq, idx);
            vq->vring.avail->idx = --vq->master_vring_avail.idx;
            break;
        }
    }
    return opaque;
}

/* Returns the size of the virtqueue structure including
 * additional size for per-descriptor data */
unsigned int vring_control_block_size(u16 qsize, bool packed)
{
    unsigned int res;
    if (packed) {
        return vring_control_block_size_packed(qsize);
    }
    res = sizeof(struct virtqueue_split);
    res += sizeof(void *) * qsize;
    return res;
}

/* Initializes a new virtqueue using already allocated memory */
struct virtqueue *vring_new_virtqueue_split(
    unsigned int index,                 /* virtqueue index */
    unsigned int num,                   /* virtqueue size (always a power of 2) */
    unsigned int vring_align,           /* vring alignment requirement */
    VirtIODevice *vdev,                 /* the virtio device owning the queue */
    void *pages,                        /* vring memory */
    void (*notify)(struct virtqueue *), /* notification callback */
    void *control)                      /* virtqueue memory */
{
    struct virtqueue_split *vq = splitvq(control);
    u16 i;

    if (DESC_INDEX(num, num) != 0) {
        DPrintf(0, "Virtqueue length %u is not a power of 2\n", num);
        return NULL;
    }

    RtlZeroMemory(vq, sizeof(*vq) + num * sizeof(void *));

    vring_init(&vq->vring, num, pages, vring_align);
    vq->vq.vdev = vdev;
    vq->vq.notification_cb = notify;
    vq->vq.index = index;

    /* Build a linked list of unused descriptors */
    vq->num_unused = num;
    vq->first_unused = 0;
    for (i = 0; i < num - 1; i++) {
        vq->vring.desc[i].flags = VIRTQ_DESC_F_NEXT;
        vq->vring.desc[i].next = i + 1;
    }
    vq->vq.avail_va = vq->vring.avail;
    vq->vq.used_va = vq->vring.used;
    vq->vq.add_buf = virtqueue_add_buf_split;
    vq->vq.detach_unused_buf = virtqueue_detach_unused_buf_split;
    vq->vq.disable_cb = virtqueue_disable_cb_split;
    vq->vq.enable_cb = virtqueue_enable_cb_split;
    vq->vq.enable_cb_delayed = virtqueue_enable_cb_delayed_split;
    vq->vq.get_buf = virtqueue_get_buf_split;
    vq->vq.has_buf = virtqueue_has_buf_split;
    vq->vq.is_interrupt_enabled = virtqueue_is_interrupt_enabled_split;
    vq->vq.kick_always = virtqueue_kick_always_split;
    vq->vq.kick_prepare = virtqueue_kick_prepare_split;
    vq->vq.shutdown = virtqueue_shutdown_split;
    return &vq->vq;
}

/* Negotiates virtio transport features */
void vring_transport_features(
    VirtIODevice *vdev,
    u64 *features) /* points to device features on entry and driver accepted features on return */
{
    unsigned int i;

    for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
        if (i != VIRTIO_RING_F_INDIRECT_DESC &&
            i != VIRTIO_RING_F_EVENT_IDX &&
            i != VIRTIO_F_VERSION_1) {
            virtio_feature_disable(*features, i);
        }
    }
}
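
/*
 * Usage sketch (illustrative addition; example_negotiate_features is
 * hypothetical): a transport reads the device feature bits, masks them down
 * to what this ring implementation supports, and writes back the accepted
 * set via the device ops declared in virtio_pci.h.
 */
static NTSTATUS example_negotiate_features(VirtIODevice *vdev)
{
    u64 features = vdev->device->get_features(vdev);

    vring_transport_features(vdev, &features);
    return vdev->device->set_features(vdev, features);
}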

/* Returns the max number of scatter-gather elements that fit in an indirect page */
u32 virtio_get_indirect_page_capacity()
{
    return PAGE_SIZE / sizeof(struct vring_desc);
}
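
/*
 * Worked example (illustrative addition): with a 4096-byte page and 16-byte
 * descriptors this yields 4096 / 16 = 256 scatter-gather elements per
 * indirect page.
 */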

unsigned long vring_size(unsigned int num, unsigned long align, bool packed)
{
    if (packed) {
        return vring_size_packed(num, align);
    } else {
        return vring_size_split(num, align);
    }
}

@ -1,11 +0,0 @@
#pragma once

extern int virtioDebugLevel;
extern int bDebugPrint;
typedef void (*tDebugPrintFunc)(const char *format, ...);
extern tDebugPrintFunc VirtioDebugPrintProc;

#define DPrintf(Level, MSG, ...) if ((!bDebugPrint) || Level > virtioDebugLevel) {} else VirtioDebugPrintProc(MSG, __VA_ARGS__)

#define DEBUG_ENTRY(level) DPrintf(level, "[%s]=>\n", __FUNCTION__)
#define DEBUG_EXIT_STATUS(level, status) DPrintf((status == NDIS_STATUS_SUCCESS ? level : 0), "[%s]<=0x%X\n", __FUNCTION__, (status))
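
/*
 * Usage sketch (illustrative addition): messages print only when bDebugPrint
 * is set and the level does not exceed virtioDebugLevel, so level 0 is
 * effectively "always on when tracing is enabled":
 *
 *     DEBUG_ENTRY(4);
 *     DPrintf(0, "using queue size %u\n", size);
 */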
@ -1,19 +0,0 @@
#ifndef _LINUX_TYPES_H
#define _LINUX_TYPES_H

#define __bitwise__
// #define __attribute__(x)

#define u8 unsigned char
#define u16 unsigned short
#define u32 unsigned long
#define u64 ULONGLONG

#define __u8 unsigned char
#define __u16 unsigned short
#define __le16 unsigned short
#define __u32 unsigned long
#define __le32 unsigned long
#define __u64 ULONGLONG

#endif /* _LINUX_TYPES_H */

@ -1,73 +0,0 @@
#ifndef _UAPI_LINUX_VIRTIO_CONFIG_H
#define _UAPI_LINUX_VIRTIO_CONFIG_H
/* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
 * anyone can use the definitions to implement compatible drivers/servers.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of IBM nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE. */

/* Virtio devices use a standardized configuration space to define their
 * features and pass configuration information, but each implementation can
 * store and access that space differently. */

/* Status byte for guest to report progress, and synchronize features. */
/* We have seen device and processed generic fields (VIRTIO_CONFIG_F_VIRTIO) */
#define VIRTIO_CONFIG_S_ACKNOWLEDGE 1
/* We have found a driver for the device. */
#define VIRTIO_CONFIG_S_DRIVER 2
/* Driver has used its parts of the config, and is happy */
#define VIRTIO_CONFIG_S_DRIVER_OK 4
/* Driver has finished configuring features */
#define VIRTIO_CONFIG_S_FEATURES_OK 8
/* Device entered invalid state, driver SHOULD reset it */
#define VIRTIO_CONFIG_S_NEEDS_RESET 0x40
/* We've given up on this device. */
#define VIRTIO_CONFIG_S_FAILED 0x80
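
/*
 * Illustrative note (addition, following the virtio specification): a driver
 * walks these bits in order during initialization: write ACKNOWLEDGE, then
 * DRIVER, negotiate feature bits, write FEATURES_OK and re-read the status to
 * confirm the device kept it, and finally write DRIVER_OK. On any failure it
 * sets FAILED instead.
 */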
/* virtio library features bits */

/* Some virtio feature bits (currently bits 28 through 31) are reserved for the
 * transport being used (eg. virtio_ring), the rest are per-device feature
 * bits. */
#define VIRTIO_TRANSPORT_F_START 28
#define VIRTIO_TRANSPORT_F_END 32

/* Do we get callbacks when the ring is completely used, even if we've
 * suppressed them? */
#define VIRTIO_F_NOTIFY_ON_EMPTY 24

/* Can the device handle any descriptor layout? */
#define VIRTIO_F_ANY_LAYOUT 27

/* v1.0 compliant. */
#define VIRTIO_F_VERSION_1 32

#define VIRTIO_F_IOMMU_PLATFORM 33

/* This feature indicates support for the packed virtqueue layout. */
#define VIRTIO_F_RING_PACKED 34

// if this number is not equal to desc size, queue creation fails
#define SIZE_OF_SINGLE_INDIRECT_DESC 16

#endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */

@ -1,47 +0,0 @@
#ifndef _UAPI_LINUX_VIRTIO_TYPES_H
#define _UAPI_LINUX_VIRTIO_TYPES_H
/* Type definitions for virtio implementations.
 *
 * This header is BSD licensed so anyone can use the definitions to implement
 * compatible drivers/servers.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of IBM nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (C) 2014 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 */

#include "linux/types.h"

/*
 * __virtio{16,32,64} have the following meaning:
 * - __u{16,32,64} for virtio devices in legacy mode, accessed in native endian
 * - __le{16,32,64} for standard-compliant virtio devices
 */

typedef __u16 __bitwise__ __virtio16;
typedef __u32 __bitwise__ __virtio32;
typedef __u64 __bitwise__ __virtio64;

#endif /* _UAPI_LINUX_VIRTIO_TYPES_H */

@ -1,39 +0,0 @@
//////////////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2007 Qumranet All Rights Reserved
//
// Module Name:
// osdep.h
//
// Abstract:
// Windows OS dependent definitions of data types
//
// Author:
// Yan Vugenfirer - February 2007.
//
//////////////////////////////////////////////////////////////////////////////////////////

#pragma once

#include <ntddk.h>

#ifdef __REACTOS__
#ifdef __GNUC__
#undef FORCEINLINE
#define FORCEINLINE __attribute__((__always_inline__))
#endif
#endif

#ifndef __REACTOS__
#define ENOSPC 1
#endif

#if !defined(__cplusplus) && !defined(bool)
// Important note: in MSFT C++ the length of bool is 1 byte;
// the C++ standard does not define the length of bool, so an
// inconsistent definition of 'bool' may create compatibility problems
#define bool u8
#define false FALSE
#define true TRUE
#endif

#define SMP_CACHE_BYTES 64

@ -1,392 +0,0 @@
/*
 * Virtio PCI driver
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This header is BSD licensed so anyone can use the definitions to implement
 * compatible drivers/servers.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of IBM nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _LINUX_VIRTIO_PCI_H
#define _LINUX_VIRTIO_PCI_H

#include "linux/types.h"
#include "linux/virtio_config.h"

#ifndef VIRTIO_PCI_NO_LEGACY

/* A 32-bit r/o bitmask of the features supported by the host */
#define VIRTIO_PCI_HOST_FEATURES 0

/* A 32-bit r/w bitmask of features activated by the guest */
#define VIRTIO_PCI_GUEST_FEATURES 4

/* A 32-bit r/w PFN for the currently selected queue */
#define VIRTIO_PCI_QUEUE_PFN 8

/* A 16-bit r/o queue size for the currently selected queue */
#define VIRTIO_PCI_QUEUE_NUM 12

/* A 16-bit r/w queue selector */
#define VIRTIO_PCI_QUEUE_SEL 14

/* A 16-bit r/w queue notifier */
#define VIRTIO_PCI_QUEUE_NOTIFY 16

/* An 8-bit device status register. */
#define VIRTIO_PCI_STATUS 18

/* An 8-bit r/o interrupt status register. Reading the value will return the
 * current contents of the ISR and will also clear it. This is effectively
 * a read-and-acknowledge. */
#define VIRTIO_PCI_ISR 19

/* MSI-X registers: only enabled if MSI-X is enabled. */
/* A 16-bit vector for configuration changes. */
#define VIRTIO_MSI_CONFIG_VECTOR 20
/* A 16-bit vector for selected queue notifications. */
#define VIRTIO_MSI_QUEUE_VECTOR 22

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG_OFF(msix_enabled) ((msix_enabled) ? 24 : 20)
/* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */
#define VIRTIO_PCI_CONFIG(msix_enabled) VIRTIO_PCI_CONFIG_OFF(msix_enabled)

/* How many bits to shift physical queue address written to QUEUE_PFN.
 * 12 is historical, and due to x86 page size. */
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12

/* The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN 4096

#endif /* VIRTIO_PCI_NO_LEGACY */

/* The bit of the ISR which indicates a device configuration change. */
#define VIRTIO_PCI_ISR_CONFIG 0x2
/* Vector value used to disable MSI for queue */
#define VIRTIO_MSI_NO_VECTOR 0xffff

/* IDs for different capabilities. Must all exist. */

/* Common configuration */
#define VIRTIO_PCI_CAP_COMMON_CFG 1
/* Notifications */
#define VIRTIO_PCI_CAP_NOTIFY_CFG 2
/* ISR access */
#define VIRTIO_PCI_CAP_ISR_CFG 3
/* Device specific configuration */
#define VIRTIO_PCI_CAP_DEVICE_CFG 4
/* PCI configuration access */
#define VIRTIO_PCI_CAP_PCI_CFG 5

/* This is the PCI capability header: */
struct virtio_pci_cap {
    __u8 cap_vndr;   /* Generic PCI field: PCI_CAPABILITY_ID_VENDOR_SPECIFIC */
    __u8 cap_next;   /* Generic PCI field: next ptr. */
    __u8 cap_len;    /* Generic PCI field: capability length */
    __u8 cfg_type;   /* Identifies the structure. */
    __u8 bar;        /* Where to find it. */
    __u8 padding[3]; /* Pad to full dword. */
    __le32 offset;   /* Offset within bar. */
    __le32 length;   /* Length of the structure, in bytes. */
};

struct virtio_pci_notify_cap {
    struct virtio_pci_cap cap;
    __le32 notify_off_multiplier; /* Multiplier for queue_notify_off. */
};

/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */
struct virtio_pci_common_cfg {
    /* About the whole device. */
    __le32 device_feature_select; /* read-write */
    __le32 device_feature;        /* read-only */
    __le32 guest_feature_select;  /* read-write */
    __le32 guest_feature;         /* read-write */
    __le16 msix_config;           /* read-write */
    __le16 num_queues;            /* read-only */
    __u8 device_status;           /* read-write */
    __u8 config_generation;       /* read-only */

    /* About a specific virtqueue. */
    __le16 queue_select;      /* read-write */
    __le16 queue_size;        /* read-write, power of 2. */
    __le16 queue_msix_vector; /* read-write */
    __le16 queue_enable;      /* read-write */
    __le16 queue_notify_off;  /* read-only */
    __le32 queue_desc_lo;     /* read-write */
    __le32 queue_desc_hi;     /* read-write */
    __le32 queue_avail_lo;    /* read-write */
    __le32 queue_avail_hi;    /* read-write */
    __le32 queue_used_lo;     /* read-write */
    __le32 queue_used_hi;     /* read-write */
};

#define MAX_QUEUES_PER_DEVICE_DEFAULT 8

typedef struct virtio_queue_info
{
    /* the actual virtqueue */
    struct virtqueue *vq;
    /* the number of entries in the queue */
    u16 num;
    /* the virtual address of the ring queue */
    void *queue;
} VirtIOQueueInfo;
|
||||
|
||||
typedef struct virtio_system_ops {
|
||||
// device register access
|
||||
u8 (*vdev_read_byte)(ULONG_PTR ulRegister);
|
||||
u16 (*vdev_read_word)(ULONG_PTR ulRegister);
|
||||
u32 (*vdev_read_dword)(ULONG_PTR ulRegister);
|
||||
void (*vdev_write_byte)(ULONG_PTR ulRegister, u8 bValue);
|
||||
void (*vdev_write_word)(ULONG_PTR ulRegister, u16 wValue);
|
||||
void (*vdev_write_dword)(ULONG_PTR ulRegister, u32 ulValue);
|
||||
|
||||
// memory management
|
||||
void *(*mem_alloc_contiguous_pages)(void *context, size_t size);
|
||||
void (*mem_free_contiguous_pages)(void *context, void *virt);
|
||||
ULONGLONG (*mem_get_physical_address)(void *context, void *virt);
|
||||
void *(*mem_alloc_nonpaged_block)(void *context, size_t size);
|
||||
void (*mem_free_nonpaged_block)(void *context, void *addr);
|
||||
|
||||
// PCI config space access
|
||||
int (*pci_read_config_byte)(void *context, int where, u8 *bVal);
|
||||
int (*pci_read_config_word)(void *context, int where, u16 *wVal);
|
||||
int (*pci_read_config_dword)(void *context, int where, u32 *dwVal);
|
||||
|
||||
// PCI resource handling
|
||||
size_t (*pci_get_resource_len)(void *context, int bar);
|
||||
void *(*pci_map_address_range)(void *context, int bar, size_t offset, size_t maxlen);
|
||||
|
||||
// misc
|
||||
u16 (*vdev_get_msix_vector)(void *context, int queue);
|
||||
void (*vdev_sleep)(void *context, unsigned int msecs);
|
||||
} VirtIOSystemOps;
|
||||
|
||||
struct virtio_device;
|
||||
typedef struct virtio_device VirtIODevice;
|
||||
|
||||
struct virtio_device_ops
{
    // read/write device config and read config generation counter
    void (*get_config)(VirtIODevice *vdev, unsigned offset, void *buf, unsigned len);
    void (*set_config)(VirtIODevice *vdev, unsigned offset, const void *buf, unsigned len);
    u32 (*get_config_generation)(VirtIODevice *vdev);

    // read/write device status byte and reset the device
    u8 (*get_status)(VirtIODevice *vdev);
    void (*set_status)(VirtIODevice *vdev, u8 status);
    void (*reset)(VirtIODevice *vdev);

    // get/set device feature bits
    u64 (*get_features)(VirtIODevice *vdev);
    NTSTATUS (*set_features)(VirtIODevice *vdev, u64 features);

    // set config/queue MSI interrupt vector, returns the new vector
    u16 (*set_config_vector)(VirtIODevice *vdev, u16 vector);
    u16 (*set_queue_vector)(struct virtqueue *vq, u16 vector);

    // query virtual queue size and memory requirements
    NTSTATUS (*query_queue_alloc)(VirtIODevice *vdev,
                                  unsigned index, unsigned short *pNumEntries,
                                  unsigned long *pRingSize,
                                  unsigned long *pHeapSize);

    // allocate and initialize a queue
    NTSTATUS (*setup_queue)(struct virtqueue **queue,
                            VirtIODevice *vdev, VirtIOQueueInfo *info,
                            unsigned idx, u16 msix_vec);

    // tear down and deallocate a queue
    void (*delete_queue)(VirtIOQueueInfo *info);
};

struct virtio_device
{
    // the I/O port BAR of the PCI device (legacy virtio devices only)
    ULONG_PTR addr;

    // true if the device uses MSI interrupts
    bool msix_used;

    // true if the VIRTIO_RING_F_EVENT_IDX feature flag has been negotiated
    bool event_suppression_enabled;

    // true if the VIRTIO_F_RING_PACKED feature flag has been negotiated
    bool packed_ring;

    // internal device operations, implemented separately for legacy and modern
    const struct virtio_device_ops *device;

    // external callbacks implemented separately by different driver model drivers
    const struct virtio_system_ops *system;

    // opaque context value passed as first argument to virtio_system_ops callbacks
    void *DeviceContext;

    // the ISR status field, reading causes the device to de-assert an interrupt
    volatile u8 *isr;

    // modern virtio device capabilities and related state
    volatile struct virtio_pci_common_cfg *common;
    volatile unsigned char *config;
    volatile unsigned char *notify_base;
    int notify_map_cap;
    u32 notify_offset_multiplier;

    size_t config_len;
    size_t notify_len;

    // maximum number of virtqueues that fit in the memory block pointed to by info
    ULONG maxQueues;

    // points to inline_info if not more than MAX_QUEUES_PER_DEVICE_DEFAULT queues
    // are used, or to an external allocation otherwise
    VirtIOQueueInfo *info;
    VirtIOQueueInfo inline_info[MAX_QUEUES_PER_DEVICE_DEFAULT];
};

/* Driver API: device init and shutdown
 * DeviceContext is a driver-defined opaque value which will be passed to the
 * driver-supplied callbacks described in pSystemOps. pSystemOps and all of its
 * fields must be non-NULL. msix_used is true if and only if the device is
 * configured with MSI support.
 */
NTSTATUS virtio_device_initialize(VirtIODevice *vdev,
                                  const VirtIOSystemOps *pSystemOps,
                                  void *DeviceContext,
                                  bool msix_used);
void virtio_device_shutdown(VirtIODevice *vdev);
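
/* A minimal initialization sketch. MyAdapterContext and MySystemOps are
 * hypothetical stand-ins; any real driver supplies its own context type and
 * a fully populated callback table. */
static NTSTATUS MyStartDevice(MyAdapterContext *ctx, bool msix)
{
    NTSTATUS status = virtio_device_initialize(&ctx->VDevice,
                                               &MySystemOps, /* all fields non-NULL */
                                               ctx,          /* handed back to every callback */
                                               msix);
    if (!NT_SUCCESS(status))
        return status;
    /* feature negotiation and queue setup follow */
    return STATUS_SUCCESS;
}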

/* Driver API: device status manipulation
 * virtio_set_status should not be called by new drivers. Device status bits
 * should only be set with virtio_add_status and cleared back to 0 with
 * virtio_device_reset. virtio_device_ready is a special version of
 * virtio_add_status which adds the VIRTIO_CONFIG_S_DRIVER_OK status bit.
 */
u8 virtio_get_status(VirtIODevice *vdev);
void virtio_set_status(VirtIODevice *vdev, u8 status);
void virtio_add_status(VirtIODevice *vdev, u8 status);

void virtio_device_reset(VirtIODevice *vdev);
void virtio_device_ready(VirtIODevice *vdev);
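
/* Sketch of the intended bring-up sequence, assuming the standard
 * VIRTIO_CONFIG_S_* status bits defined elsewhere in the library:
 *
 *     virtio_device_reset(vdev);                             status -> 0
 *     virtio_add_status(vdev, VIRTIO_CONFIG_S_ACKNOWLEDGE);  device noticed
 *     virtio_add_status(vdev, VIRTIO_CONFIG_S_DRIVER);       driver can drive it
 *     ...feature negotiation and queue setup...
 *     virtio_device_ready(vdev);                             adds DRIVER_OK
 */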

/* Driver API: device feature bitmap manipulation
 * Features passed to virtio_set_features should be a subset of features offered by
 * the device as returned from virtio_get_features. virtio_set_features sets the
 * VIRTIO_CONFIG_S_FEATURES_OK status bit if it is supported by the device.
 */
#define virtio_is_feature_enabled(FeaturesList, Feature) (!!((FeaturesList) & (1ULL << (Feature))))
#define virtio_feature_enable(FeaturesList, Feature) ((FeaturesList) |= (1ULL << (Feature)))
#define virtio_feature_disable(FeaturesList, Feature) ((FeaturesList) &= ~(1ULL << (Feature)))

u64 virtio_get_features(VirtIODevice *dev);
NTSTATUS virtio_set_features(VirtIODevice *vdev, u64 features);
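
/* A negotiation sketch, assuming vdev has been initialized: accept only the
 * offered features the driver understands. VIRTIO_RING_F_EVENT_IDX is
 * declared in virtio_ring.h; the chosen subset is purely illustrative. */
u64 host_features = virtio_get_features(vdev);
u64 guest_features = 0;
if (virtio_is_feature_enabled(host_features, VIRTIO_RING_F_EVENT_IDX)) {
    virtio_feature_enable(guest_features, VIRTIO_RING_F_EVENT_IDX);
}
NTSTATUS status = virtio_set_features(vdev, guest_features); /* also sets FEATURES_OK if supported */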

/* Driver API: device configuration access
 * Both virtio_get_config and virtio_set_config support arbitrary values of the len
 * parameter. Config items of length 1, 2, and 4 are read/written using one access,
 * length 8 is broken down into two 4-byte accesses, and any other length is read or
 * written byte by byte.
 */
void virtio_get_config(VirtIODevice *vdev, unsigned offset,
                       void *buf, unsigned len);
void virtio_set_config(VirtIODevice *vdev, unsigned offset,
                       void *buf, unsigned len);
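
/* Sketch: reading a virtio-net style MAC address. The 6-byte field at config
 * offset 0 follows the virtio-net layout; other device types lay out their
 * config space differently. A 6-byte read proceeds byte by byte. */
u8 mac[6];
virtio_get_config(vdev, 0, mac, sizeof(mac));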

/* Driver API: virtqueue setup
 * virtio_reserve_queue_memory makes VirtioLib reserve memory for its virtqueue
 * bookkeeping. Drivers should call this function if they intend to set up queues
 * one by one with virtio_find_queue. virtio_find_queues (plural) internally takes
 * care of the reservation, so virtio_reserve_queue_memory need not be called.
 * Note that in addition to queue interrupt vectors, virtio_find_queues also sets
 * up the device config vector as a convenience.
 * Drivers should treat the returned struct virtqueue pointers as opaque handles.
 */
NTSTATUS virtio_query_queue_allocation(VirtIODevice *vdev, unsigned index,
                                       unsigned short *pNumEntries,
                                       unsigned long *pRingSize,
                                       unsigned long *pHeapSize);

NTSTATUS virtio_reserve_queue_memory(VirtIODevice *vdev, unsigned nvqs);

NTSTATUS virtio_find_queue(VirtIODevice *vdev, unsigned index,
                           struct virtqueue **vq);
NTSTATUS virtio_find_queues(VirtIODevice *vdev, unsigned nvqs,
                            struct virtqueue *vqs[]);
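
/* Sketch: setting up a receive and a transmit queue in one call. The queue
 * count and ordering are device-specific; two queues are illustrative. */
struct virtqueue *vqs[2];
NTSTATUS status = virtio_find_queues(vdev, 2, vqs);
if (NT_SUCCESS(status)) {
    /* vqs[0] and vqs[1] are opaque handles for the virtqueue API */
}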

/* Driver API: virtqueue shutdown
 * The device must be reset and re-initialized before queues can be set up
 * again after they have been deleted.
 */
void virtio_delete_queue(struct virtqueue *vq);
void virtio_delete_queues(VirtIODevice *vdev);
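
/* A teardown sketch; the reset-then-delete order mirrors the note above, and
 * the exact sequence in a given driver model may differ. */
virtio_device_reset(vdev);
virtio_delete_queues(vdev);
virtio_device_shutdown(vdev);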

/* Driver API: virtqueue query and manipulation
 * virtio_get_queue_descriptor_size is useful in situations where the driver
 * has to prepare for the memory allocation performed by
 * virtio_reserve_queue_memory beforehand.
 */
u32 virtio_get_queue_size(struct virtqueue *vq);
unsigned long virtio_get_indirect_page_capacity();

static ULONG FORCEINLINE virtio_get_queue_descriptor_size()
{
    return sizeof(VirtIOQueueInfo);
}
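
/* Sketch: pre-computing the bookkeeping size that virtio_reserve_queue_memory
 * will allocate internally; the queue count is illustrative. */
unsigned nvqs = 2;
ULONG infoBytes = nvqs * virtio_get_queue_descriptor_size();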

/* Driver API: interrupt handling
 * virtio_set_config_vector and virtio_set_queue_vector set the MSI vector used for
 * the device configuration interrupt and a queue interrupt, respectively. The driver
 * may choose to either return the vector from the vdev_get_msix_vector callback
 * (called as part of queue setup) or call these functions later. Note that setting
 * the vector may fail, which is indicated by a return value of VIRTIO_MSI_NO_VECTOR.
 * virtio_read_isr_status returns the value of the ISR status register; note that it
 * is not idempotent: calling the function makes the device de-assert the interrupt.
 */
u16 virtio_set_config_vector(VirtIODevice *vdev, u16 vector);
u16 virtio_set_queue_vector(struct virtqueue *vq, u16 vector);

u8 virtio_read_isr_status(VirtIODevice *vdev);
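
/* An ISR sketch: the status must be read exactly once per interrupt, since
 * the read itself de-asserts the line. Bit meanings follow the standard
 * virtio layout (bit 0: queue activity, bit 1: configuration change). */
u8 isr = virtio_read_isr_status(vdev);
if (isr != 0) {
    /* schedule queue processing and/or re-read the device config */
}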

/* Driver API: miscellaneous helpers
 * virtio_get_bar_index returns the corresponding BAR index given its physical address.
 * This tends to be useful to all drivers since Windows doesn't provide reliable BAR
 * indices as part of resource enumeration. The function returns -1 on failure.
 */
int virtio_get_bar_index(PPCI_COMMON_HEADER pPCIHeader, PHYSICAL_ADDRESS BasePA);
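
/* Sketch: recovering the BAR index for an enumerated memory resource.
 * pPCIHeader is assumed to have been read from PCI config space already, and
 * desc to be a CM_PARTIAL_RESOURCE_DESCRIPTOR of type CmResourceTypeMemory. */
int bar = virtio_get_bar_index(pPCIHeader, desc->u.Memory.Start);
if (bar < 0) {
    /* the physical address did not match any BAR */
}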

#endif

@ -1,88 +0,0 @@
#ifndef _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
#define _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
/*
 * Virtio PCI driver - APIs for common functionality for all device versions
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of their contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#define ioread8(vdev, addr) \
    vdev->system->vdev_read_byte((ULONG_PTR)(addr))
#define ioread16(vdev, addr) \
    vdev->system->vdev_read_word((ULONG_PTR)(addr))
#define ioread32(vdev, addr) \
    vdev->system->vdev_read_dword((ULONG_PTR)(addr))
#define iowrite8(vdev, val, addr) \
    vdev->system->vdev_write_byte((ULONG_PTR)(addr), val)
#define iowrite16(vdev, val, addr) \
    vdev->system->vdev_write_word((ULONG_PTR)(addr), val)
#define iowrite32(vdev, val, addr) \
    vdev->system->vdev_write_dword((ULONG_PTR)(addr), val)
#define iowrite64_twopart(vdev, val, lo_addr, hi_addr) \
    vdev->system->vdev_write_dword((ULONG_PTR)(lo_addr), (u32)(val)); \
    vdev->system->vdev_write_dword((ULONG_PTR)(hi_addr), (val) >> 32)
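
/* Sketch: programming a 64-bit descriptor ring address through the modern
 * common config window as two 32-bit halves. desc_pa is a hypothetical
 * physical address, e.g. obtained via mem_get_physical_address. */
iowrite64_twopart(vdev, desc_pa,
                  &vdev->common->queue_desc_lo,
                  &vdev->common->queue_desc_hi);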

#define mem_alloc_contiguous_pages(vdev, size) \
    vdev->system->mem_alloc_contiguous_pages(vdev->DeviceContext, size)
#define mem_free_contiguous_pages(vdev, virt) \
    vdev->system->mem_free_contiguous_pages(vdev->DeviceContext, virt)
#define mem_get_physical_address(vdev, virt) \
    vdev->system->mem_get_physical_address(vdev->DeviceContext, virt)
#define mem_alloc_nonpaged_block(vdev, size) \
    vdev->system->mem_alloc_nonpaged_block(vdev->DeviceContext, size)
#define mem_free_nonpaged_block(vdev, addr) \
    vdev->system->mem_free_nonpaged_block(vdev->DeviceContext, addr)

#define pci_read_config_byte(vdev, where, bVal) \
    vdev->system->pci_read_config_byte(vdev->DeviceContext, where, bVal)
#define pci_read_config_word(vdev, where, wVal) \
    vdev->system->pci_read_config_word(vdev->DeviceContext, where, wVal)
#define pci_read_config_dword(vdev, where, dwVal) \
    vdev->system->pci_read_config_dword(vdev->DeviceContext, where, dwVal)

#define pci_get_resource_len(vdev, bar) \
    vdev->system->pci_get_resource_len(vdev->DeviceContext, bar)
#define pci_map_address_range(vdev, bar, offset, maxlen) \
    vdev->system->pci_map_address_range(vdev->DeviceContext, bar, offset, maxlen)

#define vdev_get_msix_vector(vdev, queue) \
    vdev->system->vdev_get_msix_vector(vdev->DeviceContext, queue)
#define vdev_sleep(vdev, msecs) \
    vdev->system->vdev_sleep(vdev->DeviceContext, msecs)

/* the notify function used when creating a virt queue */
void vp_notify(struct virtqueue *vq);

NTSTATUS vio_legacy_initialize(VirtIODevice *vdev);
NTSTATUS vio_modern_initialize(VirtIODevice *vdev);

#endif

@ -1,50 +0,0 @@
#ifndef _UAPI_LINUX_VIRTIO_RING_H
#define _UAPI_LINUX_VIRTIO_RING_H
/* An interface for efficient virtio implementation, currently for use by KVM
 * and lguest, but hopefully others soon. Do NOT change this since it will
 * break existing servers and clients.
 *
 * This header is BSD licensed so anyone can use the definitions to implement
 * compatible drivers/servers.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of IBM nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright Rusty Russell IBM Corporation 2007. */

#include "linux/types.h"
#include "linux/virtio_types.h"

/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC 28

/* The Guest publishes the used index for which it expects an interrupt
 * at the end of the avail ring. Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
 * at the end of the used ring. Guest should ignore the used->flags field. */
#define VIRTIO_RING_F_EVENT_IDX 29

void vring_transport_features(VirtIODevice *vdev, u64 *features);
unsigned long vring_size(unsigned int num, unsigned long align, bool packed);
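
/* Sketch: bytes needed for a 256-entry split ring at page alignment; the
 * 4096-byte alignment value is illustrative. */
unsigned long ringBytes = vring_size(256, 4096, false);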

#endif /* _UAPI_LINUX_VIRTIO_RING_H */

@ -1,24 +0,0 @@
#ifndef _VIRTIO_RING_ALLOCATION_H
#define _VIRTIO_RING_ALLOCATION_H

struct virtqueue *vring_new_virtqueue_split(unsigned int index,
                                            unsigned int num,
                                            unsigned int vring_align,
                                            VirtIODevice *vdev,
                                            void *pages,
                                            void (*notify)(struct virtqueue *),
                                            void *control);

struct virtqueue *vring_new_virtqueue_packed(unsigned int index,
                                             unsigned int num,
                                             unsigned int vring_align,
                                             VirtIODevice *vdev,
                                             void *pages,
                                             void (*notify)(struct virtqueue *),
                                             void *control);

unsigned int vring_control_block_size(u16 qsize, bool packed);
unsigned int vring_control_block_size_packed(u16 qsize);
unsigned long vring_size_packed(unsigned int num, unsigned long align);
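
/* Sketch: creating a split-ring virtqueue from pre-allocated memory, with
 * vp_notify from virtio_pci_common.h as the kick callback. ringPages and
 * control are hypothetical buffers sized via vring_size() and
 * vring_control_block_size() respectively. */
struct virtqueue *vq = vring_new_virtqueue_split(0, 256, 4096, vdev,
                                                 ringPages, vp_notify, control);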

#endif /* _VIRTIO_RING_ALLOCATION_H */