vfio-user: implement VFIO_USER_DMA_READ/WRITE

Unlike most other messages, these are server->client messages, used when
the server wants to do "DMA"; this path is slow, so normally the server
has the client's memory directly mapped instead.
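
For illustration, a minimal sketch of a DMA read from the server's point of
view (only VFIOUserDMARW and the VFIO_USER_DMA_READ command come from this
patch; the variable names are hypothetical):

    /*
     * Hypothetical server-side sketch: ask the client to read `count`
     * bytes of guest memory at `dma_addr` on the server's behalf. The
     * reply carries the same fixed fields followed by the data bytes.
     */
    VFIOUserDMARW req = {
        .offset = dma_addr,  /* guest address to access */
        .count  = count,     /* must not exceed the negotiated max_xfer_size */
    };
    /* req.hdr is filled with command = VFIO_USER_DMA_READ and
       size = sizeof(req), then sent over the socket. */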

Originally-by: John Johnson <john.g.johnson@oracle.com>
Signed-off-by: Elena Ufimtseva <elena.ufimtseva@oracle.com>
Signed-off-by: Jagannathan Raman <jag.raman@oracle.com>
Signed-off-by: John Levon <john.levon@nutanix.com>
Reviewed-by: Cédric Le Goater <clg@redhat.com>
Link: https://lore.kernel.org/qemu-devel/20250625193012.2316242-15-john.levon@nutanix.com
Signed-off-by: Cédric Le Goater <clg@redhat.com>
John Levon, 2025-06-25 20:30:06 +01:00; committed by Cédric Le Goater
commit c6ac52a4d8, parent 18e899e63d
4 changed files with 223 additions and 1 deletion

hw/vfio-user/pci.c

@@ -9,6 +9,7 @@
#include <sys/ioctl.h>
#include "qemu/osdep.h"
#include "qapi-visit-sockets.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/vfio/pci.h"
@@ -80,6 +81,95 @@ static void vfio_user_msix_teardown(VFIOPCIDevice *vdev)
    vdev->msix->pba_region = NULL;
}

static void vfio_user_dma_read(VFIOPCIDevice *vdev, VFIOUserDMARW *msg)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOUserProxy *proxy = vdev->vbasedev.proxy;
    VFIOUserDMARW *res;
    MemTxResult r;
    size_t size;

    if (msg->hdr.size < sizeof(*msg)) {
        vfio_user_send_error(proxy, &msg->hdr, EINVAL);
        return;
    }
    if (msg->count > proxy->max_xfer_size) {
        vfio_user_send_error(proxy, &msg->hdr, E2BIG);
        return;
    }

    /* switch to our own message buffer */
    size = msg->count + sizeof(VFIOUserDMARW);
    res = g_malloc0(size);
    memcpy(res, msg, sizeof(*res));
    g_free(msg);

    r = pci_dma_read(pdev, res->offset, &res->data, res->count);

    switch (r) {
    case MEMTX_OK:
        if (res->hdr.flags & VFIO_USER_NO_REPLY) {
            g_free(res);
            return;
        }
        vfio_user_send_reply(proxy, &res->hdr, size);
        break;
    case MEMTX_ERROR:
        vfio_user_send_error(proxy, &res->hdr, EFAULT);
        break;
    case MEMTX_DECODE_ERROR:
        vfio_user_send_error(proxy, &res->hdr, ENODEV);
        break;
    case MEMTX_ACCESS_ERROR:
        vfio_user_send_error(proxy, &res->hdr, EPERM);
        break;
    default:
        error_printf("vfio_user_dma_read unknown error %d\n", r);
        vfio_user_send_error(vdev->vbasedev.proxy, &res->hdr, EINVAL);
    }
}

static void vfio_user_dma_write(VFIOPCIDevice *vdev, VFIOUserDMARW *msg)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOUserProxy *proxy = vdev->vbasedev.proxy;
    MemTxResult r;

    if (msg->hdr.size < sizeof(*msg)) {
        vfio_user_send_error(proxy, &msg->hdr, EINVAL);
        return;
    }
    /* make sure transfer count isn't larger than the message data */
    if (msg->count > msg->hdr.size - sizeof(*msg)) {
        vfio_user_send_error(proxy, &msg->hdr, E2BIG);
        return;
    }

    r = pci_dma_write(pdev, msg->offset, &msg->data, msg->count);

    switch (r) {
    case MEMTX_OK:
        if ((msg->hdr.flags & VFIO_USER_NO_REPLY) == 0) {
            vfio_user_send_reply(proxy, &msg->hdr, sizeof(msg->hdr));
        } else {
            g_free(msg);
        }
        break;
    case MEMTX_ERROR:
        vfio_user_send_error(proxy, &msg->hdr, EFAULT);
        break;
    case MEMTX_DECODE_ERROR:
        vfio_user_send_error(proxy, &msg->hdr, ENODEV);
        break;
    case MEMTX_ACCESS_ERROR:
        vfio_user_send_error(proxy, &msg->hdr, EPERM);
        break;
    default:
        error_printf("vfio_user_dma_write unknown error %d\n", r);
        vfio_user_send_error(vdev->vbasedev.proxy, &msg->hdr, EINVAL);
    }
}

/*
 * Incoming request message callback.
 *
@@ -87,7 +177,28 @@ static void vfio_user_msix_teardown(VFIOPCIDevice *vdev)
 */
static void vfio_user_pci_process_req(void *opaque, VFIOUserMsg *msg)
{
    VFIOPCIDevice *vdev = opaque;
    VFIOUserHdr *hdr = msg->hdr;

    /* no incoming PCI requests pass FDs */
    if (msg->fds != NULL) {
        vfio_user_send_error(vdev->vbasedev.proxy, hdr, EINVAL);
        vfio_user_putfds(msg);
        return;
    }

    switch (hdr->command) {
    case VFIO_USER_DMA_READ:
        vfio_user_dma_read(vdev, (VFIOUserDMARW *)hdr);
        break;
    case VFIO_USER_DMA_WRITE:
        vfio_user_dma_write(vdev, (VFIOUserDMARW *)hdr);
        break;
    default:
        error_printf("vfio_user_pci_process_req unknown cmd %d\n",
                     hdr->command);
        vfio_user_send_error(vdev->vbasedev.proxy, hdr, ENOSYS);
    }
}
/*

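Both handlers above map MemTxResult values to errnos identically; a
hypothetical helper capturing that mapping (not part of the patch, shown
only to summarize the convention):

    /* Hypothetical refactor sketch: shared MemTxResult -> errno mapping. */
    static int memtx_to_errno(MemTxResult r)
    {
        switch (r) {
        case MEMTX_OK:           return 0;
        case MEMTX_ERROR:        return EFAULT;  /* access faulted */
        case MEMTX_DECODE_ERROR: return ENODEV;  /* nothing at that address */
        case MEMTX_ACCESS_ERROR: return EPERM;   /* access denied */
        default:                 return EINVAL;
        }
    }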
hw/vfio-user/protocol.h

@@ -200,7 +200,18 @@ typedef struct {
    char data[];
} VFIOUserRegionRW;

/*
 * VFIO_USER_DMA_READ
 * VFIO_USER_DMA_WRITE
 */
typedef struct {
    VFIOUserHdr hdr;
    uint64_t offset;
    uint32_t count;
    char data[];
} VFIOUserDMARW;

/* imported from struct vfio_bitmap */
typedef struct {
    uint64_t pgsize;
    uint64_t size;

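A sizing sketch for the new message (illustrative values; the rules follow
from the handlers above): a DMA read request normally carries no payload,
while a read reply or a write request appends count bytes of data after
the fixed fields.

    uint32_t count = 4096;                                  /* payload bytes */
    size_t read_req_size   = sizeof(VFIOUserDMARW);          /* no data */
    size_t read_reply_size = sizeof(VFIOUserDMARW) + count;  /* data follows */
    size_t write_req_size  = sizeof(VFIOUserDMARW) + count;  /* data follows */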
hw/vfio-user/proxy.c

@@ -347,6 +347,10 @@ static int vfio_user_recv_one(VFIOUserProxy *proxy, Error **errp)
        *msg->hdr = hdr;
        data = (char *)msg->hdr + sizeof(hdr);
    } else {
        if (hdr.size > proxy->max_xfer_size + sizeof(VFIOUserDMARW)) {
            error_setg(errp, "vfio_user_recv request larger than max");
            goto err;
        }
        buf = g_malloc0(hdr.size);
        memcpy(buf, &hdr, sizeof(hdr));
        data = buf + sizeof(hdr);
@@ -702,6 +706,40 @@ bool vfio_user_send_wait(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
    return ok;
}

/*
 * async send - msg can be queued, but will be freed when sent
 *
 * Returns false on failure, in which case @errp will be populated.
 *
 * In either case, ownership of @hdr and @fds is taken, and the caller must
 * *not* free them itself.
 */
static bool vfio_user_send_async(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
                                 VFIOUserFDs *fds, Error **errp)
{
    VFIOUserMsg *msg;

    QEMU_LOCK_GUARD(&proxy->lock);

    msg = vfio_user_getmsg(proxy, hdr, fds);
    msg->id = hdr->id;
    msg->rsize = 0;
    msg->type = VFIO_MSG_ASYNC;

    if (!(hdr->flags & (VFIO_USER_NO_REPLY | VFIO_USER_REPLY))) {
        error_setg_errno(errp, EINVAL, "%s on sync message", __func__);
        vfio_user_recycle(proxy, msg);
        return false;
    }

    if (!vfio_user_send_queued(proxy, msg, errp)) {
        vfio_user_recycle(proxy, msg);
        return false;
    }

    return true;
}

void vfio_user_wait_reqs(VFIOUserProxy *proxy)
{
    VFIOUserMsg *msg;
@@ -746,6 +784,65 @@ void vfio_user_wait_reqs(VFIOUserProxy *proxy)
    qemu_mutex_unlock(&proxy->lock);
}

/*
 * Reply to an incoming request.
 */
void vfio_user_send_reply(VFIOUserProxy *proxy, VFIOUserHdr *hdr, int size)
{
    Error *local_err = NULL;

    if (size < sizeof(VFIOUserHdr)) {
        error_printf("%s: size too small", __func__);
        g_free(hdr);
        return;
    }

    /*
     * convert header to associated reply
     */
    hdr->flags = VFIO_USER_REPLY;
    hdr->size = size;

    if (!vfio_user_send_async(proxy, hdr, NULL, &local_err)) {
        error_report_err(local_err);
    }
}

/*
 * Send an error reply to an incoming request.
 */
void vfio_user_send_error(VFIOUserProxy *proxy, VFIOUserHdr *hdr, int error)
{
    Error *local_err = NULL;

    /*
     * convert header to associated reply
     */
    hdr->flags = VFIO_USER_REPLY;
    hdr->flags |= VFIO_USER_ERROR;
    hdr->error_reply = error;
    hdr->size = sizeof(*hdr);

    if (!vfio_user_send_async(proxy, hdr, NULL, &local_err)) {
        error_report_err(local_err);
    }
}

/*
 * Close FDs erroneously received in an incoming request.
 */
void vfio_user_putfds(VFIOUserMsg *msg)
{
    VFIOUserFDs *fds = msg->fds;
    int i;

    for (i = 0; i < fds->recv_fds; i++) {
        close(fds->fds[i]);
    }
    g_free(fds);
    msg->fds = NULL;
}

static QLIST_HEAD(, VFIOUserProxy) vfio_user_sockets =
    QLIST_HEAD_INITIALIZER(vfio_user_sockets);

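A usage sketch tying the helpers together (hypothetical handler; the
ownership rule is the one documented on vfio_user_send_async above):

    /*
     * Hypothetical request handler: the incoming header buffer is reused
     * for the reply and freed once sent, so the caller must not free it.
     */
    static void handle_request(VFIOUserProxy *proxy, VFIOUserHdr *hdr)
    {
        if (hdr->size < sizeof(*hdr)) {
            vfio_user_send_error(proxy, hdr, EINVAL);   /* consumes hdr */
            return;
        }
        vfio_user_send_reply(proxy, hdr, sizeof(*hdr)); /* bare ACK reply */
    }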
hw/vfio-user/proxy.h

@@ -101,6 +101,7 @@ void vfio_user_set_handler(VFIODevice *vbasedev,
bool vfio_user_validate_version(VFIOUserProxy *proxy, Error **errp);
VFIOUserFDs *vfio_user_getfds(int numfds);
void vfio_user_putfds(VFIOUserMsg *msg);

void vfio_user_request_msg(VFIOUserHdr *hdr, uint16_t cmd,
                           uint32_t size, uint32_t flags);
@@ -109,5 +110,7 @@ bool vfio_user_send_wait(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
                         VFIOUserFDs *fds, int rsize, Error **errp);
bool vfio_user_send_nowait(VFIOUserProxy *proxy, VFIOUserHdr *hdr,
                           VFIOUserFDs *fds, int rsize, Error **errp);
void vfio_user_send_reply(VFIOUserProxy *proxy, VFIOUserHdr *hdr, int size);
void vfio_user_send_error(VFIOUserProxy *proxy, VFIOUserHdr *hdr, int error);
#endif /* VFIO_USER_PROXY_H */