net: Allow network backends to advertise max TX queue size

This commit refactors how the maximum transmit queue size for
virtio-net devices is determined, making the mechanism more generic
and extensible.

Previously, virtio_net_max_tx_queue_size() contained hardcoded
checks for specific network backend types (vhost-user and
vhost-vdpa) to determine their supported maximum queue size. This
created a direct dependency on those backend types and would have
required modifying the function for every new backend that supports
a non-default maximum queue size.
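
For reference, the check being removed was hard-wired to the peer's
driver type, roughly as follows (abridged from the removed lines in
the diff below):

    switch(peer->info->type) {
    case NET_CLIENT_DRIVER_VHOST_USER:
    case NET_CLIENT_DRIVER_VHOST_VDPA:
        return VIRTQUEUE_MAX_SIZE;
    default:
        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
    }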

To improve flexibility, a new max_tx_queue_size field is added
to the vhost_net structure. This allows each network backend
to advertise its supported maximum transmit queue size directly.
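
Concretely, each backend fills in the new VhostNetOptions field when
it calls vhost_net_init(); as the call sites in the diff below show,
vhost-user and vhost-vdpa advertise the full virtqueue size, while
the kernel vhost (tap) path passes 0, i.e. no advertised maximum:

    /* vhost-user and vhost-vdpa */
    options.max_tx_queue_size = VIRTQUEUE_MAX_SIZE;

    /* tap / kernel vhost: no advertised maximum */
    options.max_tx_queue_size = 0;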

The virtio_net_max_tx_queue_size() function now retrieves the max
TX queue size from the vhost_net struct, if available and set.
Otherwise, it defaults to VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE.
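
The lookup thus reduces to a single backend-agnostic path; an
equivalent sketch of the new logic (the actual code in the diff below
uses a goto to reach the default case):

    struct vhost_net *net = peer ? get_vhost_net(peer) : NULL;

    if (net && net->max_tx_queue_size) {
        return net->max_tx_queue_size;
    }
    return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;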

Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>

commit 33b78a30a3 (parent 1652f1b335)
Author: Laurent Vivier, 2025-07-09 10:24:22 +02:00
Committed by: Jason Wang
7 changed files with 18 additions and 12 deletions

@@ -245,6 +245,7 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options)
     net->dev.nvqs = options->nvqs;
     net->feature_bits = options->feature_bits;
     net->save_acked_features = options->save_acked_features;
+    net->max_tx_queue_size = options->max_tx_queue_size;
     net->dev.max_queues = 1;
     net->dev.vqs = net->vqs;

@@ -670,22 +670,22 @@ static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
 static int virtio_net_max_tx_queue_size(VirtIONet *n)
 {
     NetClientState *peer = n->nic_conf.peers.ncs[0];
+    struct vhost_net *net;
 
-    /*
-     * Backends other than vhost-user or vhost-vdpa don't support max queue
-     * size.
-     */
     if (!peer) {
-        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
+        goto default_value;
     }
 
-    switch(peer->info->type) {
-    case NET_CLIENT_DRIVER_VHOST_USER:
-    case NET_CLIENT_DRIVER_VHOST_VDPA:
-        return VIRTQUEUE_MAX_SIZE;
-    default:
-        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
-    };
+    net = get_vhost_net(peer);
+
+    if (!net || !net->max_tx_queue_size) {
+        goto default_value;
+    }
+
+    return net->max_tx_queue_size;
+
+default_value:
+    return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
 }
 
 static int peer_attach(VirtIONet *n, int index)

@@ -145,6 +145,7 @@ struct vhost_net {
     struct vhost_virtqueue vqs[2];
     int backend;
     const int *feature_bits;
+    int max_tx_queue_size;
     SaveAcketFeatures *save_acked_features;
     NetClientState *nc;
 };

@@ -16,6 +16,7 @@ typedef struct VhostNetOptions {
     uint32_t busyloop_timeout;
     unsigned int nvqs;
     const int *feature_bits;
+    int max_tx_queue_size;
     GetAckedFeatures *get_acked_features;
     SaveAcketFeatures *save_acked_features;
     void *opaque;

@@ -746,6 +746,7 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer,
         options.feature_bits = kernel_feature_bits;
         options.get_acked_features = NULL;
         options.save_acked_features = NULL;
+        options.max_tx_queue_size = 0;
         s->vhost_net = vhost_net_init(&options);
         if (!s->vhost_net) {

@@ -138,6 +138,7 @@ static int vhost_user_start(int queues, NetClientState *ncs[],
         options.busyloop_timeout = 0;
         options.nvqs = 2;
         options.feature_bits = user_feature_bits;
+        options.max_tx_queue_size = VIRTQUEUE_MAX_SIZE;
         options.get_acked_features = vhost_user_get_acked_features;
         options.save_acked_features = vhost_user_save_acked_features;

@@ -204,6 +204,7 @@ static int vhost_vdpa_add(NetClientState *ncs, void *be,
     options.feature_bits = vdpa_feature_bits;
     options.get_acked_features = NULL;
     options.save_acked_features = NULL;
+    options.max_tx_queue_size = VIRTQUEUE_MAX_SIZE;
     net = vhost_net_init(&options);
     if (!net) {
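
With this in place, a future backend can advertise its own limit
without touching virtio-net code. A minimal sketch of such an init
path; the my_backend_feature_bits name and the 512 limit are purely
illustrative, not part of this commit:

    /* hypothetical backend init path (illustrative only) */
    options.feature_bits = my_backend_feature_bits;  /* assumed name */
    options.max_tx_queue_size = 512;                 /* backend-specific limit */
    net = vhost_net_init(&options);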