@@ -20,6 +20,8 @@
#include <linux/vhost.h>
#include <sys/ioctl.h>
+static unsigned int vhost_kernel_used_memslots;
+
static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
void *arg)
{
@@ -238,6 +240,16 @@ static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
}
+static void vhost_kernel_set_used_memslots(struct vhost_dev *dev)
+{ /* vhost-kernel consumes one backend memslot per region in dev->mem */
+    vhost_kernel_used_memslots = dev->mem->nregions;
+}
+
+static unsigned int vhost_kernel_get_used_memslots(void)
+{ /* report the count last cached by vhost_kernel_set_used_memslots() */
+    return vhost_kernel_used_memslots;
+}
+
static const VhostOps kernel_ops = {
.backend_type = VHOST_BACKEND_TYPE_KERNEL,
.vhost_backend_init = vhost_kernel_init,
@@ -269,6 +281,8 @@ static const VhostOps kernel_ops = {
#endif /* CONFIG_VHOST_VSOCK */
.vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
.vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
+ .vhost_set_used_memslots = vhost_kernel_set_used_memslots,
+ .vhost_get_used_memslots = vhost_kernel_get_used_memslots,
};
#endif
@@ -232,6 +232,7 @@ static VhostUserMsg m __attribute__ ((unused));
/* The version of the protocol we support */
#define VHOST_USER_VERSION (0x1)
+static unsigned int vhost_user_used_memslots;
struct vhost_user {
struct vhost_dev *dev;
@@ -2354,6 +2355,31 @@ void vhost_user_cleanup(VhostUserState *user)
user->chr = NULL;
}
+static void vhost_user_set_used_memslots(struct vhost_dev *dev)
+{ /* count regions backed by an fd-mapped MemoryRegion; cache for getter */
+    unsigned int counter = 0;
+    unsigned int i;
+
+    for (i = 0; i < dev->mem->nregions; ++i) {
+        struct vhost_memory_region *reg = dev->mem->regions + i;
+        ram_addr_t offset;
+        MemoryRegion *mr;
+
+        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
+        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
+                                     &offset);
+        if (mr && memory_region_get_fd(mr) >= 0) { /* -1 means no fd; 0 is valid */
+            counter++;
+        }
+    }
+    vhost_user_used_memslots = counter;
+}
+
+static unsigned int vhost_user_get_used_memslots(void)
+{
+    return vhost_user_used_memslots;
+}
+
const VhostOps user_ops = {
.backend_type = VHOST_BACKEND_TYPE_USER,
.vhost_backend_init = vhost_user_backend_init,
@@ -2387,4 +2413,6 @@ const VhostOps user_ops = {
.vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
.vhost_get_inflight_fd = vhost_user_get_inflight_fd,
.vhost_set_inflight_fd = vhost_user_set_inflight_fd,
+ .vhost_set_used_memslots = vhost_user_set_used_memslots,
+ .vhost_get_used_memslots = vhost_user_get_used_memslots,
};
@@ -45,20 +45,22 @@
static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;
-static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
QLIST_HEAD_INITIALIZER(vhost_devices);
+static bool used_memslots_exceeded;
+
bool vhost_has_free_slot(void)
{
- unsigned int slots_limit = ~0U;
struct vhost_dev *hdev;
QLIST_FOREACH(hdev, &vhost_devices, entry) {
- unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
- slots_limit = MIN(slots_limit, r);
+ if (hdev->vhost_ops->vhost_get_used_memslots() >=
+ hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+ return false;
+ }
}
- return slots_limit > used_memslots;
+ return true;
}
static void vhost_dev_sync_region(struct vhost_dev *dev,
@@ -502,7 +504,6 @@ static void vhost_commit(MemoryListener *listener)
dev->n_mem_sections * sizeof dev->mem->regions[0];
dev->mem = g_realloc(dev->mem, regions_size);
dev->mem->nregions = dev->n_mem_sections;
- used_memslots = dev->mem->nregions;
for (i = 0; i < dev->n_mem_sections; i++) {
struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
struct MemoryRegionSection *mrs = dev->mem_sections + i;
@@ -678,6 +679,7 @@ static void vhost_region_add_section(struct vhost_dev *dev,
dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
memory_region_ref(section->mr);
}
+ dev->vhost_ops->vhost_set_used_memslots(dev);
}
/* Used for both add and nop callbacks */
@@ -693,6 +695,17 @@ static void vhost_region_addnop(MemoryListener *listener,
vhost_region_add_section(dev, section);
}
+static void vhost_region_del(MemoryListener *listener,
+                             MemoryRegionSection *section)
+{ /* .region_del hook: refresh the backend's used-memslot count */
+    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+                                         memory_listener);
+    if (!vhost_section(dev, section)) {
+        return; /* section is not one this vhost device tracks */
+    }
+    dev->vhost_ops->vhost_set_used_memslots(dev); /* NOTE(review): dev->mem is only rebuilt in vhost_commit(), so this may record a pre-removal count; also assumes every backend (incl. vdpa) implements the op -- verify */
+}
+
static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
@@ -1248,6 +1261,19 @@ static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
event_notifier_cleanup(&vq->masked_notifier);
}
+static bool vhost_dev_used_memslots_is_exceeded(struct vhost_dev *hdev)
+{ /* true if hdev's backend offers fewer memslots than are currently used */
+    if (hdev->vhost_ops->vhost_get_used_memslots() >
+        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+        error_report("vhost backend memory slots limit is less"
+                     " than current number of present memory slots");
+        used_memslots_exceeded = true;
+        return true;
+    }
+    used_memslots_exceeded = false; /* side effect: global flag records last result */
+    return false;
+}
+
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
VhostBackendType backend_type, uint32_t busyloop_timeout)
{
@@ -1300,6 +1326,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
hdev->memory_listener = (MemoryListener) {
.begin = vhost_begin,
.commit = vhost_commit,
+ .region_del = vhost_region_del,
.region_add = vhost_region_addnop,
.region_nop = vhost_region_addnop,
.log_start = vhost_log_start,
@@ -1346,9 +1373,13 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
memory_listener_register(&hdev->memory_listener, &address_space_memory);
QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
- if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
- error_report("vhost backend memory slots limit is less"
- " than current number of present memory slots");
+    /*
+     * Re-check after registering the memory listener: on the first
+     * vhost device hot-plug, vhost_get_used_memslots() returned 0 until
+     * the listener above populated the region list, so an earlier
+     * vhost_dev_used_memslots_is_exceeded() call could not have failed.
+     */
+ if (vhost_dev_used_memslots_is_exceeded(hdev)) {
r = -1;
if (busyloop_timeout) {
goto fail_busyloop;
@@ -1773,3 +1804,8 @@ int vhost_net_set_backend(struct vhost_dev *hdev,
return -1;
}
+
+bool used_memslots_is_exceeded(void)
+{ /* result of the most recent vhost_dev_used_memslots_is_exceeded() check */
+    return used_memslots_exceeded;
+}
@@ -124,6 +124,9 @@ typedef int (*vhost_get_device_id_op)(struct vhost_dev *dev, uint32_t *dev_id);
typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev);
+typedef void (*vhost_set_used_memslots_op)(struct vhost_dev *dev);
+typedef unsigned int (*vhost_get_used_memslots_op)(void);
+
typedef struct VhostOps {
VhostBackendType backend_type;
vhost_backend_init vhost_backend_init;
@@ -168,6 +171,8 @@ typedef struct VhostOps {
vhost_vq_get_addr_op vhost_vq_get_addr;
vhost_get_device_id_op vhost_get_device_id;
vhost_force_iommu_op vhost_force_iommu;
+ vhost_set_used_memslots_op vhost_set_used_memslots;
+ vhost_get_used_memslots_op vhost_get_used_memslots;
} VhostOps;
extern const VhostOps user_ops;
@@ -144,4 +144,5 @@ int vhost_dev_set_inflight(struct vhost_dev *dev,
struct vhost_inflight *inflight);
int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
struct vhost_inflight *inflight);
+bool used_memslots_is_exceeded(void);
#endif
@@ -20,6 +20,7 @@
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "trace.h"
+#include "hw/virtio/vhost.h"
typedef struct NetVhostUserState {
NetClientState nc;
@@ -347,6 +348,12 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
net_vhost_user_event, NULL, nc0->name, NULL,
true);
+
+ if (used_memslots_is_exceeded()) {
+ error_report("used memslots exceeded the backend limit, quit "
+ "loop");
+ goto err;
+ }
} while (!s->started);
assert(s->vhost_net);