From 185d7efe768229b43911504f64fccd33ad3650ef Mon Sep 17 00:00:00 2001
From: Jinhua Cao <caojinhua1@huawei.com>
Date: Fri, 11 Feb 2022 19:17:59 +0800
Subject: [PATCH] vhost-user: add separate memslot counter for vhost-user

used_memslots is currently taken to be dev->mem->nregions. That is
correct for vhost-kernel, but not for vhost-user: vhost-user only
consumes a memslot for memory regions that are backed by a file
descriptor, and not every region has one.

This matters in practice: if used_memslots is 8 but only 5 of those
regions are actually usable by vhost-user, hot-plugging new RAM fails
because vhost_has_free_slot() returns false, even though the hot-plug
would in fact be safe. Count used memslots separately per backend
instead.
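
For illustration only (not part of the patch; toy code with invented
names and a made-up slot limit): a minimal standalone sketch of the
counting difference described above.

    /* toy model, not QEMU code: only fd-backed regions occupy a memslot */
    #include <stdio.h>

    struct toy_region { int fd; };            /* fd < 0: no backing fd */

    static unsigned int used_fd_backed(const struct toy_region *r,
                                       unsigned int n)
    {
        unsigned int used = 0, i;

        for (i = 0; i < n; i++) {
            if (r[i].fd >= 0) {
                used++;
            }
        }
        return used;
    }

    int main(void)
    {
        /* 8 regions, only 5 of them backed by a file descriptor */
        struct toy_region r[8] = { {3}, {4}, {5}, {6}, {7}, {-1}, {-1}, {-1} };
        unsigned int nregions = 8, limit = 8;

        /* old behaviour: every region counts, so no slot appears free */
        printf("nregions-based: free slot? %s\n",
               nregions < limit ? "yes" : "no");
        /* new behaviour for vhost-user: only 5 slots are really used */
        printf("fd-based count: free slot? %s\n",
               used_fd_backed(r, nregions) < limit ? "yes" : "no");
        return 0;
    }
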
Signed-off-by: Jinhua Cao <caojinhua1@huawei.com>
---
hw/virtio/vhost-backend.c | 14 ++++++++++
hw/virtio/vhost-user.c | 27 ++++++++++++++++++
hw/virtio/vhost.c | 46 +++++++++++++++++++++++++------
include/hw/virtio/vhost-backend.h | 4 +++
4 files changed, 82 insertions(+), 9 deletions(-)
diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index b65f8f7e97..2acfb750fd 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -20,6 +20,8 @@
#include <linux/vhost.h>
#include <sys/ioctl.h>
+static unsigned int vhost_kernel_used_memslots;
+
static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
void *arg)
{
@@ -293,6 +295,16 @@ static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
}
+static void vhost_kernel_set_used_memslots(struct vhost_dev *dev)
+{
+ vhost_kernel_used_memslots = dev->mem->nregions;
+}
+
+static unsigned int vhost_kernel_get_used_memslots(void)
+{
+ return vhost_kernel_used_memslots;
+}
+
const VhostOps kernel_ops = {
.backend_type = VHOST_BACKEND_TYPE_KERNEL,
.vhost_backend_init = vhost_kernel_init,
@@ -325,6 +337,8 @@ const VhostOps kernel_ops = {
#endif /* CONFIG_VHOST_VSOCK */
.vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
.vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
+ .vhost_set_used_memslots = vhost_kernel_set_used_memslots,
+ .vhost_get_used_memslots = vhost_kernel_get_used_memslots,
};
#endif
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index a8feea489b..176cae9244 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -234,6 +234,7 @@ static VhostUserMsg m __attribute__ ((unused));
/* The version of the protocol we support */
#define VHOST_USER_VERSION (0x1)
+static unsigned int vhost_user_used_memslots;
struct vhost_user {
struct vhost_dev *dev;
@@ -2524,6 +2525,30 @@ void vhost_user_cleanup(VhostUserState *user)
user->chr = NULL;
}
+static void vhost_user_set_used_memslots(struct vhost_dev *dev)
+{
+ unsigned int counter = 0;
+ int i;
+
+ for (i = 0; i < dev->mem->nregions; ++i) {
+ struct vhost_memory_region *reg = dev->mem->regions + i;
+ ram_addr_t offset;
+ MemoryRegion *mr;
+
+ mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
+ &offset);
+ if (mr && memory_region_get_fd(mr) > 0) {
+ counter++;
+ }
+ }
+ vhost_user_used_memslots = counter;
+}
+
+static unsigned int vhost_user_get_used_memslots(void)
+{
+ return vhost_user_used_memslots;
+}
+
const VhostOps user_ops = {
.backend_type = VHOST_BACKEND_TYPE_USER,
.vhost_backend_init = vhost_user_backend_init,
@@ -2557,4 +2582,6 @@ const VhostOps user_ops = {
.vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
.vhost_get_inflight_fd = vhost_user_get_inflight_fd,
.vhost_set_inflight_fd = vhost_user_set_inflight_fd,
+ .vhost_set_used_memslots = vhost_user_set_used_memslots,
+ .vhost_get_used_memslots = vhost_user_get_used_memslots,
};
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index dafb23c481..e4809777bc 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -45,20 +45,20 @@
static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;
-static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
QLIST_HEAD_INITIALIZER(vhost_devices);
bool vhost_has_free_slot(void)
{
- unsigned int slots_limit = ~0U;
struct vhost_dev *hdev;
QLIST_FOREACH(hdev, &vhost_devices, entry) {
- unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
- slots_limit = MIN(slots_limit, r);
+ if (hdev->vhost_ops->vhost_get_used_memslots() >=
+ hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+ return false;
+ }
}
- return slots_limit > used_memslots;
+ return true;
}
static void vhost_dev_sync_region(struct vhost_dev *dev,
@@ -521,7 +521,6 @@ static void vhost_commit(MemoryListener *listener)
dev->n_mem_sections * sizeof dev->mem->regions[0];
dev->mem = g_realloc(dev->mem, regions_size);
dev->mem->nregions = dev->n_mem_sections;
- used_memslots = dev->mem->nregions;
for (i = 0; i < dev->n_mem_sections; i++) {
struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
struct MemoryRegionSection *mrs = dev->mem_sections + i;
@@ -697,6 +696,7 @@ static void vhost_region_add_section(struct vhost_dev *dev,
dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
memory_region_ref(section->mr);
}
+ dev->vhost_ops->vhost_set_used_memslots(dev);
}
/* Used for both add and nop callbacks */
@@ -712,6 +712,17 @@ static void vhost_region_addnop(MemoryListener *listener,
vhost_region_add_section(dev, section);
}
+static void vhost_region_del(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+ memory_listener);
+ if (!vhost_section(dev, section)) {
+ return;
+ }
+ dev->vhost_ops->vhost_set_used_memslots(dev);
+}
+
static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
@@ -1319,6 +1330,18 @@ static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
event_notifier_cleanup(&vq->masked_notifier);
}
+static bool vhost_dev_used_memslots_is_exceeded(struct vhost_dev *hdev)
+{
+ if (hdev->vhost_ops->vhost_get_used_memslots() >
+ hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+ error_report("vhost backend memory slots limit is less"
+ " than current number of present memory slots");
+ return true;
+ }
+
+ return false;
+}
+
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
VhostBackendType backend_type, uint32_t busyloop_timeout,
Error **errp)
@@ -1374,6 +1397,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
.name = "vhost",
.begin = vhost_begin,
.commit = vhost_commit,
+ .region_del = vhost_region_del,
.region_add = vhost_region_addnop,
.region_nop = vhost_region_addnop,
.log_start = vhost_log_start,
@@ -1420,9 +1444,13 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
memory_listener_register(&hdev->memory_listener, &address_space_memory);
QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
- if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
- error_setg(errp, "vhost backend memory slots limit is less"
- " than current number of present memory slots");
+ /*
+ * If the VM was started without any vhost device, then the first time
+ * a vhost device is hot-plugged vhost_dev_used_memslots_is_exceeded()
+ * would always return false (vhost_get_used_memslots() is still 0),
+ * so it needs to be double-checked here.
+ */
+ if (vhost_dev_used_memslots_is_exceeded(hdev)) {
r = -EINVAL;
goto fail_busyloop;
}
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index 81bf3109f8..a64708f456 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -125,6 +125,8 @@ typedef int (*vhost_vq_get_addr_op)(struct vhost_dev *dev,
typedef int (*vhost_get_device_id_op)(struct vhost_dev *dev, uint32_t *dev_id);
typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev);
+typedef void (*vhost_set_used_memslots_op)(struct vhost_dev *dev);
+typedef unsigned int (*vhost_get_used_memslots_op)(void);
typedef struct VhostOps {
VhostBackendType backend_type;
@@ -171,6 +173,8 @@ typedef struct VhostOps {
vhost_vq_get_addr_op vhost_vq_get_addr;
vhost_get_device_id_op vhost_get_device_id;
vhost_force_iommu_op vhost_force_iommu;
+ vhost_set_used_memslots_op vhost_set_used_memslots;
+ vhost_get_used_memslots_op vhost_get_used_memslots;
} VhostOps;
int vhost_backend_update_device_iotlb(struct vhost_dev *dev,
--
2.27.0
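
For context only (not part of the patch; standalone toy code with
invented names): after this change vhost_has_free_slot() no longer
compares a single global used_memslots counter against the smallest
backend limit; every registered vhost device reports its own used-slot
count, which is checked against its own backend limit. A rough sketch
of that per-backend semantics:

    /* toy model of the per-backend free-slot check */
    #include <stdbool.h>
    #include <stdio.h>

    struct toy_vhost_dev {
        unsigned int used_memslots;   /* vhost_get_used_memslots()       */
        unsigned int memslots_limit;  /* vhost_backend_memslots_limit()  */
    };

    static bool toy_has_free_slot(const struct toy_vhost_dev *devs,
                                  unsigned int n)
    {
        unsigned int i;

        for (i = 0; i < n; i++) {
            /* a backend already at its own limit blocks further hot-plug */
            if (devs[i].used_memslots >= devs[i].memslots_limit) {
                return false;
            }
        }
        return true;
    }

    int main(void)
    {
        /* e.g. vhost-user using 5 of 8 slots, vhost-kernel using 8 of 64 */
        struct toy_vhost_dev devs[] = { { 5, 8 }, { 8, 64 } };

        printf("free slot? %s\n", toy_has_free_slot(devs, 2) ? "yes" : "no");
        return 0;
    }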