From mboxrd@z Thu Jan 1 00:00:00 1970 Received: from eggs.gnu.org ([2001:4830:134:3::10]:41609) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1ZDAIi-00055S-PW for qemu-devel@nongnu.org; Thu, 09 Jul 2015 07:47:37 -0400 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1ZDAIf-0005jt-RT for qemu-devel@nongnu.org; Thu, 09 Jul 2015 07:47:36 -0400 Received: from mx1.redhat.com ([209.132.183.28]:41550) by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1ZDAIf-0005jp-JU for qemu-devel@nongnu.org; Thu, 09 Jul 2015 07:47:33 -0400 Received: from int-mx13.intmail.prod.int.phx2.redhat.com (int-mx13.intmail.prod.int.phx2.redhat.com [10.5.11.26]) by mx1.redhat.com (Postfix) with ESMTPS id 378343A811F for ; Thu, 9 Jul 2015 11:47:33 +0000 (UTC) From: Igor Mammedov Date: Thu, 9 Jul 2015 13:47:21 +0200 Message-Id: <1436442444-132020-5-git-send-email-imammedo@redhat.com> In-Reply-To: <1436442444-132020-1-git-send-email-imammedo@redhat.com> References: <1436442444-132020-1-git-send-email-imammedo@redhat.com> Subject: [Qemu-devel] [PATCH v4 4/7] pc: fix QEMU crashing when more than ~50 memory hotplugged List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , To: qemu-devel@nongnu.org Cc: pbonzini@redhat.com, mst@redhat.com QEMU asserts in vhost due to hitting the vhost backend limit on the number of supported memory regions. Describe all hotplugged memory as one continuous range to vhost, with a linear 1:1 HVA->GPA mapping in the backend. 
Signed-off-by: Igor Mammedov --- hw/virtio/vhost.c | 47 ++++++++++++++++++++++++++++++++++++++++++++--- include/hw/virtio/vhost.h | 1 + 2 files changed, 45 insertions(+), 3 deletions(-) diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index 2712c6f..7bc27f0 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -432,6 +432,10 @@ static void vhost_set_memory(MemoryListener *listener, assert(size); + if (!dev->rsvd_hva.mr) { + dev->rsvd_hva = memory_region_find_hva_range(section->mr); + } + /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */ ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region; if (add) { @@ -472,6 +476,42 @@ static void vhost_begin(MemoryListener *listener) dev->mem_changed_start_addr = -1; } +static int vhost_set_mem_table(struct vhost_dev *dev) +{ + hwaddr start_addr = 0; + ram_addr_t size = 0; + struct vhost_memory *mem; + int r, i; + + /* drop memory ranges from continuos HVA */ + mem = g_memdup(dev->mem, offsetof(struct vhost_memory, regions) + + dev->mem->nregions * sizeof dev->mem->regions[0]); + start_addr = dev->rsvd_hva.offset_within_address_space; + size = int128_get64(dev->rsvd_hva.size); + for (i = 0; i < mem->nregions; i++) { + if (mem->regions[i].guest_phys_addr >= start_addr && + mem->regions[i].guest_phys_addr < start_addr + size) { + mem->nregions--; + memmove(&mem->regions[i], &mem->regions[i + 1], + (mem->nregions - i) * sizeof mem->regions[0]); + } + } + /* add one continuos HVA entry if memory ranges from it is present */ + if (dev->mem->nregions > mem->nregions) { + struct vhost_memory_region *reg = &mem->regions[mem->nregions]; + + reg->guest_phys_addr = start_addr; + reg->memory_size = size; + reg->userspace_addr = + (__u64)memory_region_get_ram_ptr(dev->rsvd_hva.mr); + mem->nregions++; + } + + r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, mem); + g_free(mem); + return r; +} + static void vhost_commit(MemoryListener *listener) { struct vhost_dev *dev = 
container_of(listener, struct vhost_dev, @@ -500,7 +540,7 @@ static void vhost_commit(MemoryListener *listener) } if (!dev->log_enabled) { - r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, dev->mem); + r = vhost_set_mem_table(dev); assert(r >= 0); dev->memory_changed = false; return; @@ -513,7 +553,7 @@ static void vhost_commit(MemoryListener *listener) if (dev->log_size < log_size) { vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER); } - r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, dev->mem); + r = vhost_set_mem_table(dev); assert(r >= 0); /* To log less, can only decrease log size after table update. */ if (dev->log_size > log_size + VHOST_LOG_BUFFER) { @@ -956,6 +996,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, migrate_add_blocker(hdev->migration_blocker); } hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions)); + memset(&hdev->rsvd_hva, 0, sizeof hdev->rsvd_hva); hdev->n_mem_sections = 0; hdev->mem_sections = NULL; hdev->log = NULL; @@ -1119,7 +1160,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) if (r < 0) { goto fail_features; } - r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_MEM_TABLE, hdev->mem); + r = vhost_set_mem_table(hdev); if (r < 0) { r = -errno; goto fail_mem; diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h index dd51050..d41bf2f 100644 --- a/include/hw/virtio/vhost.h +++ b/include/hw/virtio/vhost.h @@ -40,6 +40,7 @@ struct vhost_dev { struct vhost_memory *mem; int n_mem_sections; MemoryRegionSection *mem_sections; + MemoryRegionSection rsvd_hva; struct vhost_virtqueue *vqs; int nvqs; /* the first virtqueue which would be used by this vhost dev */ -- 1.8.3.1