All the mail mirrored from lore.kernel.org
 help / color / mirror / Atom feed
From: Igor Mammedov <imammedo@redhat.com>
To: qemu-devel@nongnu.org
Cc: pbonzini@redhat.com, mst@redhat.com
Subject: [Qemu-devel] [PATCH v4 7/7] memory: add support for deleting HVA mapped MemoryRegion
Date: Thu,  9 Jul 2015 13:47:24 +0200	[thread overview]
Message-ID: <1436442444-132020-8-git-send-email-imammedo@redhat.com> (raw)
In-Reply-To: <1436442444-132020-1-git-send-email-imammedo@redhat.com>

Although memory_region_del_subregion() removes a MemoryRegion
from the current address space, it's possible that it is still
in use/referenced until the old address space view is destroyed.
That makes it impossible to unmap it from the HVA region at the
time of memory_region_del_subregion().
As a solution, track HVA mapped MemoryRegions in a list and
don't allow mapping another MemoryRegion at the same address
until the respective MemoryRegion is destroyed, delaying the
unmapping from the HVA range until the MemoryRegion destructor
is called. Also add checks to memory_region_add_subregion_to_hva()
to make sure that the HVA range is available for mapping a new
region, or return an error from it if the address range is not
available.

In memory hotplug terms it would mean that the user should delete
the corresponding backend along with the pc-dimm device:
 device_del dimm1
 object_del dimm1_backend_memdev
after that, dimm1_backend_memdev's MemoryRegion will be destroyed
once all accesses to it are gone and the old flatview is destroyed
as well.

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
---
 hw/mem/pc-dimm.c      |  6 +++++-
 include/exec/memory.h |  6 +++++-
 memory.c              | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 65 insertions(+), 3 deletions(-)

diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c
index cb98926..8e5f388 100644
--- a/hw/mem/pc-dimm.c
+++ b/hw/mem/pc-dimm.c
@@ -95,7 +95,11 @@ void pc_dimm_memory_plug(DeviceState *dev, MemoryHotplugState *hpms,
         goto out;
     }
 
-    memory_region_add_subregion_to_hva(&hpms->mr, addr - hpms->base, mr);
+    memory_region_add_subregion_to_hva(&hpms->mr, addr - hpms->base, mr,
+                                       &local_err);
+    if (local_err) {
+        goto out;
+    }
     vmstate_register_ram(mr, dev);
     numa_set_mem_node_id(addr, memory_region_size(mr), dimm->node);
 
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 1f2cbd1..0af272d 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -174,6 +174,7 @@ struct MemoryRegion {
     bool romd_mode;
     bool ram;
     void *rsvd_hva;
+    bool hva_mapped;
     bool skip_dump;
     bool readonly; /* For RAM regions */
     bool enabled;
@@ -188,6 +189,7 @@ struct MemoryRegion {
     QTAILQ_HEAD(subregions, MemoryRegion) subregions;
     QTAILQ_ENTRY(MemoryRegion) subregions_link;
     QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
+    QTAILQ_ENTRY(MemoryRegion) hva_link;
     const char *name;
     uint8_t dirty_log_mask;
     unsigned ioeventfd_nb;
@@ -943,10 +945,12 @@ void memory_region_add_subregion(MemoryRegion *mr,
  *      initialized with memory_region_init().
  * @offset: the offset relative to @mr where @subregion is added.
  * @subregion: the subregion to be added.
+ * @errp: contains error if remapping to HVA fails/not possible
  */
 void memory_region_add_subregion_to_hva(MemoryRegion *mr,
                                         hwaddr offset,
-                                        MemoryRegion *subregion);
+                                        MemoryRegion *subregion,
+                                        Error **errp);
 
 /**
  * memory_region_add_subregion_overlap: Add a subregion to a container
diff --git a/memory.c b/memory.c
index bf6aa4e..fad0b8f 100644
--- a/memory.c
+++ b/memory.c
@@ -34,6 +34,7 @@ static unsigned memory_region_transaction_depth;
 static bool memory_region_update_pending;
 static bool ioeventfd_update_pending;
 static bool global_dirty_log = false;
+static QemuMutex hva_lock;
 
 static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
     = QTAILQ_HEAD_INITIALIZER(memory_listeners);
@@ -1761,6 +1762,24 @@ done:
     memory_region_transaction_commit();
 }
 
+static QTAILQ_HEAD(, MemoryRegion) hva_mapped_head =
+    QTAILQ_HEAD_INITIALIZER(hva_mapped_head);
+
+static void memory_region_destructor_hva_ram(MemoryRegion *mr)
+{
+    MemoryRegion *h, *tmp;
+
+    qemu_mutex_lock(&hva_lock);
+    qemu_ram_unmap_hva(mr->ram_addr);
+    memory_region_destructor_ram(mr);
+    QTAILQ_FOREACH_SAFE(h, &hva_mapped_head, hva_link, tmp) {
+        if (mr == h) {
+            QTAILQ_REMOVE(&hva_mapped_head, h, hva_link);
+        }
+    }
+    qemu_mutex_unlock(&hva_lock);
+}
+
 static void memory_region_add_subregion_common(MemoryRegion *mr,
                                                hwaddr offset,
                                                MemoryRegion *subregion)
@@ -1792,11 +1811,45 @@ void memory_region_add_subregion_overlap(MemoryRegion *mr,
 
 void memory_region_add_subregion_to_hva(MemoryRegion *mr,
                                         hwaddr offset,
-                                        MemoryRegion *subregion)
+                                        MemoryRegion *subregion,
+                                        Error **errp)
 {
     if (mr->rsvd_hva && subregion->ram) {
+        MemoryRegion *h, *tmp;
+        Int128 e, oe;
+
+        qemu_mutex_lock(&hva_lock);
+        QTAILQ_FOREACH_SAFE(h, &hva_mapped_head, hva_link, tmp) {
+            if (subregion->hva_mapped) {
+                error_setg(errp, "HVA mapped memory region '%s' is not "
+                           "reusable, use a new one instead",
+                           subregion->name);
+                qemu_mutex_unlock(&hva_lock);
+                return;
+            }
+
+            e = int128_add(int128_make64(h->addr),
+                           int128_make64(memory_region_size(h)));
+            oe = int128_add(int128_make64(offset),
+                            int128_make64(memory_region_size(subregion)));
+            if (offset >= h->addr && int128_le(oe, e)) {
+                MemoryRegionSection rsvd_hva;
+                rsvd_hva = memory_region_find_hva_range(mr);
+                error_setg(errp, "memory at 0x%" PRIx64 " is still in use"
+                           "by HVA mapped region: %s",
+                           rsvd_hva.offset_within_address_space + offset,
+                           h->name);
+                qemu_mutex_unlock(&hva_lock);
+                return;
+            }
+        }
+
+        QTAILQ_INSERT_TAIL(&hva_mapped_head, subregion, hva_link);
+        subregion->destructor = memory_region_destructor_hva_ram;
+        subregion->hva_mapped = true;
         qemu_ram_remap_hva(subregion->ram_addr,
                            memory_region_get_ram_ptr(mr) + offset);
+        qemu_mutex_unlock(&hva_lock);
     }
     memory_region_add_subregion(mr, offset, subregion);
 }
@@ -2290,6 +2343,7 @@ static const TypeInfo memory_region_info = {
 static void memory_register_types(void)
 {
     type_register_static(&memory_region_info);
+    qemu_mutex_init(&hva_lock);
 }
 
 type_init(memory_register_types)
-- 
1.8.3.1

  parent reply	other threads:[~2015-07-09 11:47 UTC|newest]

Thread overview: 24+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-07-09 11:47 [Qemu-devel] [PATCH v4 0/7] Fix QEMU crash during memory hotplug with vhost=on Igor Mammedov
2015-07-09 11:47 ` [Qemu-devel] [PATCH v4 1/7] memory: get rid of memory_region_destructor_ram_from_ptr() Igor Mammedov
2015-07-09 11:47 ` [Qemu-devel] [PATCH v4 2/7] memory: introduce MemoryRegion container with reserved HVA range Igor Mammedov
2015-07-09 11:47 ` [Qemu-devel] [PATCH v4 3/7] pc: reserve hotpluggable memory range with memory_region_init_hva_range() Igor Mammedov
2015-07-09 11:47 ` [Qemu-devel] [PATCH v4 4/7] pc: fix QEMU crashing when more than ~50 memory hotplugged Igor Mammedov
2015-07-09 13:06   ` Michael S. Tsirkin
2015-07-09 13:43     ` Paolo Bonzini
2015-07-09 13:46       ` Michael S. Tsirkin
2015-07-10 10:12         ` Igor Mammedov
2015-07-13  6:55           ` Michael S. Tsirkin
2015-07-13 18:55             ` Igor Mammedov
2015-07-13 20:14               ` Michael S. Tsirkin
2015-07-14 13:02                 ` Igor Mammedov
2015-07-14 13:14                   ` Michael S. Tsirkin
2015-07-09 11:47 ` [Qemu-devel] [PATCH v4 5/7] exec: make sure that RAMBlock descriptor won't be leaked Igor Mammedov
2015-07-09 11:47 ` [Qemu-devel] [PATCH v4 6/7] exec: add qemu_ram_unmap_hva() API for unmapping memory from HVA area Igor Mammedov
2015-07-09 11:47 ` Igor Mammedov [this message]
2015-07-15 15:12 ` [Qemu-devel] [PATCH v4 0/7] Fix QEMU crash during memory hotplug with vhost=on Igor Mammedov
2015-07-15 16:32   ` Michael S. Tsirkin
2015-07-16  7:26     ` Igor Mammedov
2015-07-16  7:35       ` Michael S. Tsirkin
2015-07-16  9:42         ` Igor Mammedov
2015-07-16 10:24           ` Michael S. Tsirkin
2015-07-16 11:11             ` Igor Mammedov

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1436442444-132020-8-git-send-email-imammedo@redhat.com \
    --to=imammedo@redhat.com \
    --cc=mst@redhat.com \
    --cc=pbonzini@redhat.com \
    --cc=qemu-devel@nongnu.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.