* [PATCH v2 0/2] x86/P2M: pass on errors from p2m_set_entry()
From: Jan Beulich @ 2014-05-06 13:18 UTC
  To: xen-devel; +Cc: Tim Deegan

1: pass on errors from p2m_set_entry()
2: p2m_change_type() should pass on error from p2m_set_entry()

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Split out change to p2m_change_type().
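
Both patches apply the same mechanical change: functions that so far
called p2m_set_entry() fire-and-forget now hand its result back to
their callers. A minimal sketch of the before/after shape (illustrative
names, not the real Xen functions):

    /* set_entry() stands in for p2m_set_entry(); illustrative only. */
    extern int set_entry(unsigned long gfn);

    /* Before: a failed update is invisible to the caller. */
    static void update_mapping_old(unsigned long gfn)
    {
        set_entry(gfn);            /* int result silently dropped */
    }

    /* After: 0 on success, -errno from set_entry() on failure. */
    static int update_mapping_new(unsigned long gfn)
    {
        return set_entry(gfn);
    }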


* [PATCH v2 1/2] x86/P2M: pass on errors from p2m_set_entry()
From: Jan Beulich @ 2014-05-06 13:33 UTC
  To: xen-devel; +Cc: Tim Deegan

... at least in a couple of straightforward cases.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Split out change to p2m_change_type().

--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -501,7 +501,7 @@ void p2m_final_teardown(struct domain *d
 }
 
 
-static void
+static int
 p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
                 unsigned int page_order)
 {
@@ -515,7 +515,7 @@ p2m_remove_page(struct p2m_domain *p2m, 
         if ( need_iommu(p2m->domain) )
             for ( i = 0; i < (1 << page_order); i++ )
                 iommu_unmap_page(p2m->domain, mfn + i);
-        return;
+        return 0;
     }
 
     ASSERT(gfn_locked_by_me(p2m, gfn));
@@ -531,8 +531,8 @@ p2m_remove_page(struct p2m_domain *p2m, 
             ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
         }
     }
-    p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid,
-                  p2m->default_access);
+    return p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid,
+                         p2m->default_access);
 }
 
 void
@@ -957,8 +957,7 @@ int p2m_mem_paging_nominate(struct domai
         goto out;
 
     /* Fix p2m entry */
-    p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_out, a);
-    ret = 0;
+    ret = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_out, a);
 
  out:
     gfn_unlock(p2m, gfn, 0);
@@ -1022,7 +1021,8 @@ int p2m_mem_paging_evict(struct domain *
         put_page(page);
 
     /* Remove mapping from p2m table */
-    p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K, p2m_ram_paged, a);
+    ret = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K,
+                        p2m_ram_paged, a);
 
     /* Clear content before returning the page to Xen */
     scrub_one_page(page);
@@ -1030,8 +1030,6 @@ int p2m_mem_paging_evict(struct domain *
     /* Track number of paged gfns */
     atomic_inc(&d->paged_pages);
 
-    ret = 0;
-
  out_put:
     /* Put the page back so it gets freed */
     put_page(page);
@@ -1231,16 +1229,14 @@ int p2m_mem_paging_prep(struct domain *d
     /* Make the page already guest-accessible. If the pager still has a
      * pending resume operation, it will be idempotent p2m entry-wise,
      * but will unpause the vcpu */
-    p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
-                  paging_mode_log_dirty(d) ? p2m_ram_logdirty :
-                  p2m_ram_rw, a);
+    ret = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
+                        paging_mode_log_dirty(d) ? p2m_ram_logdirty
+                                                 : p2m_ram_rw, a);
     set_gpfn_from_mfn(mfn_x(mfn), gfn);
 
     if ( !page_extant )
         atomic_dec(&d->paged_pages);
 
-    ret = 0;
-
  out:
     gfn_unlock(p2m, gfn, 0);
     return ret;
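
With p2m_remove_page() now returning int, callers are in a position to
act on the result rather than assume success. A hypothetical caller
sketch (unmap_one_gfn() is invented for illustration; it is not part of
this patch):

    /* Hypothetical wrapper, for illustration only. */
    static int unmap_one_gfn(struct p2m_domain *p2m, unsigned long gfn,
                             unsigned long mfn)
    {
        int rc;

        gfn_lock(p2m, gfn, 0);
        rc = p2m_remove_page(p2m, gfn, mfn, PAGE_ORDER_4K);
        gfn_unlock(p2m, gfn, 0);

        return rc;    /* 0 on success, -errno from p2m_set_entry() */
    }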





* [PATCH v2 2/2] x86/P2M: p2m_change_type() should pass on error from p2m_set_entry()
From: Jan Beulich @ 2014-05-06 13:34 UTC
  To: xen-devel
  Cc: Keir Fraser, Jinsong Liu, Christoph Egger, Tim Deegan,
	Aravind Gopalakrishnan, suravee.suthikulpanit,
	Andres Lagar-Cavilla

Rename the function so that oversights in eventual backports involving
it are caught at build time, and, in one case where this is trivially
possible, also stop ignoring its return value.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Split out from the previous patch to follow Tim's request to have
    the function return an error indication rather than the old type,
    in turn making it desirable to rename it so that eventual backport
    oversights can be avoided.
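
Note that the return convention inverts with the rename: the old
function returned the entry's previous type (success meant it equalled
the expected old type), while the new one returns 0 or -errno. The two
caller idioms, schematically (simplified from the hunks below):

    /* Old contract: compare the returned previous type. */
    if ( p2m_change_type(d, gfn, p2m_ram_rw, p2m_ram_logdirty) != p2m_ram_rw )
        return -EINVAL;    /* raced with another type change */

    /* New contract: non-zero means failure -- -EBUSY if the entry was
     * not of the expected old type, else p2m_set_entry()'s error. */
    rc = p2m_change_type_one(d, gfn, p2m_ram_rw, p2m_ram_logdirty);
    if ( rc )
        return rc;

The old idiom would still compile against the new signature while
testing the wrong thing, so keeping the old name would let a missed
conversion in a backport slip through silently; the rename turns it
into a build failure.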

--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1334,9 +1334,10 @@ long arch_do_domctl(
         unsigned long pfn = domctl->u.set_broken_page_p2m.pfn;
         mfn_t mfn = get_gfn_query(d, pfn, &pt);
 
-        if ( unlikely(!mfn_valid(mfn_x(mfn)) || !p2m_is_ram(pt) ||
-                     (p2m_change_type(d, pfn, pt, p2m_ram_broken) != pt)) )
+        if ( unlikely(!mfn_valid(mfn_x(mfn))) || unlikely(!p2m_is_ram(pt)) )
             ret = -EINVAL;
+        else
+            ret = p2m_change_type_one(d, pfn, pt, p2m_ram_broken);
 
         put_gfn(d, pfn);
     }
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1698,7 +1698,7 @@ int hvm_hap_nested_page_fault(paddr_t gp
         if ( access_w )
         {
             paging_mark_dirty(v->domain, mfn_x(mfn));
-            p2m_change_type(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
+            p2m_change_type_one(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
         }
         rc = 1;
         goto out_put_gfn;
@@ -4540,7 +4540,7 @@ long do_hvm_op(unsigned long op, XEN_GUE
         while ( a.nr > start_iter )
         {
             unsigned long pfn = a.first_pfn + start_iter;
-            p2m_type_t t, nt;
+            p2m_type_t t;
 
             get_gfn_unshare(d, pfn, &t);
             if ( p2m_is_paging(t) )
@@ -4563,16 +4563,10 @@ long do_hvm_op(unsigned long op, XEN_GUE
                 goto param_fail4;
             }
 
-            nt = p2m_change_type(d, pfn, t, memtype[a.hvmmem_type]);
-            if ( nt != t )
-            {
-                put_gfn(d, pfn);
-                printk(XENLOG_G_WARNING
-                       "d%d: GFN %#lx type changed from %d to %d while trying to change it to %d\n",
-                       d->domain_id, pfn, t, nt, memtype[a.hvmmem_type]);
-                goto param_fail4;
-            }
+            rc = p2m_change_type_one(d, pfn, t, memtype[a.hvmmem_type]);
             put_gfn(d, pfn);
+            if ( rc )
+                goto param_fail4;
 
             /* Check for continuation if it's not the last interation */
             if ( a.nr > ++start_iter && !(start_iter & HVMOP_op_mask) &&
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -913,7 +913,7 @@ int mem_sharing_nominate_page(struct dom
     }
 
     /* Change the p2m type, should never fail with p2m locked. */
-    BUG_ON(p2m_change_type(d, gfn, p2mt, p2m_ram_shared) != p2mt);
+    BUG_ON(p2m_change_type_one(d, gfn, p2mt, p2m_ram_shared));
 
     /* Account for this page. */
     atomic_inc(&nr_shared_mfns);
@@ -1236,8 +1236,7 @@ int __mem_sharing_unshare_page(struct do
     put_page_and_type(old_page);
 
 private_page_found:    
-    if ( p2m_change_type(d, gfn, p2m_ram_shared, p2m_ram_rw) != 
-                                                p2m_ram_shared ) 
+    if ( p2m_change_type_one(d, gfn, p2m_ram_shared, p2m_ram_rw) )
     {
         gdprintk(XENLOG_ERR, "Could not change p2m type d %hu gfn %lx.\n", 
                                 d->domain_id, gfn);
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -704,27 +704,33 @@ out:
 }
 
 
-/* Modify the p2m type of a single gfn from ot to nt, returning the 
- * entry's previous type.  Resets the access permissions. */
-p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn, 
-                           p2m_type_t ot, p2m_type_t nt)
+/*
+ * Modify the p2m type of a single gfn from ot to nt.
+ * Returns: 0 for success, -errno for failure.
+ * Resets the access permissions.
+ */
+int p2m_change_type_one(struct domain *d, unsigned long gfn,
+                       p2m_type_t ot, p2m_type_t nt)
 {
     p2m_access_t a;
     p2m_type_t pt;
     mfn_t mfn;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    int rc;
 
     BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt));
 
     gfn_lock(p2m, gfn, 0);
 
     mfn = p2m->get_entry(p2m, gfn, &pt, &a, 0, NULL);
-    if ( pt == ot )
-        p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt, p2m->default_access);
+    rc = likely(pt == ot)
+         ? p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt,
+                         p2m->default_access)
+         : -EBUSY;
 
     gfn_unlock(p2m, gfn, 0);
 
-    return pt;
+    return rc;
 }
 
 /* Modify the p2m type of a range of gfns from ot to nt. */
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -469,12 +469,8 @@ void paging_log_dirty_range(struct domai
     p2m_lock(p2m);
 
     for ( i = 0, pfn = begin_pfn; pfn < begin_pfn + nr; i++, pfn++ )
-    {
-        p2m_type_t pt;
-        pt = p2m_change_type(d, pfn, p2m_ram_rw, p2m_ram_logdirty);
-        if ( pt == p2m_ram_rw )
+        if ( !p2m_change_type_one(d, pfn, p2m_ram_rw, p2m_ram_logdirty) )
             dirty_bitmap[i >> 3] |= (1 << (i & 7));
-    }
 
     p2m_unlock(p2m);
 
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -444,8 +444,7 @@ int unmmap_broken_page(struct domain *d,
     if ( p2m_to_mask(pt) & P2M_UNMAP_TYPES)
     {
         ASSERT(mfn_x(r_mfn) == mfn_x(mfn));
-        p2m_change_type(d, gfn, pt, p2m_ram_broken);
-        rc = 0;
+        rc = p2m_change_type_one(d, gfn, pt, p2m_ram_broken);
     }
     put_gfn(d, gfn);
 
--- a/xen/drivers/passthrough/amd/iommu_guest.c
+++ b/xen/drivers/passthrough/amd/iommu_guest.c
@@ -823,7 +823,7 @@ int guest_iommu_set_base(struct domain *
         unsigned long gfn = base + i;
 
         get_gfn_query(d, gfn, &t);
-        p2m_change_type(d, gfn, t, p2m_mmio_dm);
+        p2m_change_type_one(d, gfn, t, p2m_mmio_dm);
         put_gfn(d, gfn);
     }
 
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -519,8 +519,8 @@ void p2m_change_type_range(struct domain
                            p2m_type_t ot, p2m_type_t nt);
 
 /* Compare-exchange the type of a single p2m entry */
-p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
-                           p2m_type_t ot, p2m_type_t nt);
+int p2m_change_type_one(struct domain *d, unsigned long gfn,
+                        p2m_type_t ot, p2m_type_t nt);
 
 /* Report a change affecting memory types. */
 void p2m_memory_type_changed(struct domain *d);
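
A further property of the errno convention: the old previous-type
return exposed a racing type change (returned type != expected) but
swallowed failures of p2m_set_entry() itself, whereas now both are
visible and distinguishable. A hypothetical caller sketch (not from
the patch):

    int rc = p2m_change_type_one(d, gfn, p2m_ram_rw, p2m_ram_logdirty);

    if ( rc == -EBUSY )
        rc = 0;    /* entry already changed type: tolerate the race */
    else if ( rc )
        gdprintk(XENLOG_ERR, "p2m update for gfn %lx failed: %d\n",
                 gfn, rc);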




* Re: [PATCH v2 1/2] x86/P2M: pass on errors from p2m_set_entry()
From: Andrew Cooper @ 2014-05-06 14:09 UTC
  To: Jan Beulich; +Cc: xen-devel, Tim Deegan


On 06/05/14 14:33, Jan Beulich wrote:
> ... at least in a couple of straightforward cases.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>


* Re: [PATCH v2 0/2] x86/P2M: pass on errors from p2m_set_entry()
From: Tim Deegan @ 2014-05-08 11:16 UTC
  To: Jan Beulich; +Cc: xen-devel

At 14:18 +0100 on 06 May (1399382336), Jan Beulich wrote:
> 1: pass on errors from p2m_set_entry()
> 2: p2m_change_type() should pass on error from p2m_set_entry()
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Tim Deegan <tim@xen.org>
