* [PATCH] nvmx: fix resource relinquish for nested VMX
From: Dongxiao Xu @ 2012-08-23  3:11 UTC
  To: xen-devel

The previous order of relinquishing resources is:
relinquish_domain_resources() -> vcpu_destroy() -> nvmx_vcpu_destroy().
However, some L1 resources such as nv_vvmcx and the io_bitmaps are freed
in nvmx_vcpu_destroy(), so relinquish_domain_resources() cannot bring the
domain's refcnt down to 0, and the later vcpu release functions are never
called.

To fix this issue, we need to release the nv_vvmcx and io_bitmaps in
relinquish_domain_resources().

Besides, after destroying the nested vcpu, we need to switch vmx->vmcs
back to the L1 VMCS and let the vcpu_destroy() logic free the L1 VMCS page.
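
In rough outline, the resulting teardown order is (an illustrative trace
only, not literal Xen code; the real entry points live in the generic
domain destruction path):

    /* Illustrative sketch of the teardown order after this patch. */
    hvm_domain_relinquish_resources(d);
        /* new: hvm_funcs.nhvm_domain_relinquish_resources(d)
         *        -> nvmx_domain_relinquish_resources(d)
         *        -> nvmx_purge_vvmcs(v) for each vcpu of d
         * releases nv_vvmcx and the io_bitmap references, so the domain
         * refcnt can drop to 0 */

    /* ... later, once the refcnt has dropped and the remaining vcpu
     * teardown runs ... */
    vcpu_destroy(v);
        /* -> nvmx_vcpu_destroy(v): points v->arch.hvm_vmx.vmcs back at
         *    the L1 VMCS and clears the L2 (shadow) VMCS, so the regular
         *    vcpu teardown frees the L1 VMCS page */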

Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
---
 xen/arch/x86/hvm/hvm.c             |    3 +++
 xen/arch/x86/hvm/vmx/vmx.c         |    3 ++-
 xen/arch/x86/hvm/vmx/vvmx.c        |   11 +++++++++++
 xen/include/asm-x86/hvm/hvm.h      |    1 +
 xen/include/asm-x86/hvm/vmx/vvmx.h |    1 +
 5 files changed, 18 insertions(+), 1 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 7f8a025..0576a24 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -561,6 +561,9 @@ int hvm_domain_initialise(struct domain *d)
 
 void hvm_domain_relinquish_resources(struct domain *d)
 {
+    if ( hvm_funcs.nhvm_domain_relinquish_resources )
+        hvm_funcs.nhvm_domain_relinquish_resources(d);
+
     hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.ioreq);
     hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
 
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index ffb86c1..3ea7012 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1547,7 +1547,8 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
     .nhvm_vcpu_asid       = nvmx_vcpu_asid,
     .nhvm_vmcx_guest_intercepts_trap = nvmx_intercepts_exception,
     .nhvm_vcpu_vmexit_trap = nvmx_vmexit_trap,
-    .nhvm_intr_blocked    = nvmx_intr_blocked
+    .nhvm_intr_blocked    = nvmx_intr_blocked,
+    .nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources
 };
 
 struct hvm_function_table * __init start_vmx(void)
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 2e0b79d..1f610eb 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -57,6 +57,9 @@ void nvmx_vcpu_destroy(struct vcpu *v)
 {
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
 
+    if ( nvcpu->nv_n1vmcx )
+        v->arch.hvm_vmx.vmcs = nvcpu->nv_n1vmcx;
+
     nvmx_purge_vvmcs(v);
     if ( nvcpu->nv_n2vmcx ) {
         __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
@@ -65,6 +68,14 @@ void nvmx_vcpu_destroy(struct vcpu *v)
     }
 }
  
+void nvmx_domain_relinquish_resources(struct domain *d)
+{
+    struct vcpu *v;
+
+    for_each_vcpu ( d, v )
+        nvmx_purge_vvmcs(v);
+}
+
 int nvmx_vcpu_reset(struct vcpu *v)
 {
     return 0;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 7243c4e..3592a8c 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -179,6 +179,7 @@ struct hvm_function_table {
     bool_t (*nhvm_vmcx_hap_enabled)(struct vcpu *v);
 
     enum hvm_intblk (*nhvm_intr_blocked)(struct vcpu *v);
+    void (*nhvm_domain_relinquish_resources)(struct domain *d);
 };
 
 extern struct hvm_function_table hvm_funcs;
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index 995f9f4..bbc34e7 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -96,6 +96,7 @@ uint32_t nvmx_vcpu_asid(struct vcpu *v);
 enum hvm_intblk nvmx_intr_blocked(struct vcpu *v);
 int nvmx_intercepts_exception(struct vcpu *v, 
                               unsigned int trap, int error_code);
+void nvmx_domain_relinquish_resources(struct domain *d);
 
 int nvmx_handle_vmxon(struct cpu_user_regs *regs);
 int nvmx_handle_vmxoff(struct cpu_user_regs *regs);
-- 
1.7.1


* Re: [PATCH] nvmx: fix resource relinquish for nested VMX
From: Keir Fraser @ 2012-08-24  8:45 UTC
  To: Dongxiao Xu, xen-devel

On 23/08/2012 04:11, "Dongxiao Xu" <dongxiao.xu@intel.com> wrote:

> The previous order of relinquishing resources is:
> relinquish_domain_resources() -> vcpu_destroy() -> nvmx_vcpu_destroy().
> However, some L1 resources such as nv_vvmcx and the io_bitmaps are freed
> in nvmx_vcpu_destroy(), so relinquish_domain_resources() cannot bring the
> domain's refcnt down to 0, and the later vcpu release functions are never
> called.
> 
> To fix this issue, we need to release the nv_vvmcx and io_bitmaps in
> relinquish_domain_resources().
> 
> Besides, after destroying the nested vcpu, we need to switch vmx->vmcs
> back to the L1 VMCS and let the vcpu_destroy() logic free the L1 VMCS page.
> 
> Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>

Couple of comments below.

> diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
> index 2e0b79d..1f610eb 100644
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -57,6 +57,9 @@ void nvmx_vcpu_destroy(struct vcpu *v)
>  {
>      struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
>  
> +    if ( nvcpu->nv_n1vmcx )
> +        v->arch.hvm_vmx.vmcs = nvcpu->nv_n1vmcx;

Okay, this undoes the fork in nvmx_handle_vmxon()? A small code comment to
explain that would be handy.

>      nvmx_purge_vvmcs(v);

This call of nvmx_purge_vvmcs() is no longer needed, and should be removed?

 -- Keir

>      if ( nvcpu->nv_n2vmcx ) {
>          __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
> @@ -65,6 +68,14 @@ void nvmx_vcpu_destroy(struct vcpu *v)
>      }
>  }
>   
> +void nvmx_domain_relinquish_resources(struct domain *d)
> +{
> +    struct vcpu *v;
> +
> +    for_each_vcpu ( d, v )
> +        nvmx_purge_vvmcs(v);
> +}
> +
>  int nvmx_vcpu_reset(struct vcpu *v)
>  {
>      return 0;
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index 7243c4e..3592a8c 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -179,6 +179,7 @@ struct hvm_function_table {
>      bool_t (*nhvm_vmcx_hap_enabled)(struct vcpu *v);
>  
>      enum hvm_intblk (*nhvm_intr_blocked)(struct vcpu *v);
> +    void (*nhvm_domain_relinquish_resources)(struct domain *d);
>  };
>  
>  extern struct hvm_function_table hvm_funcs;
> diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h
> b/xen/include/asm-x86/hvm/vmx/vvmx.h
> index 995f9f4..bbc34e7 100644
> --- a/xen/include/asm-x86/hvm/vmx/vvmx.h
> +++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
> @@ -96,6 +96,7 @@ uint32_t nvmx_vcpu_asid(struct vcpu *v);
>  enum hvm_intblk nvmx_intr_blocked(struct vcpu *v);
>  int nvmx_intercepts_exception(struct vcpu *v,
>                                unsigned int trap, int error_code);
> +void nvmx_domain_relinquish_resources(struct domain *d);
>  
>  int nvmx_handle_vmxon(struct cpu_user_regs *regs);
>  int nvmx_handle_vmxoff(struct cpu_user_regs *regs);


* Re: [PATCH] nvmx: fix resource relinquish for nested VMX
From: Xu, Dongxiao @ 2012-08-27  0:55 UTC
  To: Keir Fraser, xen-devel@lists.xen.org

> -----Original Message-----
> From: Keir Fraser [mailto:keir.xen@gmail.com] On Behalf Of Keir Fraser
> Sent: Friday, August 24, 2012 4:46 PM
> To: Xu, Dongxiao; xen-devel@lists.xen.org
> Subject: Re: [Xen-devel] [PATCH] nvmx: fix resource relinquish for nested VMX
> 
> On 23/08/2012 04:11, "Dongxiao Xu" <dongxiao.xu@intel.com> wrote:
> 
> > The previous order of relinquishing resources is:
> > relinquish_domain_resources() -> vcpu_destroy() -> nvmx_vcpu_destroy().
> > However, some L1 resources such as nv_vvmcx and the io_bitmaps are freed
> > in nvmx_vcpu_destroy(), so relinquish_domain_resources() cannot bring
> > the domain's refcnt down to 0, and the later vcpu release functions are
> > never called.
> >
> > To fix this issue, we need to release the nv_vvmcx and io_bitmaps in
> > relinquish_domain_resources().
> >
> > Besides, after destroying the nested vcpu, we need to switch vmx->vmcs
> > back to the L1 VMCS and let the vcpu_destroy() logic free the L1 VMCS
> > page.
> >
> > Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
> 
> Couple of comments below.
> 
> > diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
> > index 2e0b79d..1f610eb 100644
> > --- a/xen/arch/x86/hvm/vmx/vvmx.c
> > +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> > @@ -57,6 +57,9 @@ void nvmx_vcpu_destroy(struct vcpu *v)  {
> >      struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
> >
> > +    if ( nvcpu->nv_n1vmcx )
> > +        v->arch.hvm_vmx.vmcs = nvcpu->nv_n1vmcx;
> 
> Okay, this undoes the fork in nvmx_handle_vmxon()? A small code comment to
> explain that would be handy.

Consider the following case: the vcpu is currently representing the L2
guest, so v->arch.hvm_vmx.vmcs points to L2's VMCS (also known as the
shadow VMCS, nvcpu->nv_n2vmcx). If at that point the user destroys the L1
guest with "xl destroy", we need to set v->arch.hvm_vmx.vmcs back to L1's
VMCS; otherwise L2's VMCS would be freed twice while L1's VMCS would never
be freed.

I will add a comment to the code.
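
A minimal sketch of what that comment and the restore could look like
(illustrative wording only; the exact comment is up to the next version):

    /* The vcpu may still be running on the L2 (shadow) VMCS if the domain
     * is destroyed while in nested guest mode.  Point v->arch.hvm_vmx.vmcs
     * back at the L1 VMCS so that the later vcpu_destroy() path frees the
     * L1 VMCS exactly once instead of freeing the L2 VMCS twice. */
    if ( nvcpu->nv_n1vmcx )
        v->arch.hvm_vmx.vmcs = nvcpu->nv_n1vmcx;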

> 
> >      nvmx_purge_vvmcs(v);
> 
> This call of nvmx_purge_vvmcs() is no longer needed, and should be removed?

Yes, this could be removed. I will send out a new version.

Thanks,
Dongxiao


> 
>  -- Keir
> 
> >      if ( nvcpu->nv_n2vmcx ) {
> >          __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
> > @@ -65,6 +68,14 @@ void nvmx_vcpu_destroy(struct vcpu *v)
> >      }
> >  }
> >
> > +void nvmx_domain_relinquish_resources(struct domain *d) {
> > +    struct vcpu *v;
> > +
> > +    for_each_vcpu ( d, v )
> > +        nvmx_purge_vvmcs(v);
> > +}
> > +
> >  int nvmx_vcpu_reset(struct vcpu *v)
> >  {
> >      return 0;
> > diff --git a/xen/include/asm-x86/hvm/hvm.h
> > b/xen/include/asm-x86/hvm/hvm.h index 7243c4e..3592a8c 100644
> > --- a/xen/include/asm-x86/hvm/hvm.h
> > +++ b/xen/include/asm-x86/hvm/hvm.h
> > @@ -179,6 +179,7 @@ struct hvm_function_table {
> >      bool_t (*nhvm_vmcx_hap_enabled)(struct vcpu *v);
> >
> >      enum hvm_intblk (*nhvm_intr_blocked)(struct vcpu *v);
> > +    void (*nhvm_domain_relinquish_resources)(struct domain *d);
> >  };
> >
> >  extern struct hvm_function_table hvm_funcs; diff --git
> > a/xen/include/asm-x86/hvm/vmx/vvmx.h
> > b/xen/include/asm-x86/hvm/vmx/vvmx.h
> > index 995f9f4..bbc34e7 100644
> > --- a/xen/include/asm-x86/hvm/vmx/vvmx.h
> > +++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
> > @@ -96,6 +96,7 @@ uint32_t nvmx_vcpu_asid(struct vcpu *v);  enum
> > hvm_intblk nvmx_intr_blocked(struct vcpu *v);  int
> > nvmx_intercepts_exception(struct vcpu *v,
> >                                unsigned int trap, int error_code);
> > +void nvmx_domain_relinquish_resources(struct domain *d);
> >
> >  int nvmx_handle_vmxon(struct cpu_user_regs *regs);  int
> > nvmx_handle_vmxoff(struct cpu_user_regs *regs);
> 

