* [PATCH] x86/hvm: add support for broadcast of buffered ioreqs...
From: Paul Durrant @ 2015-07-10 13:45 UTC
  To: xen-devel; +Cc: Andrew Cooper, Paul Durrant, Keir Fraser, Jan Beulich

...and make RTC timeoffset ioreqs use it.

Without this patch RTC timeoffset updates go nowhere and Xen complains
with a (non-rate-limited) printk.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Keir Fraser <keir@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
 xen/arch/x86/hvm/emulate.c    |    2 +-
 xen/arch/x86/hvm/hvm.c        |   25 +++++++++++++------------
 xen/arch/x86/hvm/io.c         |    5 ++---
 xen/arch/x86/hvm/stdvga.c     |    8 +++++---
 xen/include/asm-x86/hvm/hvm.h |    4 ++--
 xen/include/asm-x86/hvm/io.h  |    1 -
 6 files changed, 23 insertions(+), 22 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 01ee972..795321c 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -161,7 +161,7 @@ static int hvmemul_do_io(
         }
         else
         {
-            rc = hvm_send_assist_req(s, &p);
+            rc = hvm_send_ioreq(s, &p, 0);
             if ( rc != X86EMUL_RETRY || curr->domain->is_shutting_down )
                 vio->io_req.state = STATE_IOREQ_NONE;
             else if ( data_is_addr )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ebcf7a9..09dbb88 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2561,10 +2561,9 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
     return d->arch.hvm_domain.default_ioreq_server;
 }
 
-int hvm_buffered_io_send(ioreq_t *p)
+static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
 {
     struct domain *d = current->domain;
-    struct hvm_ioreq_server *s = hvm_select_ioreq_server(d, p);
     struct hvm_ioreq_page *iorp;
     buffered_iopage_t *pg;
     buf_ioreq_t bp = { .data = p->data,
@@ -2577,14 +2576,11 @@ int hvm_buffered_io_send(ioreq_t *p)
     /* Ensure buffered_iopage fits in a page */
     BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);
 
-    if ( !s )
-        return 0;
-
     iorp = &s->bufioreq;
     pg = iorp->va;
 
     if ( !pg )
-        return 0;
+        return X86EMUL_UNHANDLEABLE;
 
     /*
      * Return 0 for the cases we can't deal with:
@@ -2614,7 +2610,7 @@ int hvm_buffered_io_send(ioreq_t *p)
         break;
     default:
         gdprintk(XENLOG_WARNING, "unexpected ioreq size: %u\n", p->size);
-        return 0;
+        return X86EMUL_UNHANDLEABLE;
     }
 
     spin_lock(&s->bufioreq_lock);
@@ -2624,7 +2620,7 @@ int hvm_buffered_io_send(ioreq_t *p)
     {
         /* The queue is full: send the iopacket through the normal path. */
         spin_unlock(&s->bufioreq_lock);
-        return 0;
+        return X86EMUL_UNHANDLEABLE;
     }
 
     pg->buf_ioreq[pg->ptrs.write_pointer % IOREQ_BUFFER_SLOT_NUM] = bp;
@@ -2654,16 +2650,21 @@ int hvm_buffered_io_send(ioreq_t *p)
     notify_via_xen_event_channel(d, s->bufioreq_evtchn);
     spin_unlock(&s->bufioreq_lock);
 
-    return 1;
+    return X86EMUL_OKAY;
 }
 
-int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
+int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
+                   bool_t buffered)
 {
     struct vcpu *curr = current;
     struct domain *d = curr->domain;
     struct hvm_ioreq_vcpu *sv;
 
     ASSERT(s);
+
+    if ( buffered )
+        return hvm_send_buffered_ioreq(s, proto_p);
+
     if ( unlikely(!vcpu_start_shutdown_deferral(curr)) )
         return X86EMUL_RETRY;
 
@@ -2710,7 +2711,7 @@ int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
     return X86EMUL_UNHANDLEABLE;
 }
 
-void hvm_broadcast_assist_req(ioreq_t *p)
+void hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered)
 {
     struct domain *d = current->domain;
     struct hvm_ioreq_server *s;
@@ -2720,7 +2721,7 @@ void hvm_broadcast_assist_req(ioreq_t *p)
     list_for_each_entry ( s,
                           &d->arch.hvm_domain.ioreq_server.list,
                           list_entry )
-        (void) hvm_send_assist_req(s, p);
+        hvm_send_ioreq(s, p, buffered);
 }
 
 void hvm_hlt(unsigned long rflags)
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 3b51d59..603711c 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -60,8 +60,7 @@ void send_timeoffset_req(unsigned long timeoff)
     if ( timeoff == 0 )
         return;
 
-    if ( !hvm_buffered_io_send(&p) )
-        printk("Unsuccessful timeoffset update\n");
+    hvm_broadcast_ioreq(&p, 1);
 }
 
 /* Ask ioemu mapcache to invalidate mappings. */
@@ -74,7 +73,7 @@ void send_invalidate_req(void)
         .data = ~0UL, /* flush all */
     };
 
-    hvm_broadcast_assist_req(&p);
+    hvm_broadcast_ioreq(&p, 0);
 }
 
 int handle_mmio(void)
diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
index 4a7593d..8222af3 100644
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -439,6 +439,7 @@ static int stdvga_mem_write(const struct hvm_io_handler *handler,
         .dir = IOREQ_WRITE,
         .data = data,
     };
+    struct hvm_ioreq_server *srv;
 
     if ( !s->cache )
         goto done;
@@ -479,10 +480,11 @@ static int stdvga_mem_write(const struct hvm_io_handler *handler,
     }
 
  done:
-    if ( hvm_buffered_io_send(&p) )
-        return X86EMUL_OKAY;
+    srv = hvm_select_ioreq_server(current->domain, &p);
+    if ( !srv )
+        return X86EMUL_UNHANDLEABLE;
 
-    return X86EMUL_UNHANDLEABLE;
+    return hvm_send_ioreq(srv, &p, 1);
 }
 
 static bool_t stdvga_mem_accept(const struct hvm_io_handler *handler,
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 35f1300..05e397b 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -226,8 +226,8 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip);
 
 struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
                                                  ioreq_t *p);
-int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *p);
-void hvm_broadcast_assist_req(ioreq_t *p);
+int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *p, bool_t buffered);
+void hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered);
 
 void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
 int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat);
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index 577b21a..cf46689 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -117,7 +117,6 @@ void relocate_portio_handler(
     struct domain *d, unsigned int old_port, unsigned int new_port,
     unsigned int size);
 
-int hvm_buffered_io_send(ioreq_t *p);
 void send_timeoffset_req(unsigned long timeoff);
 void send_invalidate_req(void);
 int handle_mmio(void);
-- 
1.7.10.4


* Re: [PATCH] x86/hvm: add support for broadcast of buffered ioreqs...
From: Jan Beulich @ 2015-07-10 15:38 UTC
  To: Paul Durrant; +Cc: Andrew Cooper, Keir Fraser, xen-devel

>>> On 10.07.15 at 15:45, <paul.durrant@citrix.com> wrote:
> --- a/xen/arch/x86/hvm/io.c
> +++ b/xen/arch/x86/hvm/io.c
> @@ -60,8 +60,7 @@ void send_timeoffset_req(unsigned long timeoff)
>      if ( timeoff == 0 )
>          return;
>  
> -    if ( !hvm_buffered_io_send(&p) )
> -        printk("Unsuccessful timeoffset update\n");
> +    hvm_broadcast_ioreq(&p, 1);
>  }

The rest of the patch looks okay, but I'm not happy with the deletion
of this message, as it served a purpose (ignoring the fact that one
didn't know the affected domain etc). I would think
hvm_broadcast_ioreq() should have a return value, indicating all
succeeded, some succeeded, or all failed. And perhaps servers
without bufioreq page should then rather count as "succeeded".

Otoh I can see that "some succeeded" may not be really useful
here, as the caller won't know whether the one(s) that failed are
important, or whether they would have dropped the notification
on the floor only anyway (like qemuu appears to do right now).

Jan
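
For illustration, a rough sketch of the tri-state result suggested above. All
names here (hvm_broadcast_ioreq_checked(), the HVM_BCAST_* values) are
hypothetical and not part of the posted patch; the server-list walk mirrors
hvm_broadcast_ioreq() from the diff, and only X86EMUL_UNHANDLEABLE is counted
as an outright failure:

    enum hvm_bcast_result {
        HVM_BCAST_ALL_OK,      /* every server accepted the ioreq */
        HVM_BCAST_SOME_FAILED, /* at least one server failed */
        HVM_BCAST_ALL_FAILED,  /* no server accepted it */
    };

    static enum hvm_bcast_result hvm_broadcast_ioreq_checked(ioreq_t *p,
                                                              bool_t buffered)
    {
        struct domain *d = current->domain;
        struct hvm_ioreq_server *s;
        unsigned int sent = 0, failed = 0;

        list_for_each_entry ( s,
                              &d->arch.hvm_domain.ioreq_server.list,
                              list_entry )
        {
            sent++;
            /* Count only outright failures to send. */
            if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
                failed++;
        }

        if ( !failed )
            return HVM_BCAST_ALL_OK;
        return failed == sent ? HVM_BCAST_ALL_FAILED : HVM_BCAST_SOME_FAILED;
    }
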


* Re: [PATCH] x86/hvm: add support for broadcast of buffered ioreqs...
From: Paul Durrant @ 2015-07-10 15:42 UTC
  To: Jan Beulich; +Cc: Andrew Cooper, Keir (Xen.org), xen-devel@lists.xen.org

> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 10 July 2015 16:39
> To: Paul Durrant
> Cc: Andrew Cooper; xen-devel@lists.xen.org; Keir (Xen.org)
> Subject: Re: [PATCH] x86/hvm: add support for broadcast of buffered
> ioreqs...
> 
> >>> On 10.07.15 at 15:45, <paul.durrant@citrix.com> wrote:
> > --- a/xen/arch/x86/hvm/io.c
> > +++ b/xen/arch/x86/hvm/io.c
> > @@ -60,8 +60,7 @@ void send_timeoffset_req(unsigned long timeoff)
> >      if ( timeoff == 0 )
> >          return;
> >
> > -    if ( !hvm_buffered_io_send(&p) )
> > -        printk("Unsuccessful timeoffset update\n");
> > +    hvm_broadcast_ioreq(&p, 1);
> >  }
> 
> The rest of the patch looks okay, but I'm not happy with the deletion
> of this message, as it served a purpose (ignoring the fact that one
> didn't know the affected domain etc). I would think
> hvm_broadcast_ioreq() should have a return value, indicating all
> succeeded, some succeeded, or all failed. And perhaps servers
> without bufioreq page should then rather count as "succeeded".
> 
> Otoh I can see that "some succeeded" may not be really useful
> here, as the caller won't know whether the one(s) that failed are
> important, or whether they would have dropped the notification
> on the floor only anyway (like qemuu appears to do right now).
> 

Ok, how about I just have it return a value indicating if at least one send failed, and then add a gprintk() on that?

  Paul

> Jan


* Re: [PATCH] x86/hvm: add support for broadcast of buffered ioreqs...
From: Jan Beulich @ 2015-07-10 15:47 UTC
  To: Paul Durrant; +Cc: Andrew Cooper, Keir (Xen.org), xen-devel@lists.xen.org

>>> On 10.07.15 at 17:42, <Paul.Durrant@citrix.com> wrote:
>>  -----Original Message-----
>> From: Jan Beulich [mailto:JBeulich@suse.com]
>> Sent: 10 July 2015 16:39
>> To: Paul Durrant
>> Cc: Andrew Cooper; xen-devel@lists.xen.org; Keir (Xen.org)
>> Subject: Re: [PATCH] x86/hvm: add support for broadcast of buffered
>> ioreqs...
>> 
>> >>> On 10.07.15 at 15:45, <paul.durrant@citrix.com> wrote:
>> > --- a/xen/arch/x86/hvm/io.c
>> > +++ b/xen/arch/x86/hvm/io.c
>> > @@ -60,8 +60,7 @@ void send_timeoffset_req(unsigned long timeoff)
>> >      if ( timeoff == 0 )
>> >          return;
>> >
>> > -    if ( !hvm_buffered_io_send(&p) )
>> > -        printk("Unsuccessful timeoffset update\n");
>> > +    hvm_broadcast_ioreq(&p, 1);
>> >  }
>> 
>> The rest of the patch looks okay, but I'm not happy with the deletion
>> of this message, as it served a purpose (ignoring the fact that one
>> didn't know the affected domain etc). I would think
>> hvm_broadcast_ioreq() should have a return value, indicating all
>> succeeded, some succeeded, or all failed. And perhaps servers
>> without bufioreq page should then rather count as "succeeded".
>> 
>> Otoh I can see that "some succeeded" may not be really useful
>> here, as the caller won't know whether the one(s) that failed are
>> important, or whether they would have dropped the notification
>> on the floor only anyway (like qemuu appears to do right now).
>> 
> 
> Ok, how about I just have it return a value indicating if at least one send 
> failed, and then add a gprintk() on that?

Sounds reasonable.

Jan
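
A minimal sketch of the agreed follow-up, assuming hvm_broadcast_ioreq() is
changed to report how many sends failed; the exact return type, log level and
message are assumptions, not the actual v2 patch:

    /* Assumed new shape: returns the number of servers the ioreq could
     * not be sent to (0 => all sends succeeded). */
    unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered);

    void send_timeoffset_req(unsigned long timeoff)
    {
        ioreq_t p = { /* ... initialised as before ... */ };

        if ( timeoff == 0 )
            return;

        /* Restore the diagnostic, now tied to the calling domain via
         * gprintk() rather than a bare printk(). */
        if ( hvm_broadcast_ioreq(&p, 1) != 0 )
            gprintk(XENLOG_ERR, "unsuccessful timeoffset update\n");
    }
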
