All the mail mirrored from lore.kernel.org
 help / color / mirror / Atom feed
From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
	Paul Durrant <paul.durrant@citrix.com>,
	Keir Fraser <keir@xen.org>, Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v2 12/17] x86/hvm: use ioreq_t to track in-flight state
Date: Thu, 11 Jun 2015 16:42:56 +0100	[thread overview]
Message-ID: <1434037381-10917-13-git-send-email-paul.durrant@citrix.com> (raw)
In-Reply-To: <1434037381-10917-1-git-send-email-paul.durrant@citrix.com>

Use an ioreq_t rather than open coded state, size, dir and data fields
in struct hvm_vcpu_io. This also allows PIO completion to be handled
similarly to MMIO completion by re-issuing the handle_pio() call.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Keir Fraser <keir@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
 xen/arch/x86/hvm/emulate.c       |  149 +++++++++++++++++++++-----------------
 xen/arch/x86/hvm/hvm.c           |   19 ++---
 xen/arch/x86/hvm/io.c            |    2 +-
 xen/arch/x86/hvm/svm/nestedsvm.c |    2 +-
 xen/arch/x86/hvm/vmx/realmode.c  |    6 +-
 xen/include/asm-x86/hvm/vcpu.h   |    5 +-
 6 files changed, 93 insertions(+), 90 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 1c34288..8dd02af 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -93,6 +93,7 @@ static int hvmemul_do_io(
         .df = df,
         .data = data,
         .data_is_ptr = data_is_addr, /* ioreq_t field name is misleading */
+        .state = STATE_IOREQ_READY,
     };
     void *p_data = (void *)data;
     int rc;
@@ -130,21 +131,79 @@ static int hvmemul_do_io(
         }
     }
 
-    switch ( vio->io_state )
+    switch ( vio->io_req.state )
     {
     case STATE_IOREQ_NONE:
+        vio->io_req = p;
         break;
     case STATE_IORESP_READY:
-        vio->io_state = STATE_IOREQ_NONE;
-        goto finish_access;
+        p = vio->io_req;
+
+        if ( (p.type != (is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO)) ||
+             (p.addr != addr) ||
+             (p.size != size) ||
+             (p.count != *reps) ||
+             (p.dir != dir) ||
+             (p.df != df) ||
+             (p.data_is_ptr != data_is_addr) )
+        {
+            gdprintk(XENLOG_WARNING, "mismatched request\n");
+            domain_crash(curr->domain);
+
+            vio->io_req.state = STATE_IOREQ_NONE;
+            return X86EMUL_UNHANDLEABLE;
+        }
+
+ resp_ready:
+        vio->io_req.state = STATE_IOREQ_NONE;
+
+        if ( dir == IOREQ_READ )
+        {
+            hvmtrace_io_assist(is_mmio, &p);
+
+            if ( !data_is_addr )
+                memcpy(p_data, &p.data, size);
+        }
+
+        if ( is_mmio && !data_is_addr )
+        {
+            /* Part of a multi-cycle read or write? */
+            if ( dir == IOREQ_WRITE )
+            {
+                paddr_t pa = vio->mmio_large_write_pa;
+                unsigned int bytes = vio->mmio_large_write_bytes;
+                if ( bytes == 0 )
+                    pa = vio->mmio_large_write_pa = addr;
+                if ( addr == (pa + bytes) )
+                    vio->mmio_large_write_bytes += size;
+            }
+            else
+            {
+                paddr_t pa = vio->mmio_large_read_pa;
+                unsigned int bytes = vio->mmio_large_read_bytes;
+                if ( bytes == 0 )
+                    pa = vio->mmio_large_read_pa = addr;
+                if ( (addr == (pa + bytes)) &&
+                     ((bytes + size) <= sizeof(vio->mmio_large_read)) )
+                {
+                    memcpy(&vio->mmio_large_read[addr - pa], p_data,
+                           size);
+                    vio->mmio_large_read_bytes += size;
+                }
+            }
+        }
+
+        return X86EMUL_OKAY;
     default:
+        /*
+         * This function should never be called unless
+         * vio->io_req.state matches the above cases.
+         */
+        gdprintk(XENLOG_WARNING, "bad emulation state\n");
+        domain_crash(curr->domain);
         return X86EMUL_UNHANDLEABLE;
     }
 
-    vio->io_state = STATE_IOREQ_READY;
-    vio->io_size = size;
-    vio->io_dir = dir;
-
     if ( dir == IOREQ_WRITE )
     {
         if ( !data_is_addr )
@@ -155,77 +214,31 @@ static int hvmemul_do_io(
 
     rc = hvm_io_intercept(&p);
 
-    switch ( rc )
-    {
-    case X86EMUL_OKAY:
-        vio->io_data = p.data;
-        vio->io_state = STATE_IOREQ_NONE;
-        break;
-    case X86EMUL_UNHANDLEABLE:
+    if ( rc == X86EMUL_UNHANDLEABLE )
     {
         struct hvm_ioreq_server *s =
             hvm_select_ioreq_server(curr->domain, &p);
 
         /* If there is no suitable backing DM, just ignore accesses */
-        if ( !s )
-        {
-            rc = process_io_intercept(curr, &p, &null_handler);
-            if ( rc == X86EMUL_OKAY )
-                vio->io_data = p.data;
-            vio->io_state = STATE_IOREQ_NONE;
-        }
-        else
-        {
-            rc = hvm_send_assist_req(s, &p);
-            if ( rc != X86EMUL_RETRY )
-                vio->io_state = STATE_IOREQ_NONE;
-        }
-        break;
-    }
-    default:
-        BUG();
+        rc = !s ?
+            process_io_intercept(curr, &p, &null_handler) :
+            hvm_send_assist_req(s, &p);
     }
 
-    if ( rc != X86EMUL_OKAY )
-        return rc;
-
- finish_access:
-    if ( dir == IOREQ_READ )
-    {
-        hvmtrace_io_assist(is_mmio, &p);
-
-        if ( !data_is_addr )
-            memcpy(p_data, &vio->io_data, size);
-    }
-
-    if ( is_mmio && !data_is_addr )
+    switch ( rc )
     {
-        /* Part of a multi-cycle read or write? */
-        if ( dir == IOREQ_WRITE )
-        {
-            paddr_t pa = vio->mmio_large_write_pa;
-            unsigned int bytes = vio->mmio_large_write_bytes;
-            if ( bytes == 0 )
-                pa = vio->mmio_large_write_pa = addr;
-            if ( addr == (pa + bytes) )
-                vio->mmio_large_write_bytes += size;
-        }
-        else
-        {
-            paddr_t pa = vio->mmio_large_read_pa;
-            unsigned int bytes = vio->mmio_large_read_bytes;
-            if ( bytes == 0 )
-                pa = vio->mmio_large_read_pa = addr;
-            if ( (addr == (pa + bytes)) &&
-                 ((bytes + size) <= sizeof(vio->mmio_large_read)) )
-            {
-                memcpy(&vio->mmio_large_read[bytes], p_data, size);
-                vio->mmio_large_read_bytes += size;
-            }
-        }
+    case X86EMUL_OKAY:
+        goto resp_ready;
+    case X86EMUL_UNHANDLEABLE:
+        vio->io_req.state = STATE_IOREQ_NONE;
+        break;
+    case X86EMUL_RETRY:
+        break;
+    default:
+        BUG();
     }
 
-    return X86EMUL_OKAY;
+    return rc;
 }
 
 int hvmemul_do_io_buffer(
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 85944f6..48711ab 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -423,11 +423,11 @@ static void hvm_io_assist(ioreq_t *p)
      * This function should never be called unless an I/O emulation
      * is awating completion.
      */
-    if (vio->io_state != STATE_IOREQ_READY)
+    if (vio->io_req.state != STATE_IOREQ_READY)
         domain_crash(curr->domain);
 
-    vio->io_state = STATE_IORESP_READY;
-    vio->io_data = p->data;
+    vio->io_req.state = STATE_IORESP_READY;
+    vio->io_req.data = p->data;
     vio->io_completion = HVMIO_no_completion;
 
     switch ( completion )
@@ -437,15 +437,8 @@ static void hvm_io_assist(ioreq_t *p)
         break;
 
     case HVMIO_pio_completion:
-        if ( vio->io_dir == IOREQ_READ )
-        {
-            if ( vio->io_size == 4 ) /* Needs zero extension. */
-                guest_cpu_user_regs()->rax = (uint32_t)p->data;
-            else
-                memcpy(&guest_cpu_user_regs()->rax, &p->data, vio->io_size);
-        }
-
-        vio->io_state = STATE_IOREQ_NONE;
+        (void)handle_pio(vio->io_req.addr, vio->io_req.size,
+                         vio->io_req.dir);
         break;
     default:
         break;
@@ -455,7 +448,7 @@ static void hvm_io_assist(ioreq_t *p)
      * Re-emulation may have scheduled another I/O so io_state set
      * at the top of the function may have changed.
      */
-    if ( vio->io_state == STATE_IOREQ_NONE )
+    if ( vio->io_req.state == STATE_IOREQ_NONE )
     {
         msix_write_completion(curr);
         vcpu_end_shutdown_deferral(curr);
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index b09b369..e31164e 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -103,7 +103,7 @@ int handle_mmio(void)
         hvm_dump_emulation_state(XENLOG_G_WARNING "MMIO", &ctxt);
         return 0;
     case X86EMUL_EXCEPTION:
-        vio->io_state = STATE_IOREQ_NONE;
+        vio->io_req.state = STATE_IOREQ_NONE;
         vio->mmio_access = (struct npfec){};
         if ( ctxt.exn_pending )
             hvm_inject_trap(&ctxt.trap);
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 8b165c6..78667a2 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -1231,7 +1231,7 @@ enum hvm_intblk nsvm_intr_blocked(struct vcpu *v)
          * Delay the injection because this would result in delivering
          * an interrupt *within* the execution of an instruction.
          */
-        if ( v->arch.hvm_vcpu.hvm_io.io_state != STATE_IOREQ_NONE )
+        if ( v->arch.hvm_vcpu.hvm_io.io_req.state != STATE_IOREQ_NONE )
             return hvm_intblk_shadow;
 
         if ( !nv->nv_vmexit_pending && n2vmcb->exitintinfo.bytes != 0 ) {
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index 8c2da9a..69c0297 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -177,7 +177,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
 
     hvm_emulate_prepare(&hvmemul_ctxt, regs);
 
-    if ( vio->io_state == STATE_IORESP_READY )
+    if ( vio->io_req.state == STATE_IORESP_READY )
         realmode_emulate_one(&hvmemul_ctxt);
 
     /* Only deliver interrupts into emulated real mode. */
@@ -191,7 +191,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
     curr->arch.hvm_vmx.vmx_emulate = 1;
     while ( curr->arch.hvm_vmx.vmx_emulate &&
             !softirq_pending(smp_processor_id()) &&
-            (vio->io_state == STATE_IOREQ_NONE) )
+            (vio->io_req.state == STATE_IOREQ_NONE) )
     {
         /*
          * Check for pending interrupts only every 16 instructions, because
@@ -216,7 +216,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
     }
 
     /* Need to emulate next time if we've started an IO operation */
-    if ( vio->io_state != STATE_IOREQ_NONE )
+    if ( vio->io_req.state != STATE_IOREQ_NONE )
         curr->arch.hvm_vmx.vmx_emulate = 1;
 
     if ( !curr->arch.hvm_vmx.vmx_emulate && !curr->arch.hvm_vmx.vmx_realmode )
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 1c2ec27..46e89e4 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -43,10 +43,7 @@ struct hvm_vcpu_asid {
 
 struct hvm_vcpu_io {
     /* I/O request in flight to device model. */
-    uint8_t                io_state;
-    unsigned long          io_data;
-    int                    io_size;
-    int                    io_dir;
+    ioreq_t                io_req;
     enum hvm_io_completion io_completion;
 
     /*
-- 
1.7.10.4

  parent reply	other threads:[~2015-06-11 15:51 UTC|newest]

Thread overview: 38+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-06-11 15:42 [PATCH v2 00/17] x86/hvm: I/O emulation cleanup and fix Paul Durrant
2015-06-11 15:42 ` [PATCH v2 01/17] x86/hvm: simplify hvmemul_do_io() Paul Durrant
2015-06-17 13:31   ` Jan Beulich
2015-06-17 13:54     ` Paul Durrant
2015-06-17 14:47       ` Jan Beulich
2015-06-17 14:55         ` Paul Durrant
2015-06-17 14:59           ` Jan Beulich
2015-06-11 15:42 ` [PATCH v2 02/17] x86/hvm: re-name struct hvm_mmio_handler to hvm_mmio_ops Paul Durrant
2015-06-17 12:43   ` Jan Beulich
2015-06-17 12:45     ` Paul Durrant
2015-06-11 15:42 ` [PATCH v2 03/17] x86/hvm: unify internal portio and mmio intercepts Paul Durrant
2015-06-17 14:22   ` Jan Beulich
2015-06-17 14:40     ` Paul Durrant
2015-06-17 14:55       ` Jan Beulich
2015-06-11 15:42 ` [PATCH v2 04/17] x86/hvm: unify dpci portio intercept with standard portio intercept Paul Durrant
2015-06-17 14:36   ` Jan Beulich
2015-06-17 14:46     ` Paul Durrant
2015-06-17 14:58       ` Jan Beulich
2015-06-17 15:17         ` Paul Durrant
2015-06-11 15:42 ` [PATCH v2 05/17] x86/hvm: unify stdvga mmio intercept with standard mmio intercept Paul Durrant
2015-06-17 15:50   ` Jan Beulich
2015-06-17 16:30     ` Paul Durrant
2015-06-18  6:23       ` Jan Beulich
2015-06-11 15:42 ` [PATCH v2 06/17] x86/hvm: revert 82ed8716b "fix direct PCI port I/O emulation retry Paul Durrant
2015-06-17 10:54   ` Paul Durrant
2015-06-11 15:42 ` [PATCH v2 07/17] x86/hvm: only call hvm_io_assist() from hvm_wait_for_io() Paul Durrant
2015-06-11 15:42 ` [PATCH v2 08/17] x86/hvm: split I/O completion handling from state model Paul Durrant
2015-06-11 15:42 ` [PATCH v2 09/17] x86/hvm: remove hvm_io_pending() check in hvmemul_do_io() Paul Durrant
2015-06-11 15:42 ` [PATCH v2 10/17] x86/hvm: remove HVMIO_dispatched I/O state Paul Durrant
2015-06-11 15:42 ` [PATCH v2 11/17] x86/hvm: remove hvm_io_state enumeration Paul Durrant
2015-06-11 15:42 ` Paul Durrant [this message]
2015-06-11 15:42 ` [PATCH v2 13/17] x86/hvm: only acquire RAM pages for emulation when we need to Paul Durrant
2015-06-11 15:42 ` [PATCH v2 14/17] x86/hvm: remove extraneous parameter from hvmtrace_io_assist() Paul Durrant
2015-06-11 15:42 ` [PATCH v2 15/17] x86/hvm: make sure translated MMIO reads or writes fall within a page Paul Durrant
2015-06-11 15:43 ` [PATCH v2 16/17] x86/hvm: remove multiple open coded 'chunking' loops Paul Durrant
2015-06-11 15:43 ` [PATCH v2 17/17] x86/hvm: track large memory mapped accesses by buffer offset Paul Durrant
2015-06-12 10:44 ` [PATCH v2 00/17] x86/hvm: I/O emulation cleanup and fix Fabio Fantoni
2015-06-12 11:45   ` Paul Durrant

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1434037381-10917-13-git-send-email-paul.durrant@citrix.com \
    --to=paul.durrant@citrix.com \
    --cc=andrew.cooper3@citrix.com \
    --cc=jbeulich@suse.com \
    --cc=keir@xen.org \
    --cc=xen-devel@lists.xenproject.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.