From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
	Paul Durrant <paul.durrant@citrix.com>,
	Keir Fraser <keir@xen.org>, Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v2 16/17] x86/hvm: remove multiple open coded 'chunking' loops
Date: Thu, 11 Jun 2015 16:43:00 +0100
Message-ID: <1434037381-10917-17-git-send-email-paul.durrant@citrix.com>
In-Reply-To: <1434037381-10917-1-git-send-email-paul.durrant@citrix.com>

...in hvmemul_read/write()

Add hvmemul_phys_mmio_access() and hvmemul_linear_mmio_access() functions
to reduce code duplication.
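
For reviewers, the chunking policy the new helpers implement can be sketched
in standalone C. This is an illustrative model only, not code from this
patch: the address and operand size are made up, and the real helper uses
Xen's fls() and issues hvmemul_do_mmio_buffer() for each chunk.

  #include <stdio.h>
  #include <stddef.h>

  /* Largest power of 2 not exceeding size, capped at sizeof(long). */
  static size_t initial_chunk(size_t size)
  {
      size_t chunk = 1;

      while ( (chunk << 1) <= size && (chunk << 1) <= sizeof(long) )
          chunk <<= 1;

      return chunk;
  }

  int main(void)
  {
      unsigned long gpa = 0xfe000ffaUL; /* made-up MMIO address near a page end */
      size_t size = 6;                  /* e.g. a 6-byte operand */
      size_t chunk = initial_chunk(size);

      /*
       * Mirrors the loop in hvmemul_phys_mmio_access(): issue only
       * power-of-2 sized accesses until the operand is fully covered.
       */
      while ( size != 0 )
      {
          printf("access %zu byte(s) at %#lx\n", chunk, gpa);
          gpa += chunk;
          size -= chunk;
          while ( chunk > size )
              chunk >>= 1;
      }

      return 0;
  }

For a 6-byte operand ending at a page boundary this produces a 4-byte access
followed by a 2-byte access, which is the only decomposition hvmemul_do_io()
will accept.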

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Keir Fraser <keir@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
 xen/arch/x86/hvm/emulate.c |  244 +++++++++++++++++++++++++-------------------
 1 file changed, 140 insertions(+), 104 deletions(-)
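
[Reviewer note, not part of the commit: the page-splitting done by
hvmemul_linear_mmio_access() can likewise be modelled in standalone C. The
real code calls hvmemul_linear_to_phys() for each page and
hvmemul_phys_mmio_access() for the per-page access; here both steps are just
printed, and the example address is made up.]

  #include <stdio.h>
  #include <stddef.h>

  #define PAGE_SIZE 4096UL

  /*
   * Simplified model of hvmemul_linear_mmio_access(): split the access at
   * page boundaries and handle each page separately.
   */
  static void linear_access_model(unsigned long addr, size_t size)
  {
      size_t chunk = PAGE_SIZE - (addr & (PAGE_SIZE - 1));

      if ( chunk > size )
          chunk = size;

      while ( size != 0 )
      {
          printf("translate page at %#lx, access %zu byte(s)\n", addr, chunk);
          addr += chunk;
          size -= chunk;
          chunk = (size < PAGE_SIZE) ? size : PAGE_SIZE;
      }
  }

  int main(void)
  {
      /* Made-up example: an 8-byte access straddling a page boundary. */
      linear_access_model(0xfe00fffcUL, 8);
      return 0;
  }

[For an 8-byte access starting 4 bytes before a page boundary, the model
translates and accesses two 4-byte pieces, one per page, matching the
behaviour of the new helper.]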

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 0d748e7..467f2da 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -619,6 +619,127 @@ static int hvmemul_virtual_to_linear(
     return X86EMUL_EXCEPTION;
 }
 
+static int hvmemul_phys_mmio_access(paddr_t gpa,
+                                    unsigned int size,
+                                    uint8_t dir,
+                                    uint8_t *buffer,
+                                    unsigned int *off)
+{
+    unsigned long one_rep = 1;
+    unsigned int chunk;
+    int rc = 0;
+
+    /* Accesses must fall within a page */
+    if ( (gpa & (PAGE_SIZE - 1)) + size > PAGE_SIZE )
+        return X86EMUL_UNHANDLEABLE;
+
+    /*
+     * hvmemul_do_io() cannot handle non-power-of-2 accesses or
+     * accesses larger than sizeof(long), so choose the highest power
+     * of 2 not exceeding sizeof(long) as the 'chunk' size.
+     */
+    chunk = 1 << (fls(size) - 1);
+    if ( chunk > sizeof (long) )
+        chunk = sizeof (long);
+
+    while ( size != 0 )
+    {
+        rc = hvmemul_do_mmio_buffer(gpa, &one_rep, chunk, dir, 0,
+                                    &buffer[*off]);
+        if ( rc != X86EMUL_OKAY )
+            break;
+
+        /* Advance to the next chunk */
+        gpa += chunk;
+        *off += chunk;
+        size -= chunk;
+
+        /*
+         * If the chunk now exceeds the remaining size, choose the next
+         * lowest power of 2 that will fit.
+         */
+        while ( chunk > size )
+            chunk >>= 1;
+    }
+
+    return rc;
+}
+
+static int hvmemul_phys_mmio_read(paddr_t gpa,
+                                  unsigned int size,
+                                  uint8_t *buffer,
+                                  unsigned int *off)
+{
+    return hvmemul_phys_mmio_access(gpa, size, IOREQ_READ, buffer,
+                                    off);
+}
+
+static int hvmemul_phys_mmio_write(paddr_t gpa,
+                                   unsigned int size,
+                                   uint8_t *buffer,
+                                   unsigned int *off)
+{
+    return hvmemul_phys_mmio_access(gpa, size, IOREQ_WRITE, buffer,
+                                    off);
+}
+
+static int hvmemul_linear_mmio_access(unsigned long mmio_addr,
+                                      unsigned int size,
+                                      uint8_t dir,
+                                      uint8_t *buffer,
+                                      uint32_t pfec,
+                                      struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    unsigned long page_off = mmio_addr & (PAGE_SIZE - 1);
+    unsigned int chunk, buffer_off = 0;
+    paddr_t gpa;
+    unsigned long one_rep = 1;
+    int rc;
+
+    chunk = min_t(unsigned int, size, PAGE_SIZE - page_off);
+    rc = hvmemul_linear_to_phys(mmio_addr, &gpa, chunk,
+                                &one_rep, pfec, hvmemul_ctxt);
+    while ( rc == X86EMUL_OKAY )
+    {
+        rc = hvmemul_phys_mmio_access(gpa, chunk, dir, buffer,
+                                      &buffer_off);
+        if ( rc != X86EMUL_OKAY )
+            break;
+
+        mmio_addr += chunk;
+        size -= chunk;
+
+        if ( size == 0 )
+            break;
+
+        chunk = min_t(unsigned int, size, PAGE_SIZE);
+        rc = hvmemul_linear_to_phys(mmio_addr, &gpa, chunk,
+                                    &one_rep, pfec, hvmemul_ctxt);
+    }
+
+    return rc;
+}
+
+static int hvmemul_linear_mmio_read(unsigned long mmio_addr,
+                                    unsigned int size,
+                                    uint8_t *buffer,
+                                    uint32_t pfec,
+                                    struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    return hvmemul_linear_mmio_access(mmio_addr, size, IOREQ_READ, buffer,
+                                      pfec, hvmemul_ctxt);
+}
+
+static int hvmemul_linear_mmio_write(unsigned long mmio_addr,
+                                     unsigned int size,
+                                     uint8_t *buffer,
+                                     uint32_t pfec,
+                                     struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    return hvmemul_linear_mmio_access(mmio_addr, size, IOREQ_WRITE, buffer,
+                                      pfec, hvmemul_ctxt);
+}
+
 static int __hvmemul_read(
     enum x86_segment seg,
     unsigned long offset,
@@ -628,52 +749,28 @@ static int __hvmemul_read(
     struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
     struct vcpu *curr = current;
-    unsigned long addr, reps = 1;
-    unsigned int off, chunk = min(bytes, 1U << LONG_BYTEORDER);
+    unsigned long addr, one_rep = 1;
     uint32_t pfec = PFEC_page_present;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     paddr_t gpa;
     int rc;
 
     rc = hvmemul_virtual_to_linear(
-        seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
+        seg, offset, bytes, &one_rep, access_type, hvmemul_ctxt, &addr);
     if ( rc != X86EMUL_OKAY )
         return rc;
-    off = addr & (PAGE_SIZE - 1);
-    /*
-     * We only need to handle sizes actual instruction operands can have. All
-     * such sizes are either powers of 2 or the sum of two powers of 2. Thus
-     * picking as initial chunk size the largest power of 2 not greater than
-     * the total size will always result in only power-of-2 size requests
-     * issued to hvmemul_do_mmio() (hvmemul_do_io() rejects non-powers-of-2).
-     */
-    while ( chunk & (chunk - 1) )
-        chunk &= chunk - 1;
-    if ( off + bytes > PAGE_SIZE )
-        while ( off & (chunk - 1) )
-            chunk >>= 1;
 
     if ( ((access_type != hvm_access_insn_fetch
            ? vio->mmio_access.read_access
            : vio->mmio_access.insn_fetch)) &&
          (vio->mmio_gva == (addr & PAGE_MASK)) )
     {
-        gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
-        while ( (off + chunk) <= PAGE_SIZE )
-        {
-            rc = hvmemul_do_mmio_buffer(gpa, &reps, chunk, IOREQ_READ, 0,
-                                        p_data);
-            if ( rc != X86EMUL_OKAY || bytes == chunk )
-                return rc;
-            off += chunk;
-            gpa += chunk;
-            p_data += chunk;
-            bytes -= chunk;
-            if ( bytes < chunk )
-                chunk = bytes;
-        }
+        unsigned int off = 0;
 
-        return X86EMUL_UNHANDLEABLE;
+        gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) |
+               (addr & (PAGE_SIZE - 1)));
+
+        return hvmemul_phys_mmio_read(gpa, bytes, p_data, &off);
     }
 
     if ( (seg != x86_seg_none) &&
@@ -693,30 +790,9 @@ static int __hvmemul_read(
     case HVMCOPY_bad_gfn_to_mfn:
         if ( access_type == hvm_access_insn_fetch )
             return X86EMUL_UNHANDLEABLE;
-        rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
-                                    hvmemul_ctxt);
-        while ( rc == X86EMUL_OKAY )
-        {
-            rc = hvmemul_do_mmio_buffer(gpa, &reps, chunk, IOREQ_READ, 0,
-                                        p_data);
-            if ( rc != X86EMUL_OKAY || bytes == chunk )
-                break;
-            addr += chunk;
-            off += chunk;
-            p_data += chunk;
-            bytes -= chunk;
-            if ( bytes < chunk )
-                chunk = bytes;
-            if ( off < PAGE_SIZE )
-                gpa += chunk;
-            else
-            {
-                rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
-                                            hvmemul_ctxt);
-                off = 0;
-            }
-        }
-        return rc;
+
+        return hvmemul_linear_mmio_read(addr, bytes, p_data,
+                                        pfec, hvmemul_ctxt);
     case HVMCOPY_gfn_paged_out:
     case HVMCOPY_gfn_shared:
         return X86EMUL_RETRY;
@@ -781,44 +857,26 @@ static int hvmemul_write(
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     struct vcpu *curr = current;
-    unsigned long addr, reps = 1;
-    unsigned int off, chunk = min(bytes, 1U << LONG_BYTEORDER);
+    unsigned long addr, one_rep = 1;
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     paddr_t gpa;
     int rc;
 
     rc = hvmemul_virtual_to_linear(
-        seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
+        seg, offset, bytes, &one_rep, hvm_access_write, hvmemul_ctxt, &addr);
     if ( rc != X86EMUL_OKAY )
         return rc;
-    off = addr & (PAGE_SIZE - 1);
-    /* See the respective comment in __hvmemul_read(). */
-    while ( chunk & (chunk - 1) )
-        chunk &= chunk - 1;
-    if ( off + bytes > PAGE_SIZE )
-        while ( off & (chunk - 1) )
-            chunk >>= 1;
 
     if ( vio->mmio_access.write_access &&
          (vio->mmio_gva == (addr & PAGE_MASK)) )
     {
-        gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
-        while ( (off + chunk) <= PAGE_SIZE )
-        {
-            rc = hvmemul_do_mmio_buffer(gpa, &reps, chunk, IOREQ_WRITE, 0,
-                                        p_data);
-            if ( rc != X86EMUL_OKAY || bytes == chunk )
-                return rc;
-            off += chunk;
-            gpa += chunk;
-            p_data += chunk;
-            bytes -= chunk;
-            if ( bytes < chunk )
-                chunk = bytes;
-        }
+        unsigned int off = 0;
 
-        return X86EMUL_UNHANDLEABLE;
+        gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) |
+               (addr & (PAGE_SIZE - 1)));
+
+        return hvmemul_phys_mmio_write(gpa, bytes, p_data, &off);
     }
 
     if ( (seg != x86_seg_none) &&
@@ -834,30 +892,8 @@ static int hvmemul_write(
     case HVMCOPY_bad_gva_to_gfn:
         return X86EMUL_EXCEPTION;
     case HVMCOPY_bad_gfn_to_mfn:
-        rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
-                                    hvmemul_ctxt);
-        while ( rc == X86EMUL_OKAY )
-        {
-            rc = hvmemul_do_mmio_buffer(gpa, &reps, chunk, IOREQ_WRITE, 0,
-                                        p_data);
-            if ( rc != X86EMUL_OKAY || bytes == chunk )
-                break;
-            addr += chunk;
-            off += chunk;
-            p_data += chunk;
-            bytes -= chunk;
-            if ( bytes < chunk )
-                chunk = bytes;
-            if ( off < PAGE_SIZE )
-                gpa += chunk;
-            else
-            {
-                rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
-                                            hvmemul_ctxt);
-                off = 0;
-            }
-        }
-        return rc;
+        return hvmemul_linear_mmio_write(addr, bytes, p_data,
+                                         pfec, hvmemul_ctxt);
     case HVMCOPY_gfn_paged_out:
     case HVMCOPY_gfn_shared:
         return X86EMUL_RETRY;
-- 
1.7.10.4
