From mboxrd@z Thu Jan  1 00:00:00 1970
From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper, Paul Durrant, Keir Fraser, Jan Beulich
Subject: [PATCH v2 17/17] x86/hvm: track large memory mapped accesses by buffer offset
Date: Thu, 11 Jun 2015 16:43:01 +0100
Message-ID: <1434037381-10917-18-git-send-email-paul.durrant@citrix.com>
In-Reply-To: <1434037381-10917-1-git-send-email-paul.durrant@citrix.com>
References: <1434037381-10917-1-git-send-email-paul.durrant@citrix.com>
Mime-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
List-Id: xen-devel@lists.xenproject.org

The code in hvmemul_do_io() that tracks large reads or writes, to avoid
re-issue of component I/O, is defeated by accesses that cross a page
boundary, because it tracks by physical address. The code is also only
relevant to memory mapped I/O to or from a buffer.

This patch re-factors the code, moves it into hvmemul_phys_mmio_access()
where it is relevant, and tracks by buffer offset rather than by physical
address.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Keir Fraser
Cc: Jan Beulich
Cc: Andrew Cooper
---
 xen/arch/x86/hvm/emulate.c     | 86 ++++++++++++++--------------------------
 xen/include/asm-x86/hvm/vcpu.h | 16 ++++----
 2 files changed, 39 insertions(+), 63 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 467f2da..867498f 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -109,29 +109,6 @@ static int hvmemul_do_io(
         return X86EMUL_UNHANDLEABLE;
     }
 
-    if ( is_mmio && !data_is_addr )
-    {
-        /* Part of a multi-cycle read or write? */
-        if ( dir == IOREQ_WRITE )
-        {
-            paddr_t pa = vio->mmio_large_write_pa;
-            unsigned int bytes = vio->mmio_large_write_bytes;
-            if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
-                return X86EMUL_OKAY;
-        }
-        else
-        {
-            paddr_t pa = vio->mmio_large_read_pa;
-            unsigned int bytes = vio->mmio_large_read_bytes;
-            if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
-            {
-                memcpy(p_data, &vio->mmio_large_read[addr - pa],
-                       size);
-                return X86EMUL_OKAY;
-            }
-        }
-    }
-
     switch ( vio->io_req.state )
     {
     case STATE_IOREQ_NONE:
@@ -165,34 +142,6 @@
             memcpy(p_data, &p.data, size);
         }
 
-        if ( is_mmio && !data_is_addr )
-        {
-            /* Part of a multi-cycle read or write? */
-            if ( dir == IOREQ_WRITE )
-            {
-                paddr_t pa = vio->mmio_large_write_pa;
-                unsigned int bytes = vio->mmio_large_write_bytes;
-                if ( bytes == 0 )
-                    pa = vio->mmio_large_write_pa = addr;
-                if ( addr == (pa + bytes) )
-                    vio->mmio_large_write_bytes += size;
-            }
-            else
-            {
-                paddr_t pa = vio->mmio_large_read_pa;
-                unsigned int bytes = vio->mmio_large_read_bytes;
-                if ( bytes == 0 )
-                    pa = vio->mmio_large_read_pa = addr;
-                if ( (addr == (pa + bytes)) &&
-                     ((bytes + size) <= sizeof(vio->mmio_large_read)) )
-                {
-                    memcpy(&vio->mmio_large_read[addr - pa], p_data,
-                           size);
-                    vio->mmio_large_read_bytes += size;
-                }
-            }
-        }
-
         *reps = p.count;
         return X86EMUL_OKAY;
     default:
@@ -625,6 +574,7 @@ static int hvmemul_phys_mmio_access(paddr_t gpa,
                                     uint8_t *buffer,
                                     unsigned int *off)
 {
+    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
     unsigned long one_rep = 1;
     unsigned int chunk;
     int rc = 0;
@@ -644,10 +594,34 @@
     while ( size != 0 )
     {
-        rc = hvmemul_do_mmio_buffer(gpa, &one_rep, chunk, dir, 0,
-                                    &buffer[*off]);
-        if ( rc != X86EMUL_OKAY )
-            break;
+        /* Have we already done this chunk? */
+        if ( (*off + chunk) <= vio->mmio_cache[dir].size )
+        {
+            ASSERT(*off + chunk <= vio->mmio_cache[dir].size);
+
+            if ( dir == IOREQ_READ )
+                memcpy(&buffer[*off],
+                       &vio->mmio_cache[IOREQ_READ].buffer[*off],
+                       chunk);
+            else
+                ASSERT(memcmp(&buffer[*off],
+                              &vio->mmio_cache[IOREQ_WRITE].buffer[*off],
+                              chunk) == 0);
+        }
+        else
+        {
+            ASSERT(*off == vio->mmio_cache[dir].size);
+
+            rc = hvmemul_do_mmio_buffer(gpa, &one_rep, chunk, dir, 0,
+                                        &buffer[*off]);
+            if ( rc != X86EMUL_OKAY )
+                break;
+
+            /* Note that we have now done this chunk */
+            memcpy(&vio->mmio_cache[dir].buffer[*off],
+                   &buffer[*off], chunk);
+            vio->mmio_cache[dir].size += chunk;
+        }
 
         /* Advance to the next chunk */
         gpa += chunk;
@@ -1675,7 +1649,7 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
         rc = X86EMUL_RETRY;
     if ( rc != X86EMUL_RETRY )
     {
-        vio->mmio_large_read_bytes = vio->mmio_large_write_bytes = 0;
+        memset(&vio->mmio_cache, 0, sizeof(vio->mmio_cache));
         vio->mmio_insn_bytes = 0;
     }
     else
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 46e89e4..261b649 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -56,13 +56,15 @@ struct hvm_vcpu_io {
     unsigned long mmio_gva;
     unsigned long mmio_gpfn;
 
-    /* We may read up to m256 as a number of device-model transactions. */
-    paddr_t mmio_large_read_pa;
-    uint8_t mmio_large_read[32];
-    unsigned int mmio_large_read_bytes;
-    /* We may write up to m256 as a number of device-model transactions. */
-    unsigned int mmio_large_write_bytes;
-    paddr_t mmio_large_write_pa;
+    /*
+     * We may read or write up to m256 as a number of device-model
+     * transactions.
+     */
+    struct {
+        unsigned long size;
+        uint8_t buffer[32];
+    } mmio_cache[2]; /* Indexed by ioreq type */
+
     /* For retries we shouldn't re-fetch the instruction. */
     unsigned int mmio_insn_bytes;
     unsigned char mmio_insn[16];
-- 
1.7.10.4
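
To see the scheme outside the diff context, here is a minimal, self-contained
C sketch of the offset-keyed caching the patch introduces: completed chunks of
a multi-cycle MMIO access are recorded by their offset into the emulation
buffer, so a retried emulation that crosses a page boundary still recognises
chunks it has already transferred. This is an illustration, not the Xen code:
mmio_access(), do_chunk_io() and the fixed CHUNK size are assumptions made for
the sake of a runnable example.

/* Sketch of per-direction MMIO chunk caching keyed by buffer offset. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IOREQ_WRITE 0
#define IOREQ_READ  1
#define CHUNK       4  /* fixed here; the real code sizes chunks from gpa/size */

struct mmio_cache {
    unsigned long size;  /* bytes of the buffer already transferred */
    uint8_t buffer[32];  /* large enough for an m256 access */
};

/* Stand-in for the device-model transaction the hypervisor would issue. */
static void do_chunk_io(uint8_t *chunk, unsigned int len, int dir)
{
    if ( dir == IOREQ_READ )
        memset(chunk, 0xab, len); /* pretend the device returned data */
    printf("issued %s, %u bytes\n",
           dir == IOREQ_READ ? "read" : "write", len);
}

/* One (possibly retried) pass over the whole access. */
static void mmio_access(struct mmio_cache cache[2], uint8_t *buffer,
                        unsigned int size, int dir)
{
    unsigned int off = 0;

    while ( size != 0 )
    {
        unsigned int chunk = size < CHUNK ? size : CHUNK;

        if ( off + chunk <= cache[dir].size )
        {
            /* Chunk done on an earlier pass: replay it, don't re-issue. */
            if ( dir == IOREQ_READ )
                memcpy(&buffer[off], &cache[dir].buffer[off], chunk);
            else
                assert(memcmp(&buffer[off], &cache[dir].buffer[off],
                              chunk) == 0);
        }
        else
        {
            /* New chunk: issue it, then record it by buffer offset. */
            assert(off == cache[dir].size);
            do_chunk_io(&buffer[off], chunk, dir);
            memcpy(&cache[dir].buffer[off], &buffer[off], chunk);
            cache[dir].size += chunk;
        }

        off += chunk;
        size -= chunk;
    }
}

int main(void)
{
    struct mmio_cache cache[2] = { { 0 }, { 0 } };
    uint8_t buffer[10] = { 0 };

    /* First pass issues the I/O; the simulated retry is satisfied
       entirely from the cache, so no chunk is issued twice. */
    mmio_access(cache, buffer, sizeof(buffer), IOREQ_READ);
    mmio_access(cache, buffer, sizeof(buffer), IOREQ_READ);
    return 0;
}

Because the cache is indexed by offset into the buffer rather than by guest
physical address, a retry whose chunks straddle a page boundary (and hence
map to discontiguous physical addresses) still matches the recorded chunks,
which is exactly the failure mode the commit message describes.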