All the mail mirrored from lore.kernel.org
 help / color / mirror / Atom feed
From: "Jan Beulich" <JBeulich@suse.com>
To: Paul Durrant <paul.durrant@citrix.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
	Keir Fraser <keir@xen.org>,
	xen-devel@lists.xenproject.org
Subject: Re: [PATCH v2 03/17] x86/hvm: unify internal portio and mmio intercepts
Date: Wed, 17 Jun 2015 15:22:35 +0100	[thread overview]
Message-ID: <55819ECB0200007800086386@mail.emea.novell.com> (raw)
In-Reply-To: <1434037381-10917-4-git-send-email-paul.durrant@citrix.com>

>>> On 11.06.15 at 17:42, <paul.durrant@citrix.com> wrote:
> - fail2:
> + fail6:
>      rtc_deinit(d);
>      stdvga_deinit(d);
>      vioapic_deinit(d);
> - fail1:
> + fail5:
>      if ( is_hardware_domain(d) )
>          xfree(d->arch.hvm_domain.io_bitmap);
> -    xfree(d->arch.hvm_domain.io_handler);
> + fail4:
> +    xfree(d->arch.hvm_domain.portio_handler);
> + fail3:
> +    xfree(d->arch.hvm_domain.mmio_handler);
> + fail2:
>      xfree(d->arch.hvm_domain.params);
> + fail1:
>   fail0:

We certainly don't want two consecutive labels. And with struct
domain starting out zero-initialized you don't need this many
earlier labels either (xfree(NULL) is perfectly fine).

> @@ -86,31 +175,40 @@ static int hvm_mmio_access(struct vcpu *v,
>              }
>              else
>              {
> -                rc = read(v, p->addr + step * i, p->size, &data);
> +                addr = (p->type == IOREQ_TYPE_COPY) ?
> +                    p->addr + step * i :
> +                    p->addr;
> +                rc = ops->read(handler, v, addr, p->size, &data);

Why this IOREQ_TYPE_COPY dependency that wasn't there before?

>                  if ( rc != X86EMUL_OKAY )
>                      break;
>              }
> -            switch ( hvm_copy_to_guest_phys(p->data + step * i,
> -                                            &data, p->size) )
> +
> +            if ( p->data_is_ptr )
>              {
> -            case HVMCOPY_okay:
> -                break;
> -            case HVMCOPY_gfn_paged_out:
> -            case HVMCOPY_gfn_shared:
> -                rc = X86EMUL_RETRY;
> -                break;
> -            case HVMCOPY_bad_gfn_to_mfn:
> -                /* Drop the write as real hardware would. */
> -                continue;
> -            case HVMCOPY_bad_gva_to_gfn:
> -                ASSERT(0);
> -                /* fall through */
> -            default:
> -                rc = X86EMUL_UNHANDLEABLE;
> -                break;
> +                switch ( hvm_copy_to_guest_phys(p->data + step * i,
> +                                                &data, p->size) )
> +                {
> +                case HVMCOPY_okay:
> +                    break;
> +                case HVMCOPY_gfn_paged_out:
> +                case HVMCOPY_gfn_shared:
> +                    rc = X86EMUL_RETRY;
> +                    break;
> +                case HVMCOPY_bad_gfn_to_mfn:
> +                    /* Drop the write as real hardware would. */
> +                    continue;
> +                case HVMCOPY_bad_gva_to_gfn:
> +                    ASSERT(0);

Please use ASSERT_UNREACHABLE() here, since you're touching this code anyway.

> @@ -163,240 +270,182 @@ static int hvm_mmio_access(struct vcpu *v,
>      return rc;
>  }
>  
> -bool_t hvm_mmio_internal(paddr_t gpa)
> -{
> -    struct vcpu *curr = current;
> -    unsigned int i;
> -
> -    for ( i = 0; i < HVM_MMIO_HANDLER_NR; ++i )
> -        if ( hvm_mmio_handlers[i]->check(curr, gpa) )
> -            return 1;
> +static DEFINE_RCU_READ_LOCK(intercept_rcu_lock);
>  
> -    return 0;
> -}
> -
> -int hvm_mmio_intercept(ioreq_t *p)
> +struct hvm_io_handler *hvm_find_io_handler(struct vcpu *v,

I suppose v == current here, i.e. it should be named curr. And I
guess the function should return a pointer to const.

> +                                           ioreq_t *p)
>  {
> -    struct vcpu *v = current;
> -    int i;
> +    struct domain *d = v->domain;
> +    struct hvm_io_handler *handler;
>  
> -    for ( i = 0; i < HVM_MMIO_HANDLER_NR; i++ )
> -    {
> -        hvm_mmio_check_t check =
> -            hvm_mmio_handlers[i]->check;
> +    rcu_read_lock(&intercept_rcu_lock);
>  
> -        if ( check(v, p->addr) )
> -        {
> -            if ( unlikely(p->count > 1) &&
> -                 !check(v, unlikely(p->df)
> -                        ? p->addr - (p->count - 1L) * p->size
> -                        : p->addr + (p->count - 1L) * p->size) )
> -                p->count = 1;
> -
> -            return hvm_mmio_access(
> -                v, p,
> -                hvm_mmio_handlers[i]->read,
> -                hvm_mmio_handlers[i]->write);
> -        }
> -    }
> -
> -    return X86EMUL_UNHANDLEABLE;
> -}
> -
> -static int process_portio_intercept(portio_action_t action, ioreq_t *p)
> -{
> -    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
> -    int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
> -    uint32_t data;
> -
> -    if ( !p->data_is_ptr )
> +    list_for_each_entry( handler,
> +                         &d->arch.hvm_domain.io_handler.list,
> +                         list_entry )
>      {
> -        if ( p->dir == IOREQ_READ )
> -        {
> -            if ( vio->mmio_retrying )
> -            {
> -                if ( vio->mmio_large_read_bytes != p->size )
> -                    return X86EMUL_UNHANDLEABLE;
> -                memcpy(&data, vio->mmio_large_read, p->size);
> -                vio->mmio_large_read_bytes = 0;
> -                vio->mmio_retrying = 0;
> -            }
> -            else
> -                rc = action(IOREQ_READ, p->addr, p->size, &data);
> -            p->data = data;
> -        }
> -        else
> -        {
> -            data = p->data;
> -            rc = action(IOREQ_WRITE, p->addr, p->size, &data);
> -        }
> -        return rc;
> -    }
> +        const struct hvm_io_ops *ops = handler->ops;
> +        uint64_t start, end;
>  
> -    if ( p->dir == IOREQ_READ )
> -    {
> -        for ( i = 0; i < p->count; i++ )
> +        if ( handler->type != p->type )
> +            continue;
> +
> +        switch ( handler->type )
>          {
> -            if ( vio->mmio_retrying )
> +        case IOREQ_TYPE_PIO:
> +            start = p->addr;
> +            end = p->addr + p->size;
> +            break;
> +        case IOREQ_TYPE_COPY:
> +            if ( p->df )
>              {
> -                if ( vio->mmio_large_read_bytes != p->size )
> -                    return X86EMUL_UNHANDLEABLE;
> -                memcpy(&data, vio->mmio_large_read, p->size);
> -                vio->mmio_large_read_bytes = 0;
> -                vio->mmio_retrying = 0;
> +                start = (p->addr - (p->count - 1) * p->size);

You're re-introducing a problem here that I fixed not so long ago:
The way it's coded here the multiplication is a 32-bit unsigned one,
but we need it to be a 64-bit signed one. I.e. you need 1L here
(and elsewhere) instead of just 1.

> +                end = p->addr + p->size;
>              }
>              else
>              {
> -                rc = action(IOREQ_READ, p->addr, p->size, &data);
> -                if ( rc != X86EMUL_OKAY )
> -                    break;
> -            }
> -            switch ( hvm_copy_to_guest_phys(p->data + step * i,
> -                                            &data, p->size) )
> -            {
> -            case HVMCOPY_okay:
> -                break;
> -            case HVMCOPY_gfn_paged_out:
> -            case HVMCOPY_gfn_shared:
> -                rc = X86EMUL_RETRY;
> -                break;
> -            case HVMCOPY_bad_gfn_to_mfn:
> -                /* Drop the write as real hardware would. */
> -                continue;
> -            case HVMCOPY_bad_gva_to_gfn:
> -                ASSERT(0);
> -                /* fall through */
> -            default:
> -                rc = X86EMUL_UNHANDLEABLE;
> -                break;
> +                start = p->addr;
> +                end = p->addr + p->count * p->size;

Same here - one of the multiplicands needs to be extended to long.

> +done:

Please indent labels by at least one space.

> +void register_mmio_handler(struct domain *d, const struct hvm_mmio_ops *ops)
> +{
> +    struct hvm_mmio_handler *mmio_handler;
> +    unsigned int i;
> +
> +    for ( i = 0; i < NR_MMIO_HANDLERS; i++ )
>      {
> -        if ( type != handler->hdl_list[i].type )
> -            continue;
> -        addr = handler->hdl_list[i].addr;
> -        size = handler->hdl_list[i].size;
> -        if ( (p->addr >= addr) &&
> -             ((p->addr + p->size) <= (addr + size)) )
> -        {
> -            if ( type == HVM_PORTIO )
> -                return process_portio_intercept(
> -                    handler->hdl_list[i].action.portio, p);
> +        mmio_handler = &d->arch.hvm_domain.mmio_handler[i];
>  
> -            if ( unlikely(p->count > 1) &&
> -                 (unlikely(p->df)
> -                  ? p->addr - (p->count - 1L) * p->size < addr
> -                  : p->addr + p->count * 1L * p->size - 1 >= addr + size) )
> -                p->count = 1;
> +        if ( mmio_handler->io_handler.ops == NULL )
> +            break;
> +    }
>  
> -            return handler->hdl_list[i].action.mmio(p);
> -        }
> +    BUG_ON(i == NR_MMIO_HANDLERS);
> +
> +    mmio_handler->io_handler.type = IOREQ_TYPE_COPY;
> +    mmio_handler->io_handler.ops = &mmio_ops;
> +
> +    hvm_register_io_handler(d, &mmio_handler->io_handler, 0);

So you're building a linked list of elements you could go through as
an array? And the ->io_handler initialization is the same for all array
elements. This doesn't look like clear simplification to me.

> +    mmio_handler->ops = ops;

And why is this being done after registration?

And is _all_ of this really usefully per-domain anyway?

> -void relocate_io_handler(
> -    struct domain *d, unsigned long old_addr, unsigned long new_addr,
> -    unsigned long size, int type)
> +bool_t hvm_mmio_internal(paddr_t gpa)
>  {
> -    struct hvm_io_handler *handler = d->arch.hvm_domain.io_handler;
> -    int i;
> -
> -    for ( i = 0; i < handler->num_slot; i++ )
> -        if ( (handler->hdl_list[i].addr == old_addr) &&
> -             (handler->hdl_list[i].size == size) &&
> -             (handler->hdl_list[i].type == type) )
> -            handler->hdl_list[i].addr = new_addr;
> +    ioreq_t p = {
> +        .type = IOREQ_TYPE_COPY,
> +        .addr = gpa
> +    };
> +
> +    return (hvm_find_io_handler(current, &p) != NULL);

Pointless parentheses.

> -typedef int (*portio_action_t)(
> -    int dir, uint32_t port, uint32_t bytes, uint32_t *val);
> -typedef int (*mmio_action_t)(ioreq_t *);
> -struct io_handler {
> -    int                 type;
> -    unsigned long       addr;
> -    unsigned long       size;
> -    union {
> -        portio_action_t portio;
> -        mmio_action_t   mmio;
> -        void           *ptr;
> -    } action;
> -};
> -
> -struct hvm_io_handler {
> -    int     num_slot;
> -    struct  io_handler hdl_list[MAX_IO_HANDLER];
> -};
> -
>  struct hvm_mmio_ops {
>      hvm_mmio_check_t check;
>      hvm_mmio_read_t  read;
>      hvm_mmio_write_t write;
>  };
>  
> -extern const struct hvm_mmio_ops hpet_mmio_ops;
> -extern const struct hvm_mmio_ops vlapic_mmio_ops;
> -extern const struct hvm_mmio_ops vioapic_mmio_ops;
> -extern const struct hvm_mmio_ops msixtbl_mmio_ops;
> -extern const struct hvm_mmio_ops iommu_mmio_ops;

As a result they should all become static in their source files.

> +#define NR_MMIO_HANDLERS 5
>  
> -#define HVM_MMIO_HANDLER_NR 5
> +struct hvm_mmio_handler {
> +    const struct hvm_mmio_ops *ops;
> +    struct hvm_io_handler     io_handler;
> +};
>  
> -int hvm_io_intercept(ioreq_t *p, int type);
> -void register_io_handler(
> -    struct domain *d, unsigned long addr, unsigned long size,
> -    void *action, int type);
> -void relocate_io_handler(
> -    struct domain *d, unsigned long old_addr, unsigned long new_addr,
> -    unsigned long size, int type);
> +typedef int (*portio_action_t)(
> +    int dir, uint32_t port, uint32_t bytes, uint32_t *val);
> +
> +#define NR_PORTIO_HANDLERS 16
> +
> +struct hvm_portio_handler {
> +    uint64_t              start, end;

Why uint64_t? (Note that this is simply the place I spotted this first
at - the problem appears to exist in all of the port I/O handling.)

Jan

  reply	other threads:[~2015-06-17 14:22 UTC|newest]

Thread overview: 38+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-06-11 15:42 [PATCH v2 00/17] x86/hvm: I/O emulation cleanup and fix Paul Durrant
2015-06-11 15:42 ` [PATCH v2 01/17] x86/hvm: simplify hvmemul_do_io() Paul Durrant
2015-06-17 13:31   ` Jan Beulich
2015-06-17 13:54     ` Paul Durrant
2015-06-17 14:47       ` Jan Beulich
2015-06-17 14:55         ` Paul Durrant
2015-06-17 14:59           ` Jan Beulich
2015-06-11 15:42 ` [PATCH v2 02/17] x86/hvm: re-name struct hvm_mmio_handler to hvm_mmio_ops Paul Durrant
2015-06-17 12:43   ` Jan Beulich
2015-06-17 12:45     ` Paul Durrant
2015-06-11 15:42 ` [PATCH v2 03/17] x86/hvm: unify internal portio and mmio intercepts Paul Durrant
2015-06-17 14:22   ` Jan Beulich [this message]
2015-06-17 14:40     ` Paul Durrant
2015-06-17 14:55       ` Jan Beulich
2015-06-11 15:42 ` [PATCH v2 04/17] x86/hvm: unify dpci portio intercept with standard portio intercept Paul Durrant
2015-06-17 14:36   ` Jan Beulich
2015-06-17 14:46     ` Paul Durrant
2015-06-17 14:58       ` Jan Beulich
2015-06-17 15:17         ` Paul Durrant
2015-06-11 15:42 ` [PATCH v2 05/17] x86/hvm: unify stdvga mmio intercept with standard mmio intercept Paul Durrant
2015-06-17 15:50   ` Jan Beulich
2015-06-17 16:30     ` Paul Durrant
2015-06-18  6:23       ` Jan Beulich
2015-06-11 15:42 ` [PATCH v2 06/17] x86/hvm: revert 82ed8716b "fix direct PCI port I/O emulation retry Paul Durrant
2015-06-17 10:54   ` Paul Durrant
2015-06-11 15:42 ` [PATCH v2 07/17] x86/hvm: only call hvm_io_assist() from hvm_wait_for_io() Paul Durrant
2015-06-11 15:42 ` [PATCH v2 08/17] x86/hvm: split I/O completion handling from state model Paul Durrant
2015-06-11 15:42 ` [PATCH v2 09/17] x86/hvm: remove hvm_io_pending() check in hvmemul_do_io() Paul Durrant
2015-06-11 15:42 ` [PATCH v2 10/17] x86/hvm: remove HVMIO_dispatched I/O state Paul Durrant
2015-06-11 15:42 ` [PATCH v2 11/17] x86/hvm: remove hvm_io_state enumeration Paul Durrant
2015-06-11 15:42 ` [PATCH v2 12/17] x86/hvm: use ioreq_t to track in-flight state Paul Durrant
2015-06-11 15:42 ` [PATCH v2 13/17] x86/hvm: only acquire RAM pages for emulation when we need to Paul Durrant
2015-06-11 15:42 ` [PATCH v2 14/17] x86/hvm: remove extraneous parameter from hvmtrace_io_assist() Paul Durrant
2015-06-11 15:42 ` [PATCH v2 15/17] x86/hvm: make sure translated MMIO reads or writes fall within a page Paul Durrant
2015-06-11 15:43 ` [PATCH v2 16/17] x86/hvm: remove multiple open coded 'chunking' loops Paul Durrant
2015-06-11 15:43 ` [PATCH v2 17/17] x86/hvm: track large memory mapped accesses by buffer offset Paul Durrant
2015-06-12 10:44 ` [PATCH v2 00/17] x86/hvm: I/O emulation cleanup and fix Fabio Fantoni
2015-06-12 11:45   ` Paul Durrant

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=55819ECB0200007800086386@mail.emea.novell.com \
    --to=jbeulich@suse.com \
    --cc=andrew.cooper3@citrix.com \
    --cc=keir@xen.org \
    --cc=paul.durrant@citrix.com \
    --cc=xen-devel@lists.xenproject.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.