* [PATCH] KVM: VMX: access regs array in vmenter.S in its natural order
From: Uros Bizjak @ 2020-03-10 17:10 UTC
To: kvm, pbonzini; +Cc: Uros Bizjak
Registers in the "regs" array are indexed as rax/rcx/rdx/.../rsi/rdi/r8/...
Reorder the accesses to the "regs" array in vmenter.S to follow its natural order.
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
---
arch/x86/kvm/vmx/vmenter.S | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
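For reference, the "natural order" above is the VCPU_REGS_* index order that
the vcpu->arch.regs[] array and the VCPU_* offsets in vmenter.S are built
from. A from-memory sketch of those definitions (see asm/kvm_vcpu_regs.h and
the top of vmenter.S in the tree for the authoritative versions):

/* GPR indices into vcpu->arch.regs[], matching the hardware encoding order. */
#define __VCPU_REGS_RAX  0
#define __VCPU_REGS_RCX  1
#define __VCPU_REGS_RDX  2
#define __VCPU_REGS_RBX  3
#define __VCPU_REGS_RSP  4      /* context switched by hardware, not by vmenter.S */
#define __VCPU_REGS_RBP  5
#define __VCPU_REGS_RSI  6
#define __VCPU_REGS_RDI  7
/* ... and __VCPU_REGS_R8 == 8 through __VCPU_REGS_R15 == 15 on 64-bit ... */

/* vmenter.S turns an index into a byte offset into the regs array: */
#define WORD_SIZE (BITS_PER_LONG / 8)
#define VCPU_RCX  (__VCPU_REGS_RCX * WORD_SIZE)   /* 0x08 on 64-bit */
#define VCPU_RDX  (__VCPU_REGS_RDX * WORD_SIZE)   /* 0x10 */
#define VCPU_RBX  (__VCPU_REGS_RBX * WORD_SIZE)   /* 0x18 */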
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 81ada2ce99e7..ca2065166d1d 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -135,12 +135,12 @@ SYM_FUNC_START(__vmx_vcpu_run)
cmpb $0, %bl
/* Load guest registers. Don't clobber flags. */
- mov VCPU_RBX(%_ASM_AX), %_ASM_BX
mov VCPU_RCX(%_ASM_AX), %_ASM_CX
mov VCPU_RDX(%_ASM_AX), %_ASM_DX
+ mov VCPU_RBX(%_ASM_AX), %_ASM_BX
+ mov VCPU_RBP(%_ASM_AX), %_ASM_BP
mov VCPU_RSI(%_ASM_AX), %_ASM_SI
mov VCPU_RDI(%_ASM_AX), %_ASM_DI
- mov VCPU_RBP(%_ASM_AX), %_ASM_BP
#ifdef CONFIG_X86_64
mov VCPU_R8 (%_ASM_AX), %r8
mov VCPU_R9 (%_ASM_AX), %r9
@@ -168,12 +168,12 @@ SYM_FUNC_START(__vmx_vcpu_run)
/* Save all guest registers, including RAX from the stack */
__ASM_SIZE(pop) VCPU_RAX(%_ASM_AX)
- mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
+ mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
+ mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
- mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
#ifdef CONFIG_X86_64
mov %r8, VCPU_R8 (%_ASM_AX)
mov %r9, VCPU_R9 (%_ASM_AX)
@@ -197,12 +197,12 @@ SYM_FUNC_START(__vmx_vcpu_run)
* free. RSP and RAX are exempt as RSP is restored by hardware during
* VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
*/
-1: xor %ebx, %ebx
- xor %ecx, %ecx
+1: xor %ecx, %ecx
xor %edx, %edx
+ xor %ebx, %ebx
+ xor %ebp, %ebp
xor %esi, %esi
xor %edi, %edi
- xor %ebp, %ebp
#ifdef CONFIG_X86_64
xor %r8d, %r8d
xor %r9d, %r9d
--
2.24.1
* Re: [PATCH] KVM: VMX: access regs array in vmenter.S in its natural order
From: Sean Christopherson @ 2020-03-10 18:24 UTC
To: Uros Bizjak; +Cc: kvm, pbonzini
On Tue, Mar 10, 2020 at 06:10:24PM +0100, Uros Bizjak wrote:
> Registers in "regs" array are indexed as rax/rcx/rdx/.../rsi/rdi/r8/...
> Reorder access to "regs" array in vmenter.S to follow its natural order.
Any reason other than preference? I wouldn't exactly call the register
indices "natural", e.g. IMO it's easier to visually confirm correctness if
A/B/C/D are ordered alphabetically.
* Re: [PATCH] KVM: VMX: access regs array in vmenter.S in its natural order
From: Uros Bizjak @ 2020-03-10 19:16 UTC
To: Sean Christopherson; +Cc: kvm, Paolo Bonzini
On Tue, Mar 10, 2020 at 7:24 PM Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
>
> On Tue, Mar 10, 2020 at 06:10:24PM +0100, Uros Bizjak wrote:
> > Registers in "regs" array are indexed as rax/rcx/rdx/.../rsi/rdi/r8/...
> > Reorder access to "regs" array in vmenter.S to follow its natural order.
>
> Any reason other than preference? I wouldn't exactly call the register
> indices "natural", e.g. IMO it's easier to visually confirm correctness if
> A/B/C/D are ordered alphabetically.
Yes. Looking at the assembly, the offsets now increase nicely:
71: 48 8b 48 08 mov 0x8(%rax),%rcx
75: 48 8b 50 10 mov 0x10(%rax),%rdx
79: 48 8b 58 18 mov 0x18(%rax),%rbx
7d: 48 8b 68 28 mov 0x28(%rax),%rbp
81: 48 8b 70 30 mov 0x30(%rax),%rsi
85: 48 8b 78 38 mov 0x38(%rax),%rdi
89: 4c 8b 40 40 mov 0x40(%rax),%r8
8d: 4c 8b 48 48 mov 0x48(%rax),%r9
91: 4c 8b 50 50 mov 0x50(%rax),%r10
95: 4c 8b 58 58 mov 0x58(%rax),%r11
99: 4c 8b 60 60 mov 0x60(%rax),%r12
9d: 4c 8b 68 68 mov 0x68(%rax),%r13
a1: 4c 8b 70 70 mov 0x70(%rax),%r14
a5: 4c 8b 78 78 mov 0x78(%rax),%r15
and noticing that ptrace.c processes registers in the order of their
position in pt_regs, I was under the impression that the current vmenter.S
order is some remnant of the recent __asm to .S conversion.
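(If anyone wants to cross-check the displacements in the dump above without
rebuilding, they are just index * 8 on 64-bit; a trivial userspace sketch,
with the indices hard-coded to match asm/kvm_vcpu_regs.h:)

#include <stdio.h>

/* GPR names in vcpu->arch.regs[] index order (hardware encoding order). */
static const char *regs[] = {
        "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
};

int main(void)
{
        /* VCPU_<REG> == index * 8 on 64-bit; rsp (index 4) is never touched in vmenter.S. */
        for (int i = 0; i < 16; i++)
                printf("%-3s -> 0x%02x\n", regs[i], i * 8);
        return 0;
}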
For sure, I can happily live with the current order, so it's not a big deal,
and the patch can be ignored without much fuss.
FYI, the original x86 registers were not named in alphabetical order.
Their names are explained in e.g. [1].
[1] https://en.wikibooks.org/wiki/X86_Assembly/X86_Architecture
* Re: [PATCH] KVM: VMX: access regs array in vmenter.S in its natural order
From: Paolo Bonzini @ 2020-03-11 18:29 UTC
To: Uros Bizjak, Sean Christopherson; +Cc: kvm
After all, I probably prefer Uros's order, and since he took the time to send
a patch I'll apply it.
And now, a short history lecture on 8-bit processors for the younger,
and a walk down memory lane for everyone else...
On 10/03/20 20:16, Uros Bizjak wrote:
> FYI, the original x86 registers were not named in alphabetical order.
> Their names are explained in e.g. [1].
I think the reason why BX comes last is that it maps to the HL register
on the 8080, where "the location pointed to by HL" was the only available
addressing mode.
Likewise, CX maps to the BC register of the 8080. That is only really
apparent if you take into account Z80 extensions such as the DJNZ or
LDIR instructions, but the Z80 predates the 8086 anyway.
So A -> AX, BC -> CX, DE -> DX, HL -> BX.
Thanks,
Roy Batty^A^KPaolo