* [PATCH 0/2] x86emul: segment handling additions
@ 2016-12-08 11:42 Jan Beulich
2016-12-08 11:51 ` [PATCH 1/2] x86emul: support 64-bit segment descriptor types Jan Beulich
2016-12-08 11:52 ` [PATCH 2/2] x86emul: support LAR/LSL/VERR/VERW Jan Beulich
0 siblings, 2 replies; 6+ messages in thread
From: Jan Beulich @ 2016-12-08 11:42 UTC (permalink / raw)
To: xen-devel; +Cc: Andrew Cooper
1: support 64-bit segment descriptor types
2: support LAR/LSL/VERR/VERW
Signed-off-by: Jan Beulich <jbeulich@suse.com>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 6+ messages in thread
* [PATCH 1/2] x86emul: support 64-bit segment descriptor types
2016-12-08 11:42 [PATCH 0/2] x86emul: segment handling additions Jan Beulich
@ 2016-12-08 11:51 ` Jan Beulich
2016-12-08 11:53 ` Andrew Cooper
2016-12-08 11:52 ` [PATCH 2/2] x86emul: support LAR/LSL/VERR/VERW Jan Beulich
1 sibling, 1 reply; 6+ messages in thread
From: Jan Beulich @ 2016-12-08 11:51 UTC (permalink / raw)
To: xen-devel; +Cc: Andrew Cooper
[-- Attachment #1: Type: text/plain, Size: 2383 bytes --]
This is a prereq particularly to eventually supporting UMIP emulation,
but also for LAR/LSL/VERR/VERW.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1355,7 +1355,7 @@ protmode_load_seg(
const struct x86_emulate_ops *ops)
{
enum x86_segment sel_seg = (sel & 4) ? x86_seg_ldtr : x86_seg_gdtr;
- struct { uint32_t a, b; } desc;
+ struct { uint32_t a, b; } desc, desc_hi = {};
uint8_t dpl, rpl;
int cpl = get_cpl(ctxt, ops);
uint32_t a_flag = 0x100;
@@ -1406,9 +1406,6 @@ protmode_load_seg(
/* System segments must have S flag == 0. */
if ( desc.b & (1u << 12) )
goto raise_exn;
- /* We do not support 64-bit descriptor types. */
- if ( in_longmode(ctxt, ops) )
- return X86EMUL_UNHANDLEABLE;
}
/* User segments must have S flag == 1. */
else if ( !(desc.b & (1u << 12)) )
@@ -1482,6 +1479,33 @@ protmode_load_seg(
goto raise_exn;
}
+ if ( !is_x86_user_segment(seg) )
+ {
+ int lm = in_longmode(ctxt, ops);
+
+ if ( lm < 0 )
+ return X86EMUL_UNHANDLEABLE;
+ if ( lm )
+ {
+ switch ( rc = ops->read(sel_seg, (sel & 0xfff8) + 8,
+ &desc_hi, sizeof(desc_hi), ctxt) )
+ {
+ case X86EMUL_OKAY:
+ break;
+
+ case X86EMUL_EXCEPTION:
+ if ( !ctxt->event_pending )
+ goto raise_exn;
+ /* fall through */
+ default:
+ return rc;
+ }
+ if ( (desc_hi.b & 0x00001f00) ||
+ !is_canonical_address((uint64_t)desc_hi.a << 32) )
+ goto raise_exn;
+ }
+ }
+
/* Ensure Accessed flag is set. */
if ( a_flag && !(desc.b & a_flag) )
{
@@ -1507,7 +1531,8 @@ protmode_load_seg(
desc.b = new_desc_b;
}
- sreg->base = (((desc.b << 0) & 0xff000000u) |
+ sreg->base = (((uint64_t)desc_hi.a << 32) |
+ ((desc.b << 0) & 0xff000000u) |
((desc.b << 16) & 0x00ff0000u) |
((desc.a >> 16) & 0x0000ffffu));
sreg->attr.bytes = (((desc.b >> 8) & 0x00ffu) |
[-- Attachment #2: x86emul-64bit-desc-types.patch --]
[-- Type: text/plain, Size: 2429 bytes --]
x86emul: support 64-bit segment descriptor types
This is a prereq particularly to eventually supporting UMIP emulation,
but also for LAR/LSL/VERR/VERW.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1355,7 +1355,7 @@ protmode_load_seg(
const struct x86_emulate_ops *ops)
{
enum x86_segment sel_seg = (sel & 4) ? x86_seg_ldtr : x86_seg_gdtr;
- struct { uint32_t a, b; } desc;
+ struct { uint32_t a, b; } desc, desc_hi = {};
uint8_t dpl, rpl;
int cpl = get_cpl(ctxt, ops);
uint32_t a_flag = 0x100;
@@ -1406,9 +1406,6 @@ protmode_load_seg(
/* System segments must have S flag == 0. */
if ( desc.b & (1u << 12) )
goto raise_exn;
- /* We do not support 64-bit descriptor types. */
- if ( in_longmode(ctxt, ops) )
- return X86EMUL_UNHANDLEABLE;
}
/* User segments must have S flag == 1. */
else if ( !(desc.b & (1u << 12)) )
@@ -1482,6 +1479,33 @@ protmode_load_seg(
goto raise_exn;
}
+ if ( !is_x86_user_segment(seg) )
+ {
+ int lm = in_longmode(ctxt, ops);
+
+ if ( lm < 0 )
+ return X86EMUL_UNHANDLEABLE;
+ if ( lm )
+ {
+ switch ( rc = ops->read(sel_seg, (sel & 0xfff8) + 8,
+ &desc_hi, sizeof(desc_hi), ctxt) )
+ {
+ case X86EMUL_OKAY:
+ break;
+
+ case X86EMUL_EXCEPTION:
+ if ( !ctxt->event_pending )
+ goto raise_exn;
+ /* fall through */
+ default:
+ return rc;
+ }
+ if ( (desc_hi.b & 0x00001f00) ||
+ !is_canonical_address((uint64_t)desc_hi.a << 32) )
+ goto raise_exn;
+ }
+ }
+
/* Ensure Accessed flag is set. */
if ( a_flag && !(desc.b & a_flag) )
{
@@ -1507,7 +1531,8 @@ protmode_load_seg(
desc.b = new_desc_b;
}
- sreg->base = (((desc.b << 0) & 0xff000000u) |
+ sreg->base = (((uint64_t)desc_hi.a << 32) |
+ ((desc.b << 0) & 0xff000000u) |
((desc.b << 16) & 0x00ff0000u) |
((desc.a >> 16) & 0x0000ffffu));
sreg->attr.bytes = (((desc.b >> 8) & 0x00ffu) |
[-- Attachment #3: Type: text/plain, Size: 127 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 6+ messages in thread
* [PATCH 2/2] x86emul: support LAR/LSL/VERR/VERW
2016-12-08 11:42 [PATCH 0/2] x86emul: segment handling additions Jan Beulich
2016-12-08 11:51 ` [PATCH 1/2] x86emul: support 64-bit segment descriptor types Jan Beulich
@ 2016-12-08 11:52 ` Jan Beulich
2016-12-08 16:24 ` Andrew Cooper
1 sibling, 1 reply; 6+ messages in thread
From: Jan Beulich @ 2016-12-08 11:52 UTC (permalink / raw)
To: xen-devel; +Cc: Andrew Cooper
[-- Attachment #1: Type: text/plain, Size: 16619 bytes --]
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/tools/tests/x86_emulator/test_x86_emulator.c
+++ b/tools/tests/x86_emulator/test_x86_emulator.c
@@ -46,7 +46,47 @@ static int read(
if ( verbose )
printf("** %s(%u, %p,, %u,)\n", __func__, seg, (void *)offset, bytes);
- bytes_read += bytes;
+ switch ( seg )
+ {
+ uint64_t value;
+
+ case x86_seg_gdtr:
+ /* Fake system segment type matching table index. */
+ if ( (offset & 7) || (bytes > 8) )
+ return X86EMUL_UNHANDLEABLE;
+#ifdef __x86_64__
+ if ( !(offset & 8) )
+ {
+ memset(p_data, 0, bytes);
+ return X86EMUL_OKAY;
+ }
+ value = (offset - 8) >> 4;
+#else
+ value = (offset - 8) >> 3;
+#endif
+ if ( value >= 0x10 )
+ return X86EMUL_UNHANDLEABLE;
+ value |= value << 40;
+ memcpy(p_data, &value, bytes);
+ return X86EMUL_OKAY;
+
+ case x86_seg_ldtr:
+ /* Fake user segment type matching table index. */
+ if ( (offset & 7) || (bytes > 8) )
+ return X86EMUL_UNHANDLEABLE;
+ value = offset >> 3;
+ if ( value >= 0x10 )
+ return X86EMUL_UNHANDLEABLE;
+ value |= (value | 0x10) << 40;
+ memcpy(p_data, &value, bytes);
+ return X86EMUL_OKAY;
+
+ default:
+ if ( !is_x86_user_segment(seg) )
+ return X86EMUL_UNHANDLEABLE;
+ bytes_read += bytes;
+ break;
+ }
memcpy(p_data, (void *)offset, bytes);
return X86EMUL_OKAY;
}
@@ -75,6 +115,8 @@ static int write(
if ( verbose )
printf("** %s(%u, %p,, %u,)\n", __func__, seg, (void *)offset, bytes);
+ if ( !is_x86_user_segment(seg) )
+ return X86EMUL_UNHANDLEABLE;
memcpy((void *)offset, p_data, bytes);
return X86EMUL_OKAY;
}
@@ -90,10 +132,24 @@ static int cmpxchg(
if ( verbose )
printf("** %s(%u, %p,, %u,)\n", __func__, seg, (void *)offset, bytes);
+ if ( !is_x86_user_segment(seg) )
+ return X86EMUL_UNHANDLEABLE;
memcpy((void *)offset, new, bytes);
return X86EMUL_OKAY;
}
+static int read_segment(
+ enum x86_segment seg,
+ struct segment_register *reg,
+ struct x86_emulate_ctxt *ctxt)
+{
+ if ( !is_x86_user_segment(seg) )
+ return X86EMUL_UNHANDLEABLE;
+ memset(reg, 0, sizeof(*reg));
+ reg->attr.fields.p = 1;
+ return X86EMUL_OKAY;
+}
+
static int cpuid(
unsigned int *eax,
unsigned int *ebx,
@@ -193,6 +249,21 @@ static int read_cr(
return X86EMUL_UNHANDLEABLE;
}
+static int read_msr(
+ unsigned int reg,
+ uint64_t *val,
+ struct x86_emulate_ctxt *ctxt)
+{
+ switch ( reg )
+ {
+ case 0xc0000080: /* EFER */
+ *val = ctxt->addr_size > 32 ? 0x500 /* LME|LMA */ : 0;
+ return X86EMUL_OKAY;
+ }
+
+ return X86EMUL_UNHANDLEABLE;
+}
+
int get_fpu(
void (*exception_callback)(void *, struct cpu_user_regs *),
void *exception_callback_arg,
@@ -223,8 +294,10 @@ static struct x86_emulate_ops emulops =
.insn_fetch = fetch,
.write = write,
.cmpxchg = cmpxchg,
+ .read_segment = read_segment,
.cpuid = cpuid,
.read_cr = read_cr,
+ .read_msr = read_msr,
.get_fpu = get_fpu,
};
@@ -726,6 +799,156 @@ int main(int argc, char **argv)
goto fail;
printf("okay\n");
+ printf("%-40s", "Testing lar (null selector)...");
+ instr[0] = 0x0f; instr[1] = 0x02; instr[2] = 0xc1;
+ regs.eflags = 0x240;
+ regs.eip = (unsigned long)&instr[0];
+ regs.ecx = 0;
+ regs.eax = 0x11111111;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.eax != 0x11111111) ||
+ (regs.eflags != 0x200) ||
+ (regs.eip != (unsigned long)&instr[3]) )
+ goto fail;
+ printf("okay\n");
+
+ printf("%-40s", "Testing lsl (null selector)...");
+ instr[0] = 0x0f; instr[1] = 0x03; instr[2] = 0xca;
+ regs.eflags = 0x240;
+ regs.eip = (unsigned long)&instr[0];
+ regs.edx = 0;
+ regs.ecx = 0x11111111;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.ecx != 0x11111111) ||
+ (regs.eflags != 0x200) ||
+ (regs.eip != (unsigned long)&instr[3]) )
+ goto fail;
+ printf("okay\n");
+
+ printf("%-40s", "Testing verr (null selector)...");
+ instr[0] = 0x0f; instr[1] = 0x00; instr[2] = 0x21;
+ regs.eflags = 0x240;
+ regs.eip = (unsigned long)&instr[0];
+ regs.ecx = (unsigned long)res;
+ *res = 0;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.eflags != 0x200) ||
+ (regs.eip != (unsigned long)&instr[3]) )
+ goto fail;
+ printf("okay\n");
+
+ printf("%-40s", "Testing verw (null selector)...");
+ instr[0] = 0x0f; instr[1] = 0x00; instr[2] = 0x2a;
+ regs.eflags = 0x240;
+ regs.eip = (unsigned long)&instr[0];
+ regs.ecx = 0;
+ regs.edx = (unsigned long)res;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.eflags != 0x200) ||
+ (regs.eip != (unsigned long)&instr[3]) )
+ goto fail;
+ printf("okay\n");
+
+ printf("%-40s", "Testing lar/lsl/verr/verw (all types)...");
+ for ( i = 0; i < 0x20; ++i )
+ {
+ unsigned int sel = i < 0x10 ?
+#ifndef __x86_64__
+ (i << 3) + 8
+#else
+ (i << 4) + 8
+#endif
+ : ((i - 0x10) << 3) | 4;
+ bool failed;
+
+#ifndef __x86_64__
+# define LAR_VALID 0xffff1a3eU
+# define LSL_VALID 0xffff0a0eU
+#else
+# define LAR_VALID 0xffff1a04U
+# define LSL_VALID 0xffff0a04U
+#endif
+#define VERR_VALID 0xccff0000U
+#define VERW_VALID 0x00cc0000U
+
+ instr[0] = 0x0f; instr[1] = 0x02; instr[2] = 0xc2;
+ regs.eflags = (LAR_VALID >> i) & 1 ? 0x200 : 0x240;
+ regs.eip = (unsigned long)&instr[0];
+ regs.edx = sel;
+ regs.eax = 0x11111111;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.eip != (unsigned long)&instr[3]) )
+ goto fail;
+ if ( (LAR_VALID >> i) & 1 )
+ failed = (regs.eflags != 0x240) ||
+ ((regs.eax & 0xf0ff00) != (i << 8));
+ else
+ failed = (regs.eflags != 0x200) ||
+ (regs.eax != 0x11111111);
+ if ( failed )
+ {
+ printf("LAR %04x (type %02x) ", sel, i);
+ goto fail;
+ }
+
+ instr[0] = 0x0f; instr[1] = 0x03; instr[2] = 0xd1;
+ regs.eflags = (LSL_VALID >> i) & 1 ? 0x200 : 0x240;
+ regs.eip = (unsigned long)&instr[0];
+ regs.ecx = sel;
+ regs.edx = 0x11111111;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.eip != (unsigned long)&instr[3]) )
+ goto fail;
+ if ( (LSL_VALID >> i) & 1 )
+ failed = (regs.eflags != 0x240) ||
+ (regs.edx != (i & 0xf));
+ else
+ failed = (regs.eflags != 0x200) ||
+ (regs.edx != 0x11111111);
+ if ( failed )
+ {
+ printf("LSL %04x (type %02x) ", sel, i);
+ goto fail;
+ }
+
+ instr[0] = 0x0f; instr[1] = 0x00; instr[2] = 0xe2;
+ regs.eflags = (VERR_VALID >> i) & 1 ? 0x200 : 0x240;
+ regs.eip = (unsigned long)&instr[0];
+ regs.ecx = 0;
+ regs.edx = sel;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.eip != (unsigned long)&instr[3]) )
+ goto fail;
+ if ( regs.eflags != ((VERR_VALID >> i) & 1 ? 0x240 : 0x200) )
+ {
+ printf("VERR %04x (type %02x) ", sel, i);
+ goto fail;
+ }
+
+ instr[0] = 0x0f; instr[1] = 0x00; instr[2] = 0xe9;
+ regs.eflags = (VERW_VALID >> i) & 1 ? 0x200 : 0x240;
+ regs.eip = (unsigned long)&instr[0];
+ regs.ecx = sel;
+ regs.edx = 0;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.eip != (unsigned long)&instr[3]) )
+ goto fail;
+ if ( regs.eflags != ((VERW_VALID >> i) & 1 ? 0x240 : 0x200) )
+ {
+ printf("VERW %04x (type %02x) ", sel, i);
+ goto fail;
+ }
+ }
+ printf("okay\n");
+
#define decl_insn(which) extern const unsigned char which[], which##_len[]
#define put_insn(which, insn) ".pushsection .test, \"ax\", @progbits\n" \
#which ": " insn "\n" \
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -182,7 +182,7 @@ static const opcode_desc_t opcode_table[
static const opcode_desc_t twobyte_table[256] = {
/* 0x00 - 0x07 */
- ModRM, ImplicitOps|ModRM, ModRM, ModRM,
+ ModRM, ImplicitOps|ModRM, DstReg|SrcMem16|ModRM, DstReg|SrcMem16|ModRM,
0, ImplicitOps, ImplicitOps, ImplicitOps,
/* 0x08 - 0x0F */
ImplicitOps, ImplicitOps, 0, ImplicitOps,
@@ -1384,7 +1384,7 @@ protmode_load_seg(
}
/* System segment descriptors must reside in the GDT. */
- if ( !is_x86_user_segment(seg) && (sel & 4) )
+ if ( is_x86_system_segment(seg) && (sel & 4) )
goto raise_exn;
switch ( rc = ops->read(sel_seg, sel & 0xfff8, &desc, sizeof(desc), ctxt) )
@@ -1401,14 +1401,11 @@ protmode_load_seg(
return rc;
}
- if ( !is_x86_user_segment(seg) )
- {
- /* System segments must have S flag == 0. */
- if ( desc.b & (1u << 12) )
- goto raise_exn;
- }
+ /* System segments must have S flag == 0. */
+ if ( is_x86_system_segment(seg) && (desc.b & (1u << 12)) )
+ goto raise_exn;
/* User segments must have S flag == 1. */
- else if ( !(desc.b & (1u << 12)) )
+ if ( is_x86_user_segment(seg) && !(desc.b & (1u << 12)) )
goto raise_exn;
dpl = (desc.b >> 13) & 3;
@@ -1470,10 +1467,17 @@ protmode_load_seg(
((dpl < cpl) || (dpl < rpl)) )
goto raise_exn;
break;
+ case x86_seg_none:
+ /* Non-conforming segment: check DPL against RPL and CPL. */
+ if ( ((desc.b & (0x1c << 8)) != (0x1c << 8)) &&
+ ((dpl < cpl) || (dpl < rpl)) )
+ return X86EMUL_EXCEPTION;
+ a_flag = 0;
+ break;
}
/* Segment present in memory? */
- if ( !(desc.b & (1 << 15)) )
+ if ( !(desc.b & (1 << 15)) && seg != x86_seg_none )
{
fault_type = seg != x86_seg_ss ? EXC_NP : EXC_SS;
goto raise_exn;
@@ -1481,7 +1485,7 @@ protmode_load_seg(
if ( !is_x86_user_segment(seg) )
{
- int lm = in_longmode(ctxt, ops);
+ int lm = (desc.b & (1u << 12)) ? 0 : in_longmode(ctxt, ops);
if ( lm < 0 )
return X86EMUL_UNHANDLEABLE;
@@ -1501,7 +1505,8 @@ protmode_load_seg(
return rc;
}
if ( (desc_hi.b & 0x00001f00) ||
- !is_canonical_address((uint64_t)desc_hi.a << 32) )
+ (seg != x86_seg_none &&
+ !is_canonical_address((uint64_t)desc_hi.a << 32)) )
goto raise_exn;
}
}
@@ -1544,7 +1549,8 @@ protmode_load_seg(
return X86EMUL_OKAY;
raise_exn:
- generate_exception(fault_type, sel & 0xfffc);
+ generate_exception_if(seg != x86_seg_none, fault_type, sel & 0xfffc);
+ rc = X86EMUL_EXCEPTION;
done:
return rc;
}
@@ -4424,6 +4430,28 @@ x86_emulate(
if ( (rc = load_seg(seg, src.val, 0, NULL, ctxt, ops)) != 0 )
goto done;
break;
+ case 4: /* verr / verw */
+ _regs.eflags &= ~EFLG_ZF;
+ switch ( rc = protmode_load_seg(x86_seg_none, src.val, false,
+ &sreg, ctxt, ops) )
+ {
+ case X86EMUL_OKAY:
+ if ( sreg.attr.fields.s &&
+ ((modrm_reg & 1) ? ((sreg.attr.fields.type & 0xa) == 0x2)
+ : ((sreg.attr.fields.type & 0xa) != 0x8)) )
+ _regs.eflags |= EFLG_ZF;
+ break;
+ case X86EMUL_EXCEPTION:
+ if ( ctxt->event_pending )
+ {
+ default:
+ goto done;
+ }
+ /* Instead of the exception, ZF remains cleared. */
+ rc = X86EMUL_OKAY;
+ break;
+ }
+ break;
default:
generate_exception_if(true, EXC_UD);
break;
@@ -4621,6 +4649,96 @@ x86_emulate(
break;
}
+ case X86EMUL_OPC(0x0f, 0x02): /* lar */
+ generate_exception_if(!in_protmode(ctxt, ops), EXC_UD);
+ _regs.eflags &= ~EFLG_ZF;
+ switch ( rc = protmode_load_seg(x86_seg_none, src.val, false, &sreg,
+ ctxt, ops) )
+ {
+ case X86EMUL_OKAY:
+ if ( !sreg.attr.fields.s )
+ {
+ switch ( sreg.attr.fields.type )
+ {
+ case 0x01: /* available 16-bit TSS */
+ case 0x03: /* busy 16-bit TSS */
+ case 0x04: /* 16-bit call gate */
+ case 0x05: /* 16/32-bit task gate */
+ if ( in_longmode(ctxt, ops) )
+ break;
+ /* fall through */
+ case 0x02: /* LDT */
+ case 0x09: /* available 32/64-bit TSS */
+ case 0x0b: /* busy 32/64-bit TSS */
+ case 0x0c: /* 32/64-bit call gate */
+ _regs.eflags |= EFLG_ZF;
+ break;
+ }
+ }
+ else
+ _regs.eflags |= EFLG_ZF;
+ break;
+ case X86EMUL_EXCEPTION:
+ if ( ctxt->event_pending )
+ {
+ default:
+ goto done;
+ }
+ /* Instead of the exception, ZF remains cleared. */
+ rc = X86EMUL_OKAY;
+ break;
+ }
+ if ( _regs.eflags & EFLG_ZF )
+ dst.val = ((sreg.attr.bytes & 0xff) << 8) |
+ ((sreg.limit >> (sreg.attr.fields.g ? 12 : 0)) &
+ 0xf0000) |
+ ((sreg.attr.bytes & 0xf00) << 12);
+ else
+ dst.type = OP_NONE;
+ break;
+
+ case X86EMUL_OPC(0x0f, 0x03): /* lsl */
+ generate_exception_if(!in_protmode(ctxt, ops), EXC_UD);
+ _regs.eflags &= ~EFLG_ZF;
+ switch ( rc = protmode_load_seg(x86_seg_none, src.val, false, &sreg,
+ ctxt, ops) )
+ {
+ case X86EMUL_OKAY:
+ if ( !sreg.attr.fields.s )
+ {
+ switch ( sreg.attr.fields.type )
+ {
+ case 0x01: /* available 16-bit TSS */
+ case 0x03: /* busy 16-bit TSS */
+ if ( in_longmode(ctxt, ops) )
+ break;
+ /* fall through */
+ case 0x02: /* LDT */
+ case 0x09: /* available 32/64-bit TSS */
+ case 0x0b: /* busy 32/64-bit TSS */
+ _regs.eflags |= EFLG_ZF;
+ break;
+ }
+ }
+ else
+ _regs.eflags |= EFLG_ZF;
+ break;
+ case X86EMUL_EXCEPTION:
+ if ( ctxt->event_pending )
+ {
+ default:
+ goto done;
+ }
+ /* Instead of the exception, ZF remains cleared. */
+ rc = X86EMUL_OKAY;
+ break;
+ }
+ if ( _regs.eflags & EFLG_ZF )
+ dst.val = sreg.limit;
+ else
+ dst.type = OP_NONE;
+ break;
+
case X86EMUL_OPC(0x0f, 0x05): /* syscall */ {
uint64_t msr_content;
[-- Attachment #2: x86emul-lar-lsl-verX.patch --]
[-- Type: text/plain, Size: 16653 bytes --]
x86emul: support LAR/LSL/VERR/VERW
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/tools/tests/x86_emulator/test_x86_emulator.c
+++ b/tools/tests/x86_emulator/test_x86_emulator.c
@@ -46,7 +46,47 @@ static int read(
if ( verbose )
printf("** %s(%u, %p,, %u,)\n", __func__, seg, (void *)offset, bytes);
- bytes_read += bytes;
+ switch ( seg )
+ {
+ uint64_t value;
+
+ case x86_seg_gdtr:
+ /* Fake system segment type matching table index. */
+ if ( (offset & 7) || (bytes > 8) )
+ return X86EMUL_UNHANDLEABLE;
+#ifdef __x86_64__
+ if ( !(offset & 8) )
+ {
+ memset(p_data, 0, bytes);
+ return X86EMUL_OKAY;
+ }
+ value = (offset - 8) >> 4;
+#else
+ value = (offset - 8) >> 3;
+#endif
+ if ( value >= 0x10 )
+ return X86EMUL_UNHANDLEABLE;
+ value |= value << 40;
+ memcpy(p_data, &value, bytes);
+ return X86EMUL_OKAY;
+
+ case x86_seg_ldtr:
+ /* Fake user segment type matching table index. */
+ if ( (offset & 7) || (bytes > 8) )
+ return X86EMUL_UNHANDLEABLE;
+ value = offset >> 3;
+ if ( value >= 0x10 )
+ return X86EMUL_UNHANDLEABLE;
+ value |= (value | 0x10) << 40;
+ memcpy(p_data, &value, bytes);
+ return X86EMUL_OKAY;
+
+ default:
+ if ( !is_x86_user_segment(seg) )
+ return X86EMUL_UNHANDLEABLE;
+ bytes_read += bytes;
+ break;
+ }
memcpy(p_data, (void *)offset, bytes);
return X86EMUL_OKAY;
}
@@ -75,6 +115,8 @@ static int write(
if ( verbose )
printf("** %s(%u, %p,, %u,)\n", __func__, seg, (void *)offset, bytes);
+ if ( !is_x86_user_segment(seg) )
+ return X86EMUL_UNHANDLEABLE;
memcpy((void *)offset, p_data, bytes);
return X86EMUL_OKAY;
}
@@ -90,10 +132,24 @@ static int cmpxchg(
if ( verbose )
printf("** %s(%u, %p,, %u,)\n", __func__, seg, (void *)offset, bytes);
+ if ( !is_x86_user_segment(seg) )
+ return X86EMUL_UNHANDLEABLE;
memcpy((void *)offset, new, bytes);
return X86EMUL_OKAY;
}
+static int read_segment(
+ enum x86_segment seg,
+ struct segment_register *reg,
+ struct x86_emulate_ctxt *ctxt)
+{
+ if ( !is_x86_user_segment(seg) )
+ return X86EMUL_UNHANDLEABLE;
+ memset(reg, 0, sizeof(*reg));
+ reg->attr.fields.p = 1;
+ return X86EMUL_OKAY;
+}
+
static int cpuid(
unsigned int *eax,
unsigned int *ebx,
@@ -193,6 +249,21 @@ static int read_cr(
return X86EMUL_UNHANDLEABLE;
}
+static int read_msr(
+ unsigned int reg,
+ uint64_t *val,
+ struct x86_emulate_ctxt *ctxt)
+{
+ switch ( reg )
+ {
+ case 0xc0000080: /* EFER */
+ *val = ctxt->addr_size > 32 ? 0x500 /* LME|LMA */ : 0;
+ return X86EMUL_OKAY;
+ }
+
+ return X86EMUL_UNHANDLEABLE;
+}
+
int get_fpu(
void (*exception_callback)(void *, struct cpu_user_regs *),
void *exception_callback_arg,
@@ -223,8 +294,10 @@ static struct x86_emulate_ops emulops =
.insn_fetch = fetch,
.write = write,
.cmpxchg = cmpxchg,
+ .read_segment = read_segment,
.cpuid = cpuid,
.read_cr = read_cr,
+ .read_msr = read_msr,
.get_fpu = get_fpu,
};
@@ -726,6 +799,156 @@ int main(int argc, char **argv)
goto fail;
printf("okay\n");
+ printf("%-40s", "Testing lar (null selector)...");
+ instr[0] = 0x0f; instr[1] = 0x02; instr[2] = 0xc1;
+ regs.eflags = 0x240;
+ regs.eip = (unsigned long)&instr[0];
+ regs.ecx = 0;
+ regs.eax = 0x11111111;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.eax != 0x11111111) ||
+ (regs.eflags != 0x200) ||
+ (regs.eip != (unsigned long)&instr[3]) )
+ goto fail;
+ printf("okay\n");
+
+ printf("%-40s", "Testing lsl (null selector)...");
+ instr[0] = 0x0f; instr[1] = 0x03; instr[2] = 0xca;
+ regs.eflags = 0x240;
+ regs.eip = (unsigned long)&instr[0];
+ regs.edx = 0;
+ regs.ecx = 0x11111111;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.ecx != 0x11111111) ||
+ (regs.eflags != 0x200) ||
+ (regs.eip != (unsigned long)&instr[3]) )
+ goto fail;
+ printf("okay\n");
+
+ printf("%-40s", "Testing verr (null selector)...");
+ instr[0] = 0x0f; instr[1] = 0x00; instr[2] = 0x21;
+ regs.eflags = 0x240;
+ regs.eip = (unsigned long)&instr[0];
+ regs.ecx = (unsigned long)res;
+ *res = 0;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.eflags != 0x200) ||
+ (regs.eip != (unsigned long)&instr[3]) )
+ goto fail;
+ printf("okay\n");
+
+ printf("%-40s", "Testing verw (null selector)...");
+ instr[0] = 0x0f; instr[1] = 0x00; instr[2] = 0x2a;
+ regs.eflags = 0x240;
+ regs.eip = (unsigned long)&instr[0];
+ regs.ecx = 0;
+ regs.edx = (unsigned long)res;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.eflags != 0x200) ||
+ (regs.eip != (unsigned long)&instr[3]) )
+ goto fail;
+ printf("okay\n");
+
+ printf("%-40s", "Testing lar/lsl/verr/verw (all types)...");
+ for ( i = 0; i < 0x20; ++i )
+ {
+ unsigned int sel = i < 0x10 ?
+#ifndef __x86_64__
+ (i << 3) + 8
+#else
+ (i << 4) + 8
+#endif
+ : ((i - 0x10) << 3) | 4;
+ bool failed;
+
+#ifndef __x86_64__
+# define LAR_VALID 0xffff1a3eU
+# define LSL_VALID 0xffff0a0eU
+#else
+# define LAR_VALID 0xffff1a04U
+# define LSL_VALID 0xffff0a04U
+#endif
+#define VERR_VALID 0xccff0000U
+#define VERW_VALID 0x00cc0000U
+
+ instr[0] = 0x0f; instr[1] = 0x02; instr[2] = 0xc2;
+ regs.eflags = (LAR_VALID >> i) & 1 ? 0x200 : 0x240;
+ regs.eip = (unsigned long)&instr[0];
+ regs.edx = sel;
+ regs.eax = 0x11111111;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.eip != (unsigned long)&instr[3]) )
+ goto fail;
+ if ( (LAR_VALID >> i) & 1 )
+ failed = (regs.eflags != 0x240) ||
+ ((regs.eax & 0xf0ff00) != (i << 8));
+ else
+ failed = (regs.eflags != 0x200) ||
+ (regs.eax != 0x11111111);
+ if ( failed )
+ {
+ printf("LAR %04x (type %02x) ", sel, i);
+ goto fail;
+ }
+
+ instr[0] = 0x0f; instr[1] = 0x03; instr[2] = 0xd1;
+ regs.eflags = (LSL_VALID >> i) & 1 ? 0x200 : 0x240;
+ regs.eip = (unsigned long)&instr[0];
+ regs.ecx = sel;
+ regs.edx = 0x11111111;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.eip != (unsigned long)&instr[3]) )
+ goto fail;
+ if ( (LSL_VALID >> i) & 1 )
+ failed = (regs.eflags != 0x240) ||
+ (regs.edx != (i & 0xf));
+ else
+ failed = (regs.eflags != 0x200) ||
+ (regs.edx != 0x11111111);
+ if ( failed )
+ {
+ printf("LSL %04x (type %02x) ", sel, i);
+ goto fail;
+ }
+
+ instr[0] = 0x0f; instr[1] = 0x00; instr[2] = 0xe2;
+ regs.eflags = (VERR_VALID >> i) & 1 ? 0x200 : 0x240;
+ regs.eip = (unsigned long)&instr[0];
+ regs.ecx = 0;
+ regs.edx = sel;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.eip != (unsigned long)&instr[3]) )
+ goto fail;
+ if ( regs.eflags != ((VERR_VALID >> i) & 1 ? 0x240 : 0x200) )
+ {
+ printf("VERR %04x (type %02x) ", sel, i);
+ goto fail;
+ }
+
+ instr[0] = 0x0f; instr[1] = 0x00; instr[2] = 0xe9;
+ regs.eflags = (VERW_VALID >> i) & 1 ? 0x200 : 0x240;
+ regs.eip = (unsigned long)&instr[0];
+ regs.ecx = sel;
+ regs.edx = 0;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.eip != (unsigned long)&instr[3]) )
+ goto fail;
+ if ( regs.eflags != ((VERW_VALID >> i) & 1 ? 0x240 : 0x200) )
+ {
+ printf("VERW %04x (type %02x) ", sel, i);
+ goto fail;
+ }
+ }
+ printf("okay\n");
+
#define decl_insn(which) extern const unsigned char which[], which##_len[]
#define put_insn(which, insn) ".pushsection .test, \"ax\", @progbits\n" \
#which ": " insn "\n" \
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -182,7 +182,7 @@ static const opcode_desc_t opcode_table[
static const opcode_desc_t twobyte_table[256] = {
/* 0x00 - 0x07 */
- ModRM, ImplicitOps|ModRM, ModRM, ModRM,
+ ModRM, ImplicitOps|ModRM, DstReg|SrcMem16|ModRM, DstReg|SrcMem16|ModRM,
0, ImplicitOps, ImplicitOps, ImplicitOps,
/* 0x08 - 0x0F */
ImplicitOps, ImplicitOps, 0, ImplicitOps,
@@ -1384,7 +1384,7 @@ protmode_load_seg(
}
/* System segment descriptors must reside in the GDT. */
- if ( !is_x86_user_segment(seg) && (sel & 4) )
+ if ( is_x86_system_segment(seg) && (sel & 4) )
goto raise_exn;
switch ( rc = ops->read(sel_seg, sel & 0xfff8, &desc, sizeof(desc), ctxt) )
@@ -1401,14 +1401,11 @@ protmode_load_seg(
return rc;
}
- if ( !is_x86_user_segment(seg) )
- {
- /* System segments must have S flag == 0. */
- if ( desc.b & (1u << 12) )
- goto raise_exn;
- }
+ /* System segments must have S flag == 0. */
+ if ( is_x86_system_segment(seg) && (desc.b & (1u << 12)) )
+ goto raise_exn;
/* User segments must have S flag == 1. */
- else if ( !(desc.b & (1u << 12)) )
+ if ( is_x86_user_segment(seg) && !(desc.b & (1u << 12)) )
goto raise_exn;
dpl = (desc.b >> 13) & 3;
@@ -1470,10 +1467,17 @@ protmode_load_seg(
((dpl < cpl) || (dpl < rpl)) )
goto raise_exn;
break;
+ case x86_seg_none:
+ /* Non-conforming segment: check DPL against RPL and CPL. */
+ if ( ((desc.b & (0x1c << 8)) != (0x1c << 8)) &&
+ ((dpl < cpl) || (dpl < rpl)) )
+ return X86EMUL_EXCEPTION;
+ a_flag = 0;
+ break;
}
/* Segment present in memory? */
- if ( !(desc.b & (1 << 15)) )
+ if ( !(desc.b & (1 << 15)) && seg != x86_seg_none )
{
fault_type = seg != x86_seg_ss ? EXC_NP : EXC_SS;
goto raise_exn;
@@ -1481,7 +1485,7 @@ protmode_load_seg(
if ( !is_x86_user_segment(seg) )
{
- int lm = in_longmode(ctxt, ops);
+ int lm = (desc.b & (1u << 12)) ? 0 : in_longmode(ctxt, ops);
if ( lm < 0 )
return X86EMUL_UNHANDLEABLE;
@@ -1501,7 +1505,8 @@ protmode_load_seg(
return rc;
}
if ( (desc_hi.b & 0x00001f00) ||
- !is_canonical_address((uint64_t)desc_hi.a << 32) )
+ (seg != x86_seg_none &&
+ !is_canonical_address((uint64_t)desc_hi.a << 32)) )
goto raise_exn;
}
}
@@ -1544,7 +1549,8 @@ protmode_load_seg(
return X86EMUL_OKAY;
raise_exn:
- generate_exception(fault_type, sel & 0xfffc);
+ generate_exception_if(seg != x86_seg_none, fault_type, sel & 0xfffc);
+ rc = X86EMUL_EXCEPTION;
done:
return rc;
}
@@ -4424,6 +4430,28 @@ x86_emulate(
if ( (rc = load_seg(seg, src.val, 0, NULL, ctxt, ops)) != 0 )
goto done;
break;
+ case 4: /* verr / verw */
+ _regs.eflags &= ~EFLG_ZF;
+ switch ( rc = protmode_load_seg(x86_seg_none, src.val, false,
+ &sreg, ctxt, ops) )
+ {
+ case X86EMUL_OKAY:
+ if ( sreg.attr.fields.s &&
+ ((modrm_reg & 1) ? ((sreg.attr.fields.type & 0xa) == 0x2)
+ : ((sreg.attr.fields.type & 0xa) != 0x8)) )
+ _regs.eflags |= EFLG_ZF;
+ break;
+ case X86EMUL_EXCEPTION:
+ if ( ctxt->event_pending )
+ {
+ default:
+ goto done;
+ }
+ /* Instead of the exception, ZF remains cleared. */
+ rc = X86EMUL_OKAY;
+ break;
+ }
+ break;
default:
generate_exception_if(true, EXC_UD);
break;
@@ -4621,6 +4649,96 @@ x86_emulate(
break;
}
+ case X86EMUL_OPC(0x0f, 0x02): /* lar */
+ generate_exception_if(!in_protmode(ctxt, ops), EXC_UD);
+ _regs.eflags &= ~EFLG_ZF;
+ switch ( rc = protmode_load_seg(x86_seg_none, src.val, false, &sreg,
+ ctxt, ops) )
+ {
+ case X86EMUL_OKAY:
+ if ( !sreg.attr.fields.s )
+ {
+ switch ( sreg.attr.fields.type )
+ {
+ case 0x01: /* available 16-bit TSS */
+ case 0x03: /* busy 16-bit TSS */
+ case 0x04: /* 16-bit call gate */
+ case 0x05: /* 16/32-bit task gate */
+ if ( in_longmode(ctxt, ops) )
+ break;
+ /* fall through */
+ case 0x02: /* LDT */
+ case 0x09: /* available 32/64-bit TSS */
+ case 0x0b: /* busy 32/64-bit TSS */
+ case 0x0c: /* 32/64-bit call gate */
+ _regs.eflags |= EFLG_ZF;
+ break;
+ }
+ }
+ else
+ _regs.eflags |= EFLG_ZF;
+ break;
+ case X86EMUL_EXCEPTION:
+ if ( ctxt->event_pending )
+ {
+ default:
+ goto done;
+ }
+ /* Instead of the exception, ZF remains cleared. */
+ rc = X86EMUL_OKAY;
+ break;
+ }
+ if ( _regs.eflags & EFLG_ZF )
+ dst.val = ((sreg.attr.bytes & 0xff) << 8) |
+ ((sreg.limit >> (sreg.attr.fields.g ? 12 : 0)) &
+ 0xf0000) |
+ ((sreg.attr.bytes & 0xf00) << 12);
+ else
+ dst.type = OP_NONE;
+ break;
+
+ case X86EMUL_OPC(0x0f, 0x03): /* lsl */
+ generate_exception_if(!in_protmode(ctxt, ops), EXC_UD);
+ _regs.eflags &= ~EFLG_ZF;
+ switch ( rc = protmode_load_seg(x86_seg_none, src.val, false, &sreg,
+ ctxt, ops) )
+ {
+ case X86EMUL_OKAY:
+ if ( !sreg.attr.fields.s )
+ {
+ switch ( sreg.attr.fields.type )
+ {
+ case 0x01: /* available 16-bit TSS */
+ case 0x03: /* busy 16-bit TSS */
+ if ( in_longmode(ctxt, ops) )
+ break;
+ /* fall through */
+ case 0x02: /* LDT */
+ case 0x09: /* available 32/64-bit TSS */
+ case 0x0b: /* busy 32/64-bit TSS */
+ _regs.eflags |= EFLG_ZF;
+ break;
+ }
+ }
+ else
+ _regs.eflags |= EFLG_ZF;
+ break;
+ case X86EMUL_EXCEPTION:
+ if ( ctxt->event_pending )
+ {
+ default:
+ goto done;
+ }
+ /* Instead of the exception, ZF remains cleared. */
+ rc = X86EMUL_OKAY;
+ break;
+ }
+ if ( _regs.eflags & EFLG_ZF )
+ dst.val = sreg.limit;
+ else
+ dst.type = OP_NONE;
+ break;
+
case X86EMUL_OPC(0x0f, 0x05): /* syscall */ {
uint64_t msr_content;
[-- Attachment #3: Type: text/plain, Size: 127 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH 1/2] x86emul: support 64-bit segment descriptor types
2016-12-08 11:51 ` [PATCH 1/2] x86emul: support 64-bit segment descriptor types Jan Beulich
@ 2016-12-08 11:53 ` Andrew Cooper
0 siblings, 0 replies; 6+ messages in thread
From: Andrew Cooper @ 2016-12-08 11:53 UTC (permalink / raw
To: Jan Beulich, xen-devel
On 08/12/16 11:51, Jan Beulich wrote:
> This is a prereq particularly to eventually supporting UMIP emulation,
> but also for LAR/LSL/VERR/VERW.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH 2/2] x86emul: support LAR/LSL/VERR/VERW
2016-12-08 11:52 ` [PATCH 2/2] x86emul: support LAR/LSL/VERR/VERW Jan Beulich
@ 2016-12-08 16:24 ` Andrew Cooper
2016-12-09 10:40 ` Jan Beulich
0 siblings, 1 reply; 6+ messages in thread
From: Andrew Cooper @ 2016-12-08 16:24 UTC (permalink / raw
To: Jan Beulich, xen-devel
On 08/12/16 11:52, Jan Beulich wrote:
> --- a/xen/arch/x86/x86_emulate/x86_emulate.c
> +++ b/xen/arch/x86/x86_emulate/x86_emulate.c
> @@ -182,7 +182,7 @@ static const opcode_desc_t opcode_table[
>
> static const opcode_desc_t twobyte_table[256] = {
> /* 0x00 - 0x07 */
> - ModRM, ImplicitOps|ModRM, ModRM, ModRM,
> + ModRM, ImplicitOps|ModRM, DstReg|SrcMem16|ModRM, DstReg|SrcMem16|ModRM,
> 0, ImplicitOps, ImplicitOps, ImplicitOps,
> /* 0x08 - 0x0F */
> ImplicitOps, ImplicitOps, 0, ImplicitOps,
> @@ -1384,7 +1384,7 @@ protmode_load_seg(
> }
A number of the following changes were very confusing to follow until I
realised that you are introducing a meaning, where
protmode_load_seg(x86_seg_none, ...) means "please do all the checks,
but don't commit any state or raise any exceptions".
It would be helpful to point this out in the commit message and in a
comment at the head of protmode_load_seg().
>
> /* System segment descriptors must reside in the GDT. */
> - if ( !is_x86_user_segment(seg) && (sel & 4) )
> + if ( is_x86_system_segment(seg) && (sel & 4) )
> goto raise_exn;
>
> switch ( rc = ops->read(sel_seg, sel & 0xfff8, &desc, sizeof(desc), ctxt) )
> @@ -1401,14 +1401,11 @@ protmode_load_seg(
> return rc;
> }
>
> - if ( !is_x86_user_segment(seg) )
> - {
> - /* System segments must have S flag == 0. */
> - if ( desc.b & (1u << 12) )
> - goto raise_exn;
> - }
> + /* System segments must have S flag == 0. */
> + if ( is_x86_system_segment(seg) && (desc.b & (1u << 12)) )
> + goto raise_exn;
> /* User segments must have S flag == 1. */
> - else if ( !(desc.b & (1u << 12)) )
> + if ( is_x86_user_segment(seg) && !(desc.b & (1u << 12)) )
> goto raise_exn;
This might be clearer as (and is definitely shorter as)
/* Check .S is correct for the type of segment. */
if ( seg != x86_seg_none &&
is_x86_user_segment(seg) != (desc.b & (1u << 12)) )
goto raise_exn;
>
> dpl = (desc.b >> 13) & 3;
> @@ -1470,10 +1467,17 @@ protmode_load_seg(
> ((dpl < cpl) || (dpl < rpl)) )
> goto raise_exn;
> break;
> + case x86_seg_none:
> + /* Non-conforming segment: check DPL against RPL and CPL. */
> + if ( ((desc.b & (0x1c << 8)) != (0x1c << 8)) &&
> + ((dpl < cpl) || (dpl < rpl)) )
> + return X86EMUL_EXCEPTION;
I realise it is no functional change, but it would be cleaner to have
this as a goto raise_exn, to avoid having one spurious early-exit in a
function which otherwise always goes to raise_exn or done.
> + a_flag = 0;
> + break;
> }
>
> /* Segment present in memory? */
> - if ( !(desc.b & (1 << 15)) )
> + if ( !(desc.b & (1 << 15)) && seg != x86_seg_none )
What is the purpose of this change, given that the raise_exn case is
always conditional on seg != x86_seg_none ?
> {
> fault_type = seg != x86_seg_ss ? EXC_NP : EXC_SS;
> goto raise_exn;
> @@ -1481,7 +1485,7 @@ protmode_load_seg(
>
> if ( !is_x86_user_segment(seg) )
> {
> - int lm = in_longmode(ctxt, ops);
> + int lm = (desc.b & (1u << 12)) ? 0 : in_longmode(ctxt, ops);
>
> if ( lm < 0 )
> return X86EMUL_UNHANDLEABLE;
> @@ -1501,7 +1505,8 @@ protmode_load_seg(
> return rc;
> }
> if ( (desc_hi.b & 0x00001f00) ||
> - !is_canonical_address((uint64_t)desc_hi.a << 32) )
> + (seg != x86_seg_none &&
> + !is_canonical_address((uint64_t)desc_hi.a << 32)) )
> goto raise_exn;
> }
> }
> @@ -1544,7 +1549,8 @@ protmode_load_seg(
> return X86EMUL_OKAY;
>
> raise_exn:
> - generate_exception(fault_type, sel & 0xfffc);
> + generate_exception_if(seg != x86_seg_none, fault_type, sel & 0xfffc);
> + rc = X86EMUL_EXCEPTION;
> done:
> return rc;
> }
> @@ -4424,6 +4430,28 @@ x86_emulate(
> if ( (rc = load_seg(seg, src.val, 0, NULL, ctxt, ops)) != 0 )
> goto done;
> break;
> + case 4: /* verr / verw */
This looks wrong, but I see it isn't actually. Whether this patch or a
subsequent one, it would be clearer to alter the switch statement not to
mask off the bottom bit, and have individual case labels for the
instructions.
> + _regs.eflags &= ~EFLG_ZF;
> + switch ( rc = protmode_load_seg(x86_seg_none, src.val, false,
> + &sreg, ctxt, ops) )
> + {
> + case X86EMUL_OKAY:
> + if ( sreg.attr.fields.s &&
> + ((modrm_reg & 1) ? ((sreg.attr.fields.type & 0xa) == 0x2)
> + : ((sreg.attr.fields.type & 0xa) != 0x8)) )
> + _regs.eflags |= EFLG_ZF;
> + break;
> + case X86EMUL_EXCEPTION:
Could we please annotate the areas where we are selectively passing
exceptions? I can see this pattern getting confusing if we don't.
Something like:
/* #PF needs to escape. #GP should have been squashed already. */
> + if ( ctxt->event_pending )
> + {
> + default:
> + goto done;
> + }
> + /* Instead of the exception, ZF remains cleared. */
> + rc = X86EMUL_OKAY;
> + break;
> + }
> + break;
> default:
> generate_exception_if(true, EXC_UD);
> break;
> @@ -4621,6 +4649,96 @@ x86_emulate(
> break;
> }
>
> + case X86EMUL_OPC(0x0f, 0x02): /* lar */
> + generate_exception_if(!in_protmode(ctxt, ops), EXC_UD);
As a tangential point, the distinction between the various in_*()
predicates is sufficiently subtle that I keep on having to look it up to
check.
What do you think about replacing the current predicates with a single
mode_is() predicate which takes an or-able set of REAL|VM86|PROT|LONG
flags ?
> + _regs.eflags &= ~EFLG_ZF;
> + switch ( rc = protmode_load_seg(x86_seg_none, src.val, false, &sreg,
> + ctxt, ops) )
> + {
> + case X86EMUL_OKAY:
> + if ( !sreg.attr.fields.s )
> + {
> + switch ( sreg.attr.fields.type )
> + {
> + case 0x01: /* available 16-bit TSS */
> + case 0x03: /* busy 16-bit TSS */
> + case 0x04: /* 16-bit call gate */
> + case 0x05: /* 16/32-bit task gate */
> + if ( in_longmode(ctxt, ops) )
> + break;
> + /* fall through */
> + case 0x02: /* LDT */
According to the Intel manual, LDT is valid in protected mode, but not
valid in long mode.
This appears to be a functional difference from AMD, who permit querying
LDT in long mode.
I haven't checked what real hardware behaviour is. Given that Intel
documents LDT as valid for LSL, I wonder whether this is a documentation
error.
> + case 0x09: /* available 32/64-bit TSS */
> + case 0x0b: /* busy 32/64-bit TSS */
> + case 0x0c: /* 32/64-bit call gate */
> + _regs.eflags |= EFLG_ZF;
> + break;
> + }
> + }
> + else
> + _regs.eflags |= EFLG_ZF;
> + break;
> + case X86EMUL_EXCEPTION:
> + if ( ctxt->event_pending )
> + {
> + default:
> + goto done;
> + }
> + /* Instead of the exception, ZF remains cleared. */
> + rc = X86EMUL_OKAY;
> + break;
> + }
> + if ( _regs.eflags & EFLG_ZF )
> + dst.val = ((sreg.attr.bytes & 0xff) << 8) |
> + ((sreg.limit >> (sreg.attr.fields.g ? 12 : 0)) &
> + 0xf0000) |
> + ((sreg.attr.bytes & 0xf00) << 12);
Is this correct? The AMD manuals says for 16bit, attr & 0xff00, and for
32 or 64bit, attr & 0xffff00.
The Intel manual describes this in a far more complicated way, but still
looks compatible.
~Andrew
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH 2/2] x86emul: support LAR/LSL/VERR/VERW
2016-12-08 16:24 ` Andrew Cooper
@ 2016-12-09 10:40 ` Jan Beulich
0 siblings, 0 replies; 6+ messages in thread
From: Jan Beulich @ 2016-12-09 10:40 UTC (permalink / raw
To: Andrew Cooper; +Cc: xen-devel
>>> On 08.12.16 at 17:24, <andrew.cooper3@citrix.com> wrote:
> On 08/12/16 11:52, Jan Beulich wrote:
>> @@ -1401,14 +1401,11 @@ protmode_load_seg(
>> return rc;
>> }
>>
>> - if ( !is_x86_user_segment(seg) )
>> - {
>> - /* System segments must have S flag == 0. */
>> - if ( desc.b & (1u << 12) )
>> - goto raise_exn;
>> - }
>> + /* System segments must have S flag == 0. */
>> + if ( is_x86_system_segment(seg) && (desc.b & (1u << 12)) )
>> + goto raise_exn;
>> /* User segments must have S flag == 1. */
>> - else if ( !(desc.b & (1u << 12)) )
>> + if ( is_x86_user_segment(seg) && !(desc.b & (1u << 12)) )
>> goto raise_exn;
>
> This might be clearer as (and is definitely shorter as)
>
> /* Check .S is correct for the type of segment. */
> if ( seg != x86_seg_none &&
> is_x86_user_segment(seg) != (desc.b & (1u << 12)) )
> goto raise_exn;
I had it that way first, and then thought the opposite (the explicit
two if()-s being more clear). I'd prefer to keep it as is, but if you
feel strongly about the other variant being better, I can change it.
>> @@ -1470,10 +1467,17 @@ protmode_load_seg(
>> ((dpl < cpl) || (dpl < rpl)) )
>> goto raise_exn;
>> break;
>> + case x86_seg_none:
>> + /* Non-conforming segment: check DPL against RPL and CPL. */
>> + if ( ((desc.b & (0x1c << 8)) != (0x1c << 8)) &&
>> + ((dpl < cpl) || (dpl < rpl)) )
>> + return X86EMUL_EXCEPTION;
>
> I realise it is no functional change, but it would be cleaner to have
> this as a goto raise_exn, to avoid having one spurious early-exit in a
> function which otherwise always goes to raise_exn or done.
That's a matter of taste I think - to me it seems better to make
immediately obvious that no exception will be raised in this case
(which "goto raise_exn" would suggest).
>> /* Segment present in memory? */
>> - if ( !(desc.b & (1 << 15)) )
>> + if ( !(desc.b & (1 << 15)) && seg != x86_seg_none )
>
> What is the purpose of this change, given that the raise_exn case is
> always conditional on seg != x86_seg_none ?
Well - we don't want to reach raise_exn in this case, i.e. we
want to take the implicit "else" path. After all a clear P bit does
not result in the instructions to fail.
>> @@ -4424,6 +4430,28 @@ x86_emulate(
>> if ( (rc = load_seg(seg, src.val, 0, NULL, ctxt, ops)) != 0 )
>> goto done;
>> break;
>> + case 4: /* verr / verw */
>
> This looks wrong, but I see it isn't actually. Whether this patch or a
> subsequent one, it would be clearer to alter the switch statement not to
> mask off the bottom bit, and have individual case labels for the
> instructions.
Again a case where I think the masking off of the low bit is the
better approach. I wouldn't object to a patch altering it, but I'm
not convinced enough to put one together myself. And no, I
don't think this should be done in this patch.
>> + _regs.eflags &= ~EFLG_ZF;
>> + switch ( rc = protmode_load_seg(x86_seg_none, src.val, false,
>> + &sreg, ctxt, ops) )
>> + {
>> + case X86EMUL_OKAY:
>> + if ( sreg.attr.fields.s &&
>> + ((modrm_reg & 1) ? ((sreg.attr.fields.type & 0xa) == 0x2)
>> + : ((sreg.attr.fields.type & 0xa) != 0x8)) )
>> + _regs.eflags |= EFLG_ZF;
>> + break;
>> + case X86EMUL_EXCEPTION:
>
> Could we please annotate the areas where we are selectively passing
> exceptions? I can see this pattern getting confusing if we don't.
> Something like:
>
> /* #PF needs to escape. #GP should have been squashed already. */
Hmm, we're not selectively passing exceptions here; we pass any
which have got raised, and #GP/#SS/#NP aren't among them. So
I think the comment may rather want to be "Only #PF can come
here", which then we could as well ASSERT() ...
>> + if ( ctxt->event_pending )
>> + {
... here (and that would then serve as a de-facto comment).
What do you think?
>> @@ -4621,6 +4649,96 @@ x86_emulate(
>> break;
>> }
>>
>> + case X86EMUL_OPC(0x0f, 0x02): /* lar */
>> + generate_exception_if(!in_protmode(ctxt, ops), EXC_UD);
>
> As a tangential point, the distinction between the various in_*()
> predicates is sufficiently subtle that I keep on having to look it up to
> check.
>
> What do you think about replacing the current predicates with a single
> mode_is() predicate which takes an or-able set of REAL|VM86|PROT|LONG
> flags ?
And you mean x86_decode() to set them once at its beginning?
That would make e.g. read_msr() a required hook, which I don't
think is a good idea (the more that x86_decode(), which in
particular gets passed almost no hooks at all from
x86_decode_insn(), doesn't need to know whether we're
emulating long mode).
If anything this would therefore need to be another input coming
from the original callers (complementing addr_size and sp_size).
>> + _regs.eflags &= ~EFLG_ZF;
>> + switch ( rc = protmode_load_seg(x86_seg_none, src.val, false, &sreg,
>> + ctxt, ops) )
>> + {
>> + case X86EMUL_OKAY:
>> + if ( !sreg.attr.fields.s )
>> + {
>> + switch ( sreg.attr.fields.type )
>> + {
>> + case 0x01: /* available 16-bit TSS */
>> + case 0x03: /* busy 16-bit TSS */
>> + case 0x04: /* 16-bit call gate */
>> + case 0x05: /* 16/32-bit task gate */
>> + if ( in_longmode(ctxt, ops) )
>> + break;
>> + /* fall through */
>> + case 0x02: /* LDT */
>
> According to the Intel manual, LDT is valid in protected mode, but not
> valid in long mode.
>
> This appears to be a functional difference from AMD, who permit querying
> LDT in long mode.
>
> I haven't checked what real hardware behaviour is. Given that Intel
> documents LDT as valid for LSL, I wonder whether this is a documentation
> error.
I did check all this on hardware, so yes, looks to be a doc error.
>> + case 0x09: /* available 32/64-bit TSS */
>> + case 0x0b: /* busy 32/64-bit TSS */
>> + case 0x0c: /* 32/64-bit call gate */
>> + _regs.eflags |= EFLG_ZF;
>> + break;
>> + }
>> + }
>> + else
>> + _regs.eflags |= EFLG_ZF;
>> + break;
>> + case X86EMUL_EXCEPTION:
>> + if ( ctxt->event_pending )
>> + {
>> + default:
>> + goto done;
>> + }
>> + /* Instead of the exception, ZF remains cleared. */
>> + rc = X86EMUL_OKAY;
>> + break;
>> + }
>> + if ( _regs.eflags & EFLG_ZF )
>> + dst.val = ((sreg.attr.bytes & 0xff) << 8) |
>> + ((sreg.limit >> (sreg.attr.fields.g ? 12 : 0)) &
>> + 0xf0000) |
>> + ((sreg.attr.bytes & 0xf00) << 12);
>
> Is this correct? The AMD manuals says for 16bit, attr & 0xff00, and for
> 32 or 64bit, attr & 0xffff00.
Well - as for all operations truncation to operand size will occur
during writeback of dst.val to the destination register.
Jan
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2016-12-09 10:40 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2016-12-08 11:42 [PATCH 0/2] x86emul: segment handling additions Jan Beulich
2016-12-08 11:51 ` [PATCH 1/2] x86emul: support 64-bit segment descriptor types Jan Beulich
2016-12-08 11:53 ` Andrew Cooper
2016-12-08 11:52 ` [PATCH 2/2] x86emul: support LAR/LSL/VERR/VERW Jan Beulich
2016-12-08 16:24 ` Andrew Cooper
2016-12-09 10:40 ` Jan Beulich
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.