author    Peter Maydell  2014-06-05 20:16:28 +0200
committer Peter Maydell  2014-06-05 20:16:28 +0200
commit    9f0355b590ac523d0c4e67c416c3f9cf7af3d574 (patch)
tree      59be39e43a8014ec6031a0afe60dec29b6b3e032 /target-i386/seg_helper.c
parent    Merge remote-tracking branch 'remotes/kraxel/tags/pull-input-10' into staging (diff)
parent    kvm: Fix eax for cpuid leaf 0x40000000 (diff)
Merge remote-tracking branch 'remotes/kvm/uq/master' into staging
* remotes/kvm/uq/master:
  kvm: Fix eax for cpuid leaf 0x40000000
  kvmclock: Ensure proper env->tsc value for kvmclock_current_nsec calculation
  kvm: Enable -cpu option to hide KVM
  kvm: Ensure negative return value on kvm_init() error handling path
  target-i386: set CC_OP to CC_OP_EFLAGS in cpu_load_eflags
  target-i386: get CPL from SS.DPL
  target-i386: rework CPL checks during task switch, preparing for next patch
  target-i386: fix segment flags for SMM and VM86 mode
  target-i386: Fix vm86 mode regression introduced in fd460606fd6f.
  kvm_stat: allow choosing between tracepoints and old stats
  kvmclock: Ensure time in migration never goes backward

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target-i386/seg_helper.c')
-rw-r--r--  target-i386/seg_helper.c  47
1 file changed, 23 insertions(+), 24 deletions(-)
diff --git a/target-i386/seg_helper.c b/target-i386/seg_helper.c
index 258aae806a..51c2833ea5 100644
--- a/target-i386/seg_helper.c
+++ b/target-i386/seg_helper.c
@@ -88,8 +88,10 @@ static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
selector &= 0xffff;
- cpu_x86_load_seg_cache(env, seg, selector,
- (selector << 4), 0xffff, 0);
+
+ cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
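The first hunk is the "fix segment flags for SMM and VM86 mode" change: instead of loading a vm86 segment with a bare flags value of 0, load_seg_vm now builds a real descriptor, marking the segment present, accessed, and writable data at DPL 3. Below is a minimal standalone sketch (not QEMU source) of the resulting flags word; the DESC_* values restate the architectural layout of the descriptor's second dword, matching target-i386/cpu.h:

    #include <stdio.h>

    #define DESC_A_MASK     (1u << 8)    /* accessed */
    #define DESC_W_MASK     (1u << 9)    /* writable data segment */
    #define DESC_S_MASK     (1u << 12)   /* code/data, not system */
    #define DESC_DPL_SHIFT  13
    #define DESC_P_MASK     (1u << 15)   /* present */

    int main(void)
    {
        /* vm86 segments: present, accessed, writable data at DPL 3 */
        unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                         DESC_A_MASK | (3u << DESC_DPL_SHIFT);
        printf("vm86 segment flags: 0x%08x\n", flags);   /* 0x0000f300 */
        return 0;
    }

With the DPL forced to 3, the "get CPL from SS.DPL" change elsewhere in this merge keeps vm86 code correctly running at privilege 3.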
@@ -133,11 +135,10 @@ static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
}
}
-/* XXX: merge with load_seg() */
-static void tss_load_seg(CPUX86State *env, int seg_reg, int selector)
+static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl)
{
uint32_t e1, e2;
- int rpl, dpl, cpl;
+ int rpl, dpl;
if ((selector & 0xfffc) != 0) {
if (load_segment(env, &e1, &e2, selector) != 0) {
@@ -148,18 +149,13 @@ static void tss_load_seg(CPUX86State *env, int seg_reg, int selector)
}
rpl = selector & 3;
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- cpl = env->hflags & HF_CPL_MASK;
if (seg_reg == R_CS) {
if (!(e2 & DESC_CS_MASK)) {
raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
}
- /* XXX: is it correct? */
if (dpl != rpl) {
raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
}
- if ((e2 & DESC_C_MASK) && dpl > rpl) {
- raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
- }
} else if (seg_reg == R_SS) {
/* SS must be writable data */
if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
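Two things change in tss_load_seg: the CPL is now an argument supplied by the caller instead of being read from env->hflags (which still describes the outgoing task at this point), and the old conforming-segment special case under the "XXX: is it correct?" comment is dropped, leaving a single rule for CS on a task switch. A minimal standalone sketch of that rule (not QEMU source; DESC_* restates the second descriptor dword layout):

    #include <stdbool.h>
    #include <stdio.h>

    #define DESC_CS_MASK    (1u << 11)   /* code segment bit in e2 */
    #define DESC_DPL_SHIFT  13

    static bool tss_cs_ok(unsigned e2, int selector)
    {
        int rpl = selector & 3;
        int dpl = (e2 >> DESC_DPL_SHIFT) & 3;

        if (!(e2 & DESC_CS_MASK)) {
            return false;                /* not code -> raise #TS */
        }
        return dpl == rpl;               /* DPL must match RPL */
    }

    int main(void)
    {
        unsigned user_cs_e2 = DESC_CS_MASK | (3u << DESC_DPL_SHIFT);
        printf("%d\n", tss_cs_ok(user_cs_e2, 0x1b));  /* 1: DPL 3, RPL 3 */
        printf("%d\n", tss_cs_ok(user_cs_e2, 0x08));  /* 0: DPL 3, RPL 0 */
        return 0;
    }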
@@ -446,12 +442,13 @@ static void switch_tss(CPUX86State *env, int tss_selector,
/* load the segments */
if (!(new_eflags & VM_MASK)) {
- tss_load_seg(env, R_CS, new_segs[R_CS]);
- tss_load_seg(env, R_SS, new_segs[R_SS]);
- tss_load_seg(env, R_ES, new_segs[R_ES]);
- tss_load_seg(env, R_DS, new_segs[R_DS]);
- tss_load_seg(env, R_FS, new_segs[R_FS]);
- tss_load_seg(env, R_GS, new_segs[R_GS]);
+ int cpl = new_segs[R_CS] & 3;
+ tss_load_seg(env, R_CS, new_segs[R_CS], cpl);
+ tss_load_seg(env, R_SS, new_segs[R_SS], cpl);
+ tss_load_seg(env, R_ES, new_segs[R_ES], cpl);
+ tss_load_seg(env, R_DS, new_segs[R_DS], cpl);
+ tss_load_seg(env, R_FS, new_segs[R_FS], cpl);
+ tss_load_seg(env, R_GS, new_segs[R_GS], cpl);
}
/* check that env->eip is in the CS segment limits */
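Here switch_tss derives the new privilege level from the RPL field of the incoming CS selector and hands it to every tss_load_seg call, since env->hflags cannot yet describe the incoming task. A tiny standalone sketch of the derivation (not QEMU source; the selector values are illustrative):

    #include <assert.h>

    static int cpl_from_cs_selector(int cs_selector)
    {
        return cs_selector & 3;          /* low two bits hold the RPL */
    }

    int main(void)
    {
        assert(cpl_from_cs_selector(0x08) == 0);  /* e.g. kernel code */
        assert(cpl_from_cs_selector(0x1b) == 3);  /* e.g. user code */
        return 0;
    }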
@@ -558,6 +555,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
int has_error_code, new_stack, shift;
uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
uint32_t old_eip, sp_mask;
+ int vm86 = env->eflags & VM_MASK;
has_error_code = 0;
if (!is_int && !is_hw) {
@@ -673,7 +671,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
ssp = get_seg_base(ss_e1, ss_e2);
} else if ((e2 & DESC_C_MASK) || dpl == cpl) {
/* to same privilege */
- if (env->eflags & VM_MASK) {
+ if (vm86) {
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
}
new_stack = 0;
@@ -694,14 +692,14 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
#if 0
/* XXX: check that enough room is available */
push_size = 6 + (new_stack << 2) + (has_error_code << 1);
- if (env->eflags & VM_MASK) {
+ if (vm86) {
push_size += 8;
}
push_size <<= shift;
#endif
if (shift == 1) {
if (new_stack) {
- if (env->eflags & VM_MASK) {
+ if (vm86) {
PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
@@ -718,7 +716,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
}
} else {
if (new_stack) {
- if (env->eflags & VM_MASK) {
+ if (vm86) {
PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
@@ -742,7 +740,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
if (new_stack) {
- if (env->eflags & VM_MASK) {
+ if (vm86) {
cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
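The vm86 substitutions in the hunks above are not just a cleanup; this last hunk shows why. do_interrupt_protected clears VM_MASK from env->eflags before the new_stack block runs, so a direct env->eflags & VM_MASK test here would always read 0 and the vm86 data segments would never be zeroed (the "Fix vm86 mode regression introduced in fd460606fd6f." change). Latching the flag into a local at function entry preserves the mode the interrupt was taken from. A minimal sketch of the pattern (not QEMU source; VM_MASK restates the architectural EFLAGS.VM bit, bit 17):

    #include <stdio.h>

    #define VM_MASK (1u << 17)

    static void handle(unsigned *eflags)
    {
        unsigned vm86 = *eflags & VM_MASK;  /* latch before mutating */
        *eflags &= ~VM_MASK;                /* handler leaves vm86 mode */
        if (vm86) {                         /* still sees original mode */
            printf("interrupt taken from vm86\n");
        }
    }

    int main(void)
    {
        unsigned eflags = VM_MASK;
        handle(&eflags);
        return 0;
    }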
@@ -1600,7 +1598,6 @@ void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
}
next_eip = env->eip + next_eip_addend;
switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
- CC_OP = CC_OP_EFLAGS;
break;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
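The explicit CC_OP assignment after switch_tss disappears here and in helper_lcall_protected below because the "set CC_OP to CC_OP_EFLAGS in cpu_load_eflags" commit in this merge moved that bookkeeping into the central eflags loader. A toy sketch of the pattern; all names are illustrative stand-ins, not QEMU's real definitions:

    #include <stdio.h>

    enum { CC_OP_EFLAGS = 1 };

    typedef struct {
        unsigned eflags;
        int cc_op;
    } ToyCPU;

    static void toy_load_eflags(ToyCPU *s, unsigned val)
    {
        s->eflags = val;
        s->cc_op = CC_OP_EFLAGS;         /* set once, here, for every caller */
    }

    int main(void)
    {
        ToyCPU cpu = { 0, 0 };
        toy_load_eflags(&cpu, 0x202);
        printf("cc_op = %d\n", cpu.cc_op);
        return 0;
    }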
@@ -1769,7 +1766,6 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
}
switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
- CC_OP = CC_OP_EFLAGS;
return;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
@@ -2464,9 +2460,12 @@ void helper_verw(CPUX86State *env, target_ulong selector1)
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
+ int dpl = (env->eflags & VM_MASK) ? 3 : 0;
selector &= 0xffff;
cpu_x86_load_seg_cache(env, seg_reg, selector,
- (selector << 4), 0xffff, 0);
+ (selector << 4), 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
} else {
helper_load_seg(env, seg_reg, selector);
}
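The final hunk gives cpu_x86_load_seg the same descriptor flags as load_seg_vm when protection is off, with one twist: the DPL depends on the mode, since real mode runs at privilege 0 while vm86 runs at privilege 3. A minimal sketch of just that choice (not QEMU source; EFLAGS_VM restates the architectural VM bit):

    #include <stdio.h>

    #define EFLAGS_VM (1u << 17)

    static int realmode_seg_dpl(unsigned eflags)
    {
        return (eflags & EFLAGS_VM) ? 3 : 0;
    }

    int main(void)
    {
        printf("real mode DPL: %d\n", realmode_seg_dpl(0));          /* 0 */
        printf("vm86 DPL:      %d\n", realmode_seg_dpl(EFLAGS_VM));  /* 3 */
        return 0;
    }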