ia64/xen-unstable

changeset 19615:13a4f4e6d0a3

x86 hvm: Correctly emulate task switches into vm86 mode.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue May 19 02:12:04 2009 +0100 (2009-05-19)
parents e421fd04e150
children 61501fa86b1b
files xen/arch/x86/hvm/hvm.c
line diff
--- a/xen/arch/x86/hvm/hvm.c	Tue May 19 02:09:36 2009 +0100
+++ b/xen/arch/x86/hvm/hvm.c	Tue May 19 02:12:04 2009 +0100
@@ -1193,12 +1193,24 @@ static void hvm_unmap_entry(void *p)
 }
 
 static int hvm_load_segment_selector(
-    struct vcpu *v, enum x86_segment seg, uint16_t sel)
+    enum x86_segment seg, uint16_t sel)
 {
     struct segment_register desctab, cs, segr;
     struct desc_struct *pdesc, desc;
     u8 dpl, rpl, cpl;
     int fault_type = TRAP_invalid_tss;
+    struct cpu_user_regs *regs = guest_cpu_user_regs();
+    struct vcpu *v = current;
+
+    if ( regs->eflags & EF_VM )
+    {
+        segr.sel = sel;
+        segr.base = (uint32_t)sel << 4;
+        segr.limit = 0xffffu;
+        segr.attr.bytes = 0xf3;
+        hvm_set_segment_register(v, seg, &segr);
+        return 0;
+    }
 
     /* NULL selector? */
     if ( (sel & 0xfffc) == 0 )
@@ -1445,13 +1457,13 @@ void hvm_task_switch(
     }
 
     exn_raised = 0;
-    if ( hvm_load_segment_selector(v, x86_seg_ldtr, tss.ldt) ||
-         hvm_load_segment_selector(v, x86_seg_es, tss.es) ||
-         hvm_load_segment_selector(v, x86_seg_cs, tss.cs) ||
-         hvm_load_segment_selector(v, x86_seg_ss, tss.ss) ||
-         hvm_load_segment_selector(v, x86_seg_ds, tss.ds) ||
-         hvm_load_segment_selector(v, x86_seg_fs, tss.fs) ||
-         hvm_load_segment_selector(v, x86_seg_gs, tss.gs) )
+    if ( hvm_load_segment_selector(x86_seg_ldtr, tss.ldt) ||
+         hvm_load_segment_selector(x86_seg_es, tss.es) ||
+         hvm_load_segment_selector(x86_seg_cs, tss.cs) ||
+         hvm_load_segment_selector(x86_seg_ss, tss.ss) ||
+         hvm_load_segment_selector(x86_seg_ds, tss.ds) ||
+         hvm_load_segment_selector(x86_seg_fs, tss.fs) ||
+         hvm_load_segment_selector(x86_seg_gs, tss.gs) )
         exn_raised = 1;
 
     rc = hvm_copy_to_guest_virt(
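Note on the change (not part of the changeset): the first hunk carries the fix. When the EFLAGS image being installed by the task switch has the VM bit set, the guest is entering virtual-8086 mode, so the TSS segment selectors must be loaded with real-mode semantics (base = selector << 4, a 64KiB limit, attribute byte 0xf3, i.e. a present, DPL-3, read/write 16-bit data segment) rather than looked up in the GDT/LDT as the remainder of hvm_load_segment_selector does. The second hunk drops the vcpu argument because the function now derives it from current and reads guest_cpu_user_regs() itself. The sketch below is not Xen source; it is a minimal standalone illustration of that vm86 segment computation, using a simplified stand-in struct and a hypothetical vm86_load_selector() helper.

/*
 * Standalone sketch, not Xen code: mirrors the vm86 fast path added to
 * hvm_load_segment_selector() above.  The struct and helper names are
 * simplified stand-ins for Xen's segment_register handling.
 */
#include <stdint.h>
#include <stdio.h>

struct vm86_segment {
    uint16_t sel;    /* 16-bit selector taken from the TSS image      */
    uint32_t base;   /* real-mode style base: selector * 16           */
    uint32_t limit;  /* always 0xffff (64KiB) in virtual-8086 mode    */
    uint8_t  attr;   /* 0xf3: present, DPL 3, read/write data segment */
};

/* Load a selector the way vm86 mode does: no descriptor-table lookup. */
static struct vm86_segment vm86_load_selector(uint16_t sel)
{
    struct vm86_segment segr;

    segr.sel   = sel;
    segr.base  = (uint32_t)sel << 4;
    segr.limit = 0xffffu;
    segr.attr  = 0xf3;
    return segr;
}

int main(void)
{
    /* Example: selector 0xb800 yields the CGA text buffer base 0xb8000. */
    struct vm86_segment s = vm86_load_selector(0xb800);

    printf("sel=%#06x base=%#07x limit=%#06x attr=%#04x\n",
           (unsigned)s.sel, (unsigned)s.base,
           (unsigned)s.limit, (unsigned)s.attr);
    return 0;
}

Compiled and run, this prints sel=0xb800 base=0xb8000 limit=0xffff attr=0xf3, i.e. the same values the patched code programs into the guest's segment register via hvm_set_segment_register().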