ia64/xen-unstable

changeset 17762:9c14ba60616d

x86 hvm: Support task switch when task state segments straddle page
boundaries. Also improve error diagnostics from hvm_map().

Fixes multi-processor shutdown for some types of Windows OS.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri May 30 16:30:40 2008 +0100 (2008-05-30)
parents 439a3e9459f2
children 487dc63f95ff
files xen/arch/x86/hvm/hvm.c
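
The underlying problem: hvm_task_switch() used hvm_map(), which can map only
a single page, to access the 104-byte 32-bit TSS, and a TSS placed near the
end of a page legitimately straddles the boundary. The patch keeps the
single-page mapping for 8-byte descriptor-table entries (renaming hvm_map()
to hvm_map_entry(); an entry can straddle a page only if the guest's
descriptor-table base is not 8-byte aligned, which is now logged and treated
as fatal for the domain), and switches TSS accesses to
hvm_copy_from_guest_virt() / hvm_copy_to_guest_virt(), which cope with
page-crossing buffers. A minimal sketch of the boundary test involved
(straddles_page() is a hypothetical helper for illustration, not part of
this patch):

    /* Nonzero if an object of 'size' bytes at guest virtual address 'va'
     * crosses a page boundary -- the same test hvm_map_entry() applies
     * below with size == 8. (Hypothetical illustrative helper.) */
    static inline int straddles_page(unsigned long va, unsigned int size)
    {
        return ((va & ~PAGE_MASK) + size) > PAGE_SIZE;
    }

    /* Example: a 104-byte 32-bit TSS at offset 0xfa0 within a page:
     * 0xfa0 + 104 = 0x1008 > 0x1000 (PAGE_SIZE), so it straddles. */
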
--- a/xen/arch/x86/hvm/hvm.c	Fri May 30 16:04:20 2008 +0100
+++ b/xen/arch/x86/hvm/hvm.c	Fri May 30 16:30:40 2008 +0100
@@ -1100,16 +1100,17 @@ int hvm_virtual_to_linear_addr(
     return 0;
 }
 
-static void *hvm_map(unsigned long va, int size)
+static void *hvm_map_entry(unsigned long va)
 {
     unsigned long gfn, mfn;
     p2m_type_t p2mt;
     uint32_t pfec;
 
-    if ( ((va & ~PAGE_MASK) + size) > PAGE_SIZE )
+    if ( ((va & ~PAGE_MASK) + 8) > PAGE_SIZE )
     {
-        hvm_inject_exception(TRAP_page_fault, PFEC_write_access,
-                             (va + PAGE_SIZE - 1) & PAGE_MASK);
+        gdprintk(XENLOG_ERR, "Descriptor table entry "
+                 "straddles page boundary\n");
+        domain_crash(current->domain);
         return NULL;
     }
 
@@ -1121,7 +1122,8 @@ static void *hvm_map(unsigned long va, i
     mfn = mfn_x(gfn_to_mfn_current(gfn, &p2mt));
     if ( !p2m_is_ram(p2mt) )
     {
-        hvm_inject_exception(TRAP_page_fault, pfec, va);
+        gdprintk(XENLOG_ERR, "Failed to look up descriptor table entry\n");
+        domain_crash(current->domain);
         return NULL;
     }
 
@@ -1132,7 +1134,7 @@ static void *hvm_map(unsigned long va, i
     return (char *)map_domain_page(mfn) + (va & ~PAGE_MASK);
 }
 
-static void hvm_unmap(void *p)
+static void hvm_unmap_entry(void *p)
 {
     if ( p )
         unmap_domain_page(p);
@@ -1168,7 +1170,7 @@ static int hvm_load_segment_selector(
     if ( ((sel & 0xfff8) + 7) > desctab.limit )
         goto fail;
 
-    pdesc = hvm_map(desctab.base + (sel & 0xfff8), 8);
+    pdesc = hvm_map_entry(desctab.base + (sel & 0xfff8));
     if ( pdesc == NULL )
         goto hvm_map_fail;
 
@@ -1228,7 +1230,7 @@ static int hvm_load_segment_selector(
     desc.b |= 0x100;
 
  skip_accessed_flag:
-    hvm_unmap(pdesc);
+    hvm_unmap_entry(pdesc);
 
     segr.base = (((desc.b <<  0) & 0xff000000u) |
                  ((desc.b << 16) & 0x00ff0000u) |
@@ -1244,7 +1246,7 @@ static int hvm_load_segment_selector(
     return 0;
 
  unmap_and_fail:
-    hvm_unmap(pdesc);
+    hvm_unmap_entry(pdesc);
  fail:
     hvm_inject_exception(fault_type, sel & 0xfffc, 0);
  hvm_map_fail:
@@ -1260,7 +1262,7 @@ void hvm_task_switch(
     struct segment_register gdt, tr, prev_tr, segr;
     struct desc_struct *optss_desc = NULL, *nptss_desc = NULL, tss_desc;
     unsigned long eflags;
-    int exn_raised;
+    int exn_raised, rc;
     struct {
         u16 back_link,__blh;
         u32 esp0;
@@ -1272,7 +1274,7 @@ void hvm_task_switch(
         u32 cr3, eip, eflags, eax, ecx, edx, ebx, esp, ebp, esi, edi;
         u16 es, _3, cs, _4, ss, _5, ds, _6, fs, _7, gs, _8, ldt, _9;
         u16 trace, iomap;
-    } *ptss, tss;
+    } tss = { 0 };
 
     hvm_get_segment_register(v, x86_seg_gdtr, &gdt);
     hvm_get_segment_register(v, x86_seg_tr, &prev_tr);
@@ -1285,11 +1287,11 @@ void hvm_task_switch(
         goto out;
     }
 
-    optss_desc = hvm_map(gdt.base + (prev_tr.sel & 0xfff8), 8);
+    optss_desc = hvm_map_entry(gdt.base + (prev_tr.sel & 0xfff8));
     if ( optss_desc == NULL )
         goto out;
 
-    nptss_desc = hvm_map(gdt.base + (tss_sel & 0xfff8), 8);
+    nptss_desc = hvm_map_entry(gdt.base + (tss_sel & 0xfff8));
     if ( nptss_desc == NULL )
         goto out;
 
@@ -1324,84 +1326,89 @@ void hvm_task_switch(
         goto out;
     }
 
-    ptss = hvm_map(prev_tr.base, sizeof(tss));
-    if ( ptss == NULL )
+    rc = hvm_copy_from_guest_virt(
+        &tss, prev_tr.base, sizeof(tss), PFEC_page_present);
+    if ( rc == HVMCOPY_bad_gva_to_gfn )
         goto out;
 
     eflags = regs->eflags;
     if ( taskswitch_reason == TSW_iret )
         eflags &= ~X86_EFLAGS_NT;
 
-    ptss->cr3    = v->arch.hvm_vcpu.guest_cr[3];
-    ptss->eip    = regs->eip;
-    ptss->eflags = eflags;
-    ptss->eax    = regs->eax;
-    ptss->ecx    = regs->ecx;
-    ptss->edx    = regs->edx;
-    ptss->ebx    = regs->ebx;
-    ptss->esp    = regs->esp;
-    ptss->ebp    = regs->ebp;
-    ptss->esi    = regs->esi;
-    ptss->edi    = regs->edi;
+    tss.cr3    = v->arch.hvm_vcpu.guest_cr[3];
+    tss.eip    = regs->eip;
+    tss.eflags = eflags;
+    tss.eax    = regs->eax;
+    tss.ecx    = regs->ecx;
+    tss.edx    = regs->edx;
+    tss.ebx    = regs->ebx;
+    tss.esp    = regs->esp;
+    tss.ebp    = regs->ebp;
+    tss.esi    = regs->esi;
+    tss.edi    = regs->edi;
 
     hvm_get_segment_register(v, x86_seg_es, &segr);
-    ptss->es = segr.sel;
+    tss.es = segr.sel;
     hvm_get_segment_register(v, x86_seg_cs, &segr);
-    ptss->cs = segr.sel;
+    tss.cs = segr.sel;
     hvm_get_segment_register(v, x86_seg_ss, &segr);
-    ptss->ss = segr.sel;
+    tss.ss = segr.sel;
     hvm_get_segment_register(v, x86_seg_ds, &segr);
-    ptss->ds = segr.sel;
+    tss.ds = segr.sel;
     hvm_get_segment_register(v, x86_seg_fs, &segr);
-    ptss->fs = segr.sel;
+    tss.fs = segr.sel;
     hvm_get_segment_register(v, x86_seg_gs, &segr);
-    ptss->gs = segr.sel;
+    tss.gs = segr.sel;
     hvm_get_segment_register(v, x86_seg_ldtr, &segr);
-    ptss->ldt = segr.sel;
-
-    hvm_unmap(ptss);
-
-    ptss = hvm_map(tr.base, sizeof(tss));
-    if ( ptss == NULL )
+    tss.ldt = segr.sel;
+
+    rc = hvm_copy_to_guest_virt(
+        prev_tr.base, &tss, sizeof(tss), PFEC_page_present);
+    if ( rc == HVMCOPY_bad_gva_to_gfn )
        goto out;
 
-    if ( hvm_set_cr3(ptss->cr3) )
-    {
-        hvm_unmap(ptss);
+    rc = hvm_copy_from_guest_virt(
+        &tss, tr.base, sizeof(tss), PFEC_page_present);
+    if ( rc == HVMCOPY_bad_gva_to_gfn )
        goto out;
-    }
-
-    regs->eip    = ptss->eip;
-    regs->eflags = ptss->eflags | 2;
-    regs->eax    = ptss->eax;
-    regs->ecx    = ptss->ecx;
-    regs->edx    = ptss->edx;
-    regs->ebx    = ptss->ebx;
-    regs->esp    = ptss->esp;
-    regs->ebp    = ptss->ebp;
-    regs->esi    = ptss->esi;
-    regs->edi    = ptss->edi;
+
+    if ( hvm_set_cr3(tss.cr3) )
+        goto out;
+
+    regs->eip    = tss.eip;
+    regs->eflags = tss.eflags | 2;
+    regs->eax    = tss.eax;
+    regs->ecx    = tss.ecx;
+    regs->edx    = tss.edx;
+    regs->ebx    = tss.ebx;
+    regs->esp    = tss.esp;
+    regs->ebp    = tss.ebp;
+    regs->esi    = tss.esi;
+    regs->edi    = tss.edi;
 
     if ( (taskswitch_reason == TSW_call_or_int) )
     {
         regs->eflags |= X86_EFLAGS_NT;
-        ptss->back_link = prev_tr.sel;
+        tss.back_link = prev_tr.sel;
     }
 
     exn_raised = 0;
-    if ( hvm_load_segment_selector(v, x86_seg_es, ptss->es) ||
-         hvm_load_segment_selector(v, x86_seg_cs, ptss->cs) ||
-         hvm_load_segment_selector(v, x86_seg_ss, ptss->ss) ||
-         hvm_load_segment_selector(v, x86_seg_ds, ptss->ds) ||
-         hvm_load_segment_selector(v, x86_seg_fs, ptss->fs) ||
-         hvm_load_segment_selector(v, x86_seg_gs, ptss->gs) ||
-         hvm_load_segment_selector(v, x86_seg_ldtr, ptss->ldt) )
+    if ( hvm_load_segment_selector(v, x86_seg_es, tss.es) ||
+         hvm_load_segment_selector(v, x86_seg_cs, tss.cs) ||
+         hvm_load_segment_selector(v, x86_seg_ss, tss.ss) ||
+         hvm_load_segment_selector(v, x86_seg_ds, tss.ds) ||
+         hvm_load_segment_selector(v, x86_seg_fs, tss.fs) ||
+         hvm_load_segment_selector(v, x86_seg_gs, tss.gs) ||
+         hvm_load_segment_selector(v, x86_seg_ldtr, tss.ldt) )
         exn_raised = 1;
 
-    if ( (ptss->trace & 1) && !exn_raised )
+    if ( (tss.trace & 1) && !exn_raised )
         hvm_inject_exception(TRAP_debug, tss_sel & 0xfff8, 0);
 
-    hvm_unmap(ptss);
+    rc = hvm_copy_to_guest_virt(
+        tr.base, &tss, sizeof(tss), PFEC_page_present);
+    if ( rc == HVMCOPY_bad_gva_to_gfn )
+        exn_raised = 1;
 
     tr.attr.fields.type = 0xb; /* busy 32-bit tss */
     hvm_set_segment_register(v, x86_seg_tr, &tr);
@@ -1430,8 +1437,8 @@ void hvm_task_switch(
     }
 
  out:
-    hvm_unmap(optss_desc);
-    hvm_unmap(nptss_desc);
+    hvm_unmap_entry(optss_desc);
+    hvm_unmap_entry(nptss_desc);
 }
 
 #define HVMCOPY_from_guest (0u<<0)
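
For context, a conceptual sketch of why the hvm_copy_from_guest_virt() /
hvm_copy_to_guest_virt() calls can handle a straddling TSS where a single
map_domain_page() mapping cannot: the copy can proceed in per-page chunks,
translating each chunk's virtual address independently. This is an
illustrative assumption about the mechanism, not code from this changeset,
and read_one_guest_page() is a hypothetical helper:

    /* Copy 'size' bytes from guest VA 'va' into 'buf', one page at a
     * time, so the source never needs a contiguous mapping. */
    static int copy_from_guest_pagewise(void *buf, unsigned long va,
                                        unsigned int size)
    {
        while ( size > 0 )
        {
            /* Bytes remaining on the current guest page. */
            unsigned int chunk = PAGE_SIZE - (va & ~PAGE_MASK);
            if ( chunk > size )
                chunk = size;
            if ( read_one_guest_page(buf, va, chunk) ) /* hypothetical */
                return -1; /* translation failed, cf. HVMCOPY_bad_gva_to_gfn */
            va += chunk;
            buf = (char *)buf + chunk;
            size -= chunk;
        }
        return 0;
    }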