ia64/xen-unstable

changeset 12696:d5d5915f4a7c

[XEN] Proper segmentation emulation added to HVM emulated PTE writes.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu Nov 30 17:07:26 2006 +0000 (2006-11-30)
parents 01860d20feac
children 5adde07b4b8a
files xen/arch/x86/hvm/platform.c xen/arch/x86/hvm/svm/emulate.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/mm.c xen/arch/x86/mm/shadow/common.c xen/arch/x86/x86_emulate.c xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/svm/vmcb.h xen/include/asm-x86/x86_emulate.h
     1.1 --- a/xen/arch/x86/hvm/platform.c	Thu Nov 30 16:36:18 2006 +0000
     1.2 +++ b/xen/arch/x86/hvm/platform.c	Thu Nov 30 17:07:26 2006 +0000
     1.3 @@ -920,7 +920,7 @@ void handle_mmio(unsigned long gpa)
     1.4      df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;
     1.5  
     1.6      mode = hvm_guest_x86_mode(v);
     1.7 -    inst_addr = hvm_get_segment_base(v, seg_cs) + regs->eip;
     1.8 +    inst_addr = hvm_get_segment_base(v, x86_seg_cs) + regs->eip;
     1.9      inst_len = hvm_instruction_length(inst_addr, mode);
    1.10      if ( inst_len <= 0 )
    1.11      {
    1.12 @@ -964,10 +964,10 @@ void handle_mmio(unsigned long gpa)
    1.13          addr = regs->edi;
    1.14          if ( ad_size == WORD )
    1.15              addr &= 0xFFFF;
    1.16 -        addr += hvm_get_segment_base(v, seg_es);
    1.17 +        addr += hvm_get_segment_base(v, x86_seg_es);
    1.18          if ( addr == gpa )
    1.19          {
    1.20 -            enum segment seg;
    1.21 +            enum x86_segment seg;
    1.22  
    1.23              dir = IOREQ_WRITE;
    1.24              addr = regs->esi;
    1.25 @@ -975,13 +975,13 @@ void handle_mmio(unsigned long gpa)
    1.26                  addr &= 0xFFFF;
    1.27              switch ( seg_sel )
    1.28              {
    1.29 -            case 0x26: seg = seg_es; break;
    1.30 -            case 0x2e: seg = seg_cs; break;
    1.31 -            case 0x36: seg = seg_ss; break;
    1.32 +            case 0x26: seg = x86_seg_es; break;
    1.33 +            case 0x2e: seg = x86_seg_cs; break;
    1.34 +            case 0x36: seg = x86_seg_ss; break;
    1.35              case 0:
    1.36 -            case 0x3e: seg = seg_ds; break;
    1.37 -            case 0x64: seg = seg_fs; break;
    1.38 -            case 0x65: seg = seg_gs; break;
    1.39 +            case 0x3e: seg = x86_seg_ds; break;
    1.40 +            case 0x64: seg = x86_seg_fs; break;
    1.41 +            case 0x65: seg = x86_seg_gs; break;
    1.42              default: domain_crash_synchronous();
    1.43              }
    1.44              addr += hvm_get_segment_base(v, seg);
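
The hunk above is a mechanical rename (seg_* becomes x86_seg_*), but the
switch it touches encodes a fixed piece of the ISA: the x86 segment-override
prefix bytes. A minimal standalone restatement of that mapping, assuming the
enum x86_segment introduced by this patch is in scope (decode_seg_prefix is a
hypothetical helper, not part of the change):

    /* Standard x86 segment-override prefixes, as decoded in handle_mmio().
     * seg_sel == 0 means no override prefix was seen, so DS applies. */
    static enum x86_segment decode_seg_prefix(unsigned char seg_sel)
    {
        switch ( seg_sel )
        {
        case 0x26: return x86_seg_es;
        case 0x2e: return x86_seg_cs;
        case 0x36: return x86_seg_ss;
        case 0x00:                      /* no override prefix */
        case 0x3e: return x86_seg_ds;
        case 0x64: return x86_seg_fs;
        case 0x65: return x86_seg_gs;
        default:   return x86_seg_ds;   /* caller should reject instead */
        }
    }
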
     2.1 --- a/xen/arch/x86/hvm/svm/emulate.c	Thu Nov 30 16:36:18 2006 +0000
     2.2 +++ b/xen/arch/x86/hvm/svm/emulate.c	Thu Nov 30 17:07:26 2006 +0000
     2.3 @@ -209,7 +209,7 @@ unsigned long get_effective_addr_modrm64
     2.4  
     2.5  #if __x86_64__
     2.6          /* 64-bit mode */
     2.7 -        if (vmcb->cs.attributes.fields.l && (vmcb->efer & EFER_LMA))
     2.8 +        if (vmcb->cs.attr.fields.l && (vmcb->efer & EFER_LMA))
     2.9              return vmcb->rip + inst_len + *size + disp;
    2.10  #endif
    2.11          return disp;
    2.12 @@ -334,7 +334,7 @@ unsigned long svm_rip2pointer(struct vmc
    2.13       * no matter what kind of addressing is used.
    2.14       */
    2.15      unsigned long p = vmcb->cs.base + vmcb->rip;
    2.16 -    if (!(vmcb->cs.attributes.fields.l && vmcb->efer & EFER_LMA))
    2.17 +    if (!(vmcb->cs.attr.fields.l && vmcb->efer & EFER_LMA))
    2.18          return (u32)p; /* mask to 32 bits */
    2.19      /* NB. Should mask to 16 bits if in real mode or 16-bit protected mode. */
    2.20      return p;
     3.1 --- a/xen/arch/x86/hvm/svm/svm.c	Thu Nov 30 16:36:18 2006 +0000
     3.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Thu Nov 30 17:07:26 2006 +0000
     3.3 @@ -476,13 +476,13 @@ static int svm_guest_x86_mode(struct vcp
     3.4      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     3.5  
     3.6      if ( vmcb->efer & EFER_LMA )
     3.7 -        return (vmcb->cs.attributes.fields.l ?
     3.8 +        return (vmcb->cs.attr.fields.l ?
     3.9                  X86EMUL_MODE_PROT64 : X86EMUL_MODE_PROT32);
    3.10  
    3.11      if ( svm_realmode(v) )
    3.12          return X86EMUL_MODE_REAL;
    3.13  
    3.14 -    return (vmcb->cs.attributes.fields.db ?
    3.15 +    return (vmcb->cs.attr.fields.db ?
    3.16              X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16);
    3.17  }
    3.18  
    3.19 @@ -509,31 +509,51 @@ unsigned long svm_get_ctrl_reg(struct vc
    3.20      return 0;                   /* dummy */
    3.21  }
    3.22  
    3.23 -static unsigned long svm_get_segment_base(struct vcpu *v, enum segment seg)
    3.24 +static unsigned long svm_get_segment_base(struct vcpu *v, enum x86_segment seg)
    3.25  {
    3.26      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    3.27      int long_mode = 0;
    3.28  
    3.29  #ifdef __x86_64__
    3.30 -    long_mode = vmcb->cs.attributes.fields.l && (vmcb->efer & EFER_LMA);
    3.31 +    long_mode = vmcb->cs.attr.fields.l && (vmcb->efer & EFER_LMA);
    3.32  #endif
    3.33      switch ( seg )
    3.34      {
    3.35 -    case seg_cs: return long_mode ? 0 : vmcb->cs.base;
    3.36 -    case seg_ds: return long_mode ? 0 : vmcb->ds.base;
    3.37 -    case seg_es: return long_mode ? 0 : vmcb->es.base;
    3.38 -    case seg_fs: return vmcb->fs.base;
    3.39 -    case seg_gs: return vmcb->gs.base;
    3.40 -    case seg_ss: return long_mode ? 0 : vmcb->ss.base;
    3.41 -    case seg_tr: return vmcb->tr.base;
    3.42 -    case seg_gdtr: return vmcb->gdtr.base;
    3.43 -    case seg_idtr: return vmcb->idtr.base;
    3.44 -    case seg_ldtr: return vmcb->ldtr.base;
    3.45 +    case x86_seg_cs: return long_mode ? 0 : vmcb->cs.base;
    3.46 +    case x86_seg_ds: return long_mode ? 0 : vmcb->ds.base;
    3.47 +    case x86_seg_es: return long_mode ? 0 : vmcb->es.base;
    3.48 +    case x86_seg_fs: return vmcb->fs.base;
    3.49 +    case x86_seg_gs: return vmcb->gs.base;
    3.50 +    case x86_seg_ss: return long_mode ? 0 : vmcb->ss.base;
    3.51 +    case x86_seg_tr: return vmcb->tr.base;
    3.52 +    case x86_seg_gdtr: return vmcb->gdtr.base;
    3.53 +    case x86_seg_idtr: return vmcb->idtr.base;
    3.54 +    case x86_seg_ldtr: return vmcb->ldtr.base;
    3.55      }
    3.56      BUG();
    3.57      return 0;
    3.58  }
    3.59  
    3.60 +static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
    3.61 +                                     struct segment_register *reg)
    3.62 +{
    3.63 +    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    3.64 +    switch ( seg )
    3.65 +    {
    3.66 +    case x86_seg_cs:   memcpy(reg, &vmcb->cs,   sizeof(*reg)); break;
    3.67 +    case x86_seg_ds:   memcpy(reg, &vmcb->ds,   sizeof(*reg)); break;
    3.68 +    case x86_seg_es:   memcpy(reg, &vmcb->es,   sizeof(*reg)); break;
    3.69 +    case x86_seg_fs:   memcpy(reg, &vmcb->fs,   sizeof(*reg)); break;
    3.70 +    case x86_seg_gs:   memcpy(reg, &vmcb->gs,   sizeof(*reg)); break;
    3.71 +    case x86_seg_ss:   memcpy(reg, &vmcb->ss,   sizeof(*reg)); break;
    3.72 +    case x86_seg_tr:   memcpy(reg, &vmcb->tr,   sizeof(*reg)); break;
    3.73 +    case x86_seg_gdtr: memcpy(reg, &vmcb->gdtr, sizeof(*reg)); break;
    3.74 +    case x86_seg_idtr: memcpy(reg, &vmcb->idtr, sizeof(*reg)); break;
    3.75 +    case x86_seg_ldtr: memcpy(reg, &vmcb->ldtr, sizeof(*reg)); break;
    3.76 +    default: BUG();
    3.77 +    }
    3.78 +}
    3.79 +
    3.80  /* Make sure that xen intercepts any FP accesses from current */
    3.81  static void svm_stts(struct vcpu *v) 
    3.82  {
    3.83 @@ -785,6 +805,11 @@ static void svm_vcpu_destroy(struct vcpu
    3.84      svm_destroy_vmcb(v);
    3.85  }
    3.86  
    3.87 +static void svm_hvm_inject_exception(unsigned int trapnr, int errcode)
    3.88 +{
    3.89 +    svm_inject_exception(current, trapnr, (errcode != -1), errcode);
    3.90 +}
    3.91 +
    3.92  int start_svm(void)
    3.93  {
    3.94      u32 eax, ecx, edx;
    3.95 @@ -844,12 +869,15 @@ int start_svm(void)
    3.96      hvm_funcs.guest_x86_mode = svm_guest_x86_mode;
    3.97      hvm_funcs.get_guest_ctrl_reg = svm_get_ctrl_reg;
    3.98      hvm_funcs.get_segment_base = svm_get_segment_base;
    3.99 +    hvm_funcs.get_segment_register = svm_get_segment_register;
   3.100  
   3.101      hvm_funcs.update_host_cr3 = svm_update_host_cr3;
   3.102      
   3.103      hvm_funcs.stts = svm_stts;
   3.104      hvm_funcs.set_tsc_offset = svm_set_tsc_offset;
   3.105  
   3.106 +    hvm_funcs.inject_exception = svm_hvm_inject_exception;
   3.107 +
   3.108      hvm_funcs.init_ap_context = svm_init_ap_context;
   3.109      hvm_funcs.init_hypercall_page = svm_init_hypercall_page;
   3.110  
   3.111 @@ -1154,7 +1182,7 @@ static void svm_dr_access(struct vcpu *v
   3.112  
   3.113  static void svm_get_prefix_info(
   3.114      struct vmcb_struct *vmcb, 
   3.115 -    unsigned int dir, segment_selector_t **seg, unsigned int *asize)
   3.116 +    unsigned int dir, svm_segment_register_t **seg, unsigned int *asize)
   3.117  {
   3.118      unsigned char inst[MAX_INST_LEN];
   3.119      int i;
   3.120 @@ -1235,18 +1263,18 @@ static inline int svm_get_io_address(
   3.121      unsigned long        reg;
   3.122      unsigned int         asize, isize;
   3.123      int                  long_mode = 0;
   3.124 -    segment_selector_t  *seg = NULL;
   3.125 +    svm_segment_register_t *seg = NULL;
   3.126      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   3.127  
   3.128  #ifdef __x86_64__
   3.129      /* If we're in long mode, we shouldn't check the segment presence & limit */
   3.130 -    long_mode = vmcb->cs.attributes.fields.l && vmcb->efer & EFER_LMA;
   3.131 +    long_mode = vmcb->cs.attr.fields.l && vmcb->efer & EFER_LMA;
   3.132  #endif
   3.133  
   3.134 -    /* d field of cs.attributes is 1 for 32-bit, 0 for 16 or 64 bit. 
   3.135 +    /* d field of cs.attr is 1 for 32-bit, 0 for 16 or 64 bit. 
   3.136       * l field combined with EFER_LMA -> longmode says whether it's 16 or 64 bit. 
   3.137       */
   3.138 -    asize = (long_mode)?64:((vmcb->cs.attributes.fields.db)?32:16);
   3.139 +    asize = (long_mode)?64:((vmcb->cs.attr.fields.db)?32:16);
   3.140  
   3.141  
   3.142      /* The ins/outs instructions are single byte, so if we have got more 
   3.143 @@ -1266,7 +1294,7 @@ static inline int svm_get_io_address(
   3.144          reg = regs->esi;
    3.145          if (!seg)               /* If no prefix, use DS. */
   3.146              seg = &vmcb->ds;
   3.147 -        if (!long_mode && (seg->attributes.fields.type & 0xa) == 0x8) {
   3.148 +        if (!long_mode && (seg->attr.fields.type & 0xa) == 0x8) {
   3.149              svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   3.150              return 0;
   3.151          }
   3.152 @@ -1275,14 +1303,14 @@ static inline int svm_get_io_address(
   3.153      {
   3.154          reg = regs->edi;
   3.155          seg = &vmcb->es;        /* Note: This is ALWAYS ES. */
   3.156 -        if (!long_mode && (seg->attributes.fields.type & 0xa) != 0x2) {
   3.157 +        if (!long_mode && (seg->attr.fields.type & 0xa) != 0x2) {
   3.158              svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   3.159              return 0;
   3.160          }
   3.161      }
   3.162  
   3.163      /* If the segment isn't present, give GP fault! */
   3.164 -    if (!long_mode && !seg->attributes.fields.p) 
   3.165 +    if (!long_mode && !seg->attr.fields.p) 
   3.166      {
   3.167          svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   3.168          return 0;
   3.169 @@ -1305,7 +1333,7 @@ static inline int svm_get_io_address(
   3.170      {
   3.171          ASSERT(*addr == (u32)*addr);
   3.172          if ((u32)(*addr + size - 1) < (u32)*addr ||
   3.173 -            (seg->attributes.fields.type & 0xc) != 0x4 ?
   3.174 +            (seg->attr.fields.type & 0xc) != 0x4 ?
   3.175              *addr + size - 1 > seg->limit :
   3.176              *addr <= seg->limit)
   3.177          {
   3.178 @@ -1318,9 +1346,9 @@ static inline int svm_get_io_address(
   3.179             occur. Note that the checking is not necessary for page granular
   3.180             segments as transfers crossing page boundaries will be broken up
   3.181             anyway. */
   3.182 -        if (!seg->attributes.fields.g && *count > 1)
   3.183 +        if (!seg->attr.fields.g && *count > 1)
   3.184          {
   3.185 -            if ((seg->attributes.fields.type & 0xc) != 0x4)
   3.186 +            if ((seg->attr.fields.type & 0xc) != 0x4)
   3.187              {
   3.188                  /* expand-up */
   3.189                  if (!(regs->eflags & EF_DF))
   3.190 @@ -2154,52 +2182,52 @@ static int svm_do_vmmcall_reset_to_realm
   3.191  
   3.192      /* setup the segment registers and all their hidden states */
   3.193      vmcb->cs.sel = 0xF000;
   3.194 -    vmcb->cs.attributes.bytes = 0x089b;
   3.195 +    vmcb->cs.attr.bytes = 0x089b;
   3.196      vmcb->cs.limit = 0xffff;
   3.197      vmcb->cs.base = 0x000F0000;
   3.198  
   3.199      vmcb->ss.sel = 0x00;
   3.200 -    vmcb->ss.attributes.bytes = 0x0893;
   3.201 +    vmcb->ss.attr.bytes = 0x0893;
   3.202      vmcb->ss.limit = 0xffff;
   3.203      vmcb->ss.base = 0x00;
   3.204  
   3.205      vmcb->ds.sel = 0x00;
   3.206 -    vmcb->ds.attributes.bytes = 0x0893;
   3.207 +    vmcb->ds.attr.bytes = 0x0893;
   3.208      vmcb->ds.limit = 0xffff;
   3.209      vmcb->ds.base = 0x00;
   3.210      
   3.211      vmcb->es.sel = 0x00;
   3.212 -    vmcb->es.attributes.bytes = 0x0893;
   3.213 +    vmcb->es.attr.bytes = 0x0893;
   3.214      vmcb->es.limit = 0xffff;
   3.215      vmcb->es.base = 0x00;
   3.216      
   3.217      vmcb->fs.sel = 0x00;
   3.218 -    vmcb->fs.attributes.bytes = 0x0893;
   3.219 +    vmcb->fs.attr.bytes = 0x0893;
   3.220      vmcb->fs.limit = 0xffff;
   3.221      vmcb->fs.base = 0x00;
   3.222      
   3.223      vmcb->gs.sel = 0x00;
   3.224 -    vmcb->gs.attributes.bytes = 0x0893;
   3.225 +    vmcb->gs.attr.bytes = 0x0893;
   3.226      vmcb->gs.limit = 0xffff;
   3.227      vmcb->gs.base = 0x00;
   3.228  
   3.229      vmcb->ldtr.sel = 0x00;
   3.230 -    vmcb->ldtr.attributes.bytes = 0x0000;
   3.231 +    vmcb->ldtr.attr.bytes = 0x0000;
   3.232      vmcb->ldtr.limit = 0x0;
   3.233      vmcb->ldtr.base = 0x00;
   3.234  
   3.235      vmcb->gdtr.sel = 0x00;
   3.236 -    vmcb->gdtr.attributes.bytes = 0x0000;
   3.237 +    vmcb->gdtr.attr.bytes = 0x0000;
   3.238      vmcb->gdtr.limit = 0x0;
   3.239      vmcb->gdtr.base = 0x00;
   3.240      
   3.241      vmcb->tr.sel = 0;
   3.242 -    vmcb->tr.attributes.bytes = 0;
   3.243 +    vmcb->tr.attr.bytes = 0;
   3.244      vmcb->tr.limit = 0x0;
   3.245      vmcb->tr.base = 0;
   3.246  
   3.247      vmcb->idtr.sel = 0x00;
   3.248 -    vmcb->idtr.attributes.bytes = 0x0000;
   3.249 +    vmcb->idtr.attr.bytes = 0x0000;
   3.250      vmcb->idtr.limit = 0x3ff;
   3.251      vmcb->idtr.base = 0x00;
   3.252  
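
svm_get_io_address() (partially shown above) performs the architectural
data-segment checks before an INS/OUTS transfer, and the attr rename makes
the two limit regimes easier to read. As a sketch of the intended rule
(io_limit_faults is a hypothetical helper using Xen's u32/u16 types; the
patch's code folds all three tests into a single expression):

    /* Nonzero if the byte range [addr, addr+size-1] violates the limit. */
    static int io_limit_faults(u32 addr, u32 size, u32 limit, u16 type)
    {
        u32 last = addr + size - 1;
        if ( last < addr )                /* transfer wraps 0xFFFFFFFF */
            return 1;
        if ( (type & 0xc) != 0x4 )        /* expand-up segment */
            return last > limit;
        return addr <= limit;             /* expand-down: valid offsets lie above limit */
    }
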
     4.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Thu Nov 30 16:36:18 2006 +0000
     4.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Thu Nov 30 17:07:26 2006 +0000
     4.3 @@ -90,7 +90,7 @@ static int construct_vmcb(struct vcpu *v
     4.4  {
     4.5      struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
     4.6      struct vmcb_struct *vmcb = arch_svm->vmcb;
     4.7 -    segment_attributes_t attrib;
     4.8 +    svm_segment_attributes_t attrib;
     4.9  
    4.10      /* Always flush the TLB on VMRUN. */
    4.11      vmcb->tlb_control = 1;
    4.12 @@ -166,13 +166,13 @@ static int construct_vmcb(struct vcpu *v
    4.13      attrib.fields.p = 1;      /* segment present */
    4.14      attrib.fields.db = 1;     /* 32-bit */
    4.15      attrib.fields.g = 1;      /* 4K pages in limit */
    4.16 -    vmcb->es.attributes = attrib;
    4.17 -    vmcb->ss.attributes = attrib;
    4.18 -    vmcb->ds.attributes = attrib;
    4.19 -    vmcb->fs.attributes = attrib;
    4.20 -    vmcb->gs.attributes = attrib;
    4.21 +    vmcb->es.attr = attrib;
    4.22 +    vmcb->ss.attr = attrib;
    4.23 +    vmcb->ds.attr = attrib;
    4.24 +    vmcb->fs.attr = attrib;
    4.25 +    vmcb->gs.attr = attrib;
    4.26      attrib.fields.type = 0xb; /* type=0xb -> executable/readable, accessed */
    4.27 -    vmcb->cs.attributes = attrib;
    4.28 +    vmcb->cs.attr = attrib;
    4.29  
    4.30      /* Guest IDT. */
    4.31      vmcb->idtr.base = 0;
    4.32 @@ -186,11 +186,11 @@ static int construct_vmcb(struct vcpu *v
    4.33      vmcb->ldtr.sel = 0;
    4.34      vmcb->ldtr.base = 0;
    4.35      vmcb->ldtr.limit = 0;
    4.36 -    vmcb->ldtr.attributes.bytes = 0;
    4.37 +    vmcb->ldtr.attr.bytes = 0;
    4.38  
    4.39      /* Guest TSS. */
    4.40      attrib.fields.type = 0xb; /* 32-bit TSS (busy) */
    4.41 -    vmcb->tr.attributes = attrib;
    4.42 +    vmcb->tr.attr = attrib;
    4.43      vmcb->tr.base = 0;
    4.44      vmcb->tr.limit = 0xff;
    4.45  
    4.46 @@ -278,10 +278,10 @@ void svm_do_launch(struct vcpu *v)
    4.47      v->arch.schedule_tail = arch_svm_do_resume;
    4.48  }
    4.49  
    4.50 -static void svm_dump_sel(char *name, segment_selector_t *s)
    4.51 +static void svm_dump_sel(char *name, svm_segment_register_t *s)
    4.52  {
    4.53      printk("%s: sel=0x%04x, attr=0x%04x, limit=0x%08x, base=0x%016llx\n", 
    4.54 -           name, s->sel, s->attributes.bytes, s->limit,
    4.55 +           name, s->sel, s->attr.bytes, s->limit,
    4.56             (unsigned long long)s->base);
    4.57  }
    4.58  
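
construct_vmcb() composes the attribute word field by field and svm_dump_sel()
prints it back as raw bytes. For reference, a sketch that unpacks such a word
according to the svm_segment_attributes_t layout (bits 0-7 mirror descriptor
bits 40-47, bits 8-11 mirror bits 52-55); dump_attr is a hypothetical
illustration in plain C:

    #include <stdint.h>
    #include <stdio.h>

    static void dump_attr(uint16_t a)
    {
        printf("type=%#x s=%u dpl=%u p=%u avl=%u l=%u db=%u g=%u\n",
               a & 0xf, (a >> 4) & 1, (a >> 5) & 3, (a >> 7) & 1,
               (a >> 8) & 1, (a >> 9) & 1, (a >> 10) & 1, (a >> 11) & 1);
    }

For example, the CS word 0x089b installed by the reset-to-realmode path in
svm.c above decodes as type=0xb s=1 dpl=0 p=1 avl=0 l=0 db=0 g=1.
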
     5.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Nov 30 16:36:18 2006 +0000
     5.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Nov 30 17:07:26 2006 +0000
     5.3 @@ -501,7 +501,7 @@ static unsigned long vmx_get_ctrl_reg(st
     5.4      return 0;                   /* dummy */
     5.5  }
     5.6  
     5.7 -static unsigned long vmx_get_segment_base(struct vcpu *v, enum segment seg)
     5.8 +static unsigned long vmx_get_segment_base(struct vcpu *v, enum x86_segment seg)
     5.9  {
    5.10      unsigned long base = 0;
    5.11      int long_mode = 0;
    5.12 @@ -516,22 +516,94 @@ static unsigned long vmx_get_segment_bas
    5.13  
    5.14      switch ( seg )
    5.15      {
    5.16 -    case seg_cs: if ( !long_mode ) base = __vmread(GUEST_CS_BASE); break;
    5.17 -    case seg_ds: if ( !long_mode ) base = __vmread(GUEST_DS_BASE); break;
    5.18 -    case seg_es: if ( !long_mode ) base = __vmread(GUEST_ES_BASE); break;
    5.19 -    case seg_fs: base = __vmread(GUEST_FS_BASE); break;
    5.20 -    case seg_gs: base = __vmread(GUEST_GS_BASE); break;
    5.21 -    case seg_ss: if ( !long_mode ) base = __vmread(GUEST_SS_BASE); break;
    5.22 -    case seg_tr: base = __vmread(GUEST_TR_BASE); break;
    5.23 -    case seg_gdtr: base = __vmread(GUEST_GDTR_BASE); break;
    5.24 -    case seg_idtr: base = __vmread(GUEST_IDTR_BASE); break;
    5.25 -    case seg_ldtr: base = __vmread(GUEST_LDTR_BASE); break;
    5.26 +    case x86_seg_cs: if ( !long_mode ) base = __vmread(GUEST_CS_BASE); break;
    5.27 +    case x86_seg_ds: if ( !long_mode ) base = __vmread(GUEST_DS_BASE); break;
    5.28 +    case x86_seg_es: if ( !long_mode ) base = __vmread(GUEST_ES_BASE); break;
    5.29 +    case x86_seg_fs: base = __vmread(GUEST_FS_BASE); break;
    5.30 +    case x86_seg_gs: base = __vmread(GUEST_GS_BASE); break;
    5.31 +    case x86_seg_ss: if ( !long_mode ) base = __vmread(GUEST_SS_BASE); break;
    5.32 +    case x86_seg_tr: base = __vmread(GUEST_TR_BASE); break;
    5.33 +    case x86_seg_gdtr: base = __vmread(GUEST_GDTR_BASE); break;
    5.34 +    case x86_seg_idtr: base = __vmread(GUEST_IDTR_BASE); break;
    5.35 +    case x86_seg_ldtr: base = __vmread(GUEST_LDTR_BASE); break;
    5.36      default: BUG(); break;
    5.37      }
    5.38  
    5.39      return base;
    5.40  }
    5.41  
    5.42 +static void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
    5.43 +                                     struct segment_register *reg)
    5.44 +{
    5.45 +    u16 attr = 0;
    5.46 +
    5.47 +    ASSERT(v == current);
    5.48 +
    5.49 +    switch ( seg )
    5.50 +    {
    5.51 +    case x86_seg_cs:
    5.52 +        reg->sel   = __vmread(GUEST_CS_SELECTOR);
    5.53 +        reg->limit = __vmread(GUEST_CS_LIMIT);
    5.54 +        reg->base  = __vmread(GUEST_CS_BASE);
    5.55 +        attr       = __vmread(GUEST_CS_AR_BYTES);
    5.56 +        break;
    5.57 +    case x86_seg_ds:
    5.58 +        reg->sel   = __vmread(GUEST_DS_SELECTOR);
    5.59 +        reg->limit = __vmread(GUEST_DS_LIMIT);
    5.60 +        reg->base  = __vmread(GUEST_DS_BASE);
    5.61 +        attr       = __vmread(GUEST_DS_AR_BYTES);
    5.62 +        break;
    5.63 +    case x86_seg_es:
    5.64 +        reg->sel   = __vmread(GUEST_ES_SELECTOR);
    5.65 +        reg->limit = __vmread(GUEST_ES_LIMIT);
    5.66 +        reg->base  = __vmread(GUEST_ES_BASE);
    5.67 +        attr       = __vmread(GUEST_ES_AR_BYTES);
    5.68 +        break;
    5.69 +    case x86_seg_fs:
    5.70 +        reg->sel   = __vmread(GUEST_FS_SELECTOR);
    5.71 +        reg->limit = __vmread(GUEST_FS_LIMIT);
    5.72 +        reg->base  = __vmread(GUEST_FS_BASE);
    5.73 +        attr       = __vmread(GUEST_FS_AR_BYTES);
    5.74 +        break;
    5.75 +    case x86_seg_gs:
    5.76 +        reg->sel   = __vmread(GUEST_GS_SELECTOR);
    5.77 +        reg->limit = __vmread(GUEST_GS_LIMIT);
    5.78 +        reg->base  = __vmread(GUEST_GS_BASE);
    5.79 +        attr       = __vmread(GUEST_GS_AR_BYTES);
    5.80 +        break;
    5.81 +    case x86_seg_ss:
    5.82 +        reg->sel   = __vmread(GUEST_SS_SELECTOR);
    5.83 +        reg->limit = __vmread(GUEST_SS_LIMIT);
    5.84 +        reg->base  = __vmread(GUEST_SS_BASE);
    5.85 +        attr       = __vmread(GUEST_SS_AR_BYTES);
    5.86 +        break;
    5.87 +    case x86_seg_tr:
    5.88 +        reg->sel   = __vmread(GUEST_TR_SELECTOR);
    5.89 +        reg->limit = __vmread(GUEST_TR_LIMIT);
    5.90 +        reg->base  = __vmread(GUEST_TR_BASE);
    5.91 +        attr       = __vmread(GUEST_TR_AR_BYTES);
    5.92 +        break;
    5.93 +    case x86_seg_gdtr:
    5.94 +        reg->limit = __vmread(GUEST_GDTR_LIMIT);
    5.95 +        reg->base  = __vmread(GUEST_GDTR_BASE);
    5.96 +        break;
    5.97 +    case x86_seg_idtr:
    5.98 +        reg->limit = __vmread(GUEST_IDTR_LIMIT);
    5.99 +        reg->base  = __vmread(GUEST_IDTR_BASE);
   5.100 +        break;
   5.101 +    case x86_seg_ldtr:
   5.102 +        reg->sel   = __vmread(GUEST_LDTR_SELECTOR);
   5.103 +        reg->limit = __vmread(GUEST_LDTR_LIMIT);
   5.104 +        reg->base  = __vmread(GUEST_LDTR_BASE);
   5.105 +        attr       = __vmread(GUEST_LDTR_AR_BYTES);
   5.106 +        break;
   5.107 +    default:
   5.108 +        BUG();
   5.109 +    }
   5.110 +
   5.111 +    reg->attr.bytes = (attr & 0xff) | ((attr >> 4) & 0xf00);
   5.112 +}
   5.113 +
   5.114  /* Make sure that xen intercepts any FP accesses from current */
   5.115  static void vmx_stts(struct vcpu *v)
   5.116  {
   5.117 @@ -630,6 +702,11 @@ static int vmx_pae_enabled(struct vcpu *
   5.118      return (vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE));
   5.119  }
   5.120  
   5.121 +static void vmx_inject_exception(unsigned int trapnr, int errcode)
   5.122 +{
   5.123 +    vmx_inject_hw_exception(current, trapnr, errcode);
   5.124 +}
   5.125 +
   5.126  /* Setup HVM interfaces */
   5.127  static void vmx_setup_hvm_funcs(void)
   5.128  {
   5.129 @@ -650,12 +727,15 @@ static void vmx_setup_hvm_funcs(void)
   5.130      hvm_funcs.guest_x86_mode = vmx_guest_x86_mode;
   5.131      hvm_funcs.get_guest_ctrl_reg = vmx_get_ctrl_reg;
   5.132      hvm_funcs.get_segment_base = vmx_get_segment_base;
   5.133 +    hvm_funcs.get_segment_register = vmx_get_segment_register;
   5.134  
   5.135      hvm_funcs.update_host_cr3 = vmx_update_host_cr3;
   5.136  
   5.137      hvm_funcs.stts = vmx_stts;
   5.138      hvm_funcs.set_tsc_offset = vmx_set_tsc_offset;
   5.139  
   5.140 +    hvm_funcs.inject_exception = vmx_inject_exception;
   5.141 +
   5.142      hvm_funcs.init_ap_context = vmx_init_ap_context;
   5.143  
   5.144      hvm_funcs.init_hypercall_page = vmx_init_hypercall_page;
   5.145 @@ -962,14 +1042,14 @@ static void vmx_do_invlpg(unsigned long 
   5.146  
   5.147  
   5.148  static int vmx_check_descriptor(int long_mode, unsigned long eip, int inst_len,
   5.149 -                                enum segment seg, unsigned long *base,
   5.150 +                                enum x86_segment seg, unsigned long *base,
   5.151                                  u32 *limit, u32 *ar_bytes)
   5.152  {
   5.153      enum vmcs_field ar_field, base_field, limit_field;
   5.154  
   5.155      *base = 0;
   5.156      *limit = 0;
   5.157 -    if ( seg != seg_es )
   5.158 +    if ( seg != x86_seg_es )
   5.159      {
   5.160          unsigned char inst[MAX_INST_LEN];
   5.161          int i;
   5.162 @@ -999,22 +1079,22 @@ static int vmx_check_descriptor(int long
   5.163  #endif
   5.164                  continue;
   5.165              case 0x2e: /* CS */
   5.166 -                seg = seg_cs;
   5.167 +                seg = x86_seg_cs;
   5.168                  continue;
   5.169              case 0x36: /* SS */
   5.170 -                seg = seg_ss;
   5.171 +                seg = x86_seg_ss;
   5.172                  continue;
   5.173              case 0x26: /* ES */
   5.174 -                seg = seg_es;
   5.175 +                seg = x86_seg_es;
   5.176                  continue;
   5.177              case 0x64: /* FS */
   5.178 -                seg = seg_fs;
   5.179 +                seg = x86_seg_fs;
   5.180                  continue;
   5.181              case 0x65: /* GS */
   5.182 -                seg = seg_gs;
   5.183 +                seg = x86_seg_gs;
   5.184                  continue;
   5.185              case 0x3e: /* DS */
   5.186 -                seg = seg_ds;
   5.187 +                seg = x86_seg_ds;
   5.188                  continue;
   5.189              }
   5.190          }
   5.191 @@ -1022,32 +1102,32 @@ static int vmx_check_descriptor(int long
   5.192  
   5.193      switch ( seg )
   5.194      {
   5.195 -    case seg_cs:
   5.196 +    case x86_seg_cs:
   5.197          ar_field = GUEST_CS_AR_BYTES;
   5.198          base_field = GUEST_CS_BASE;
   5.199          limit_field = GUEST_CS_LIMIT;
   5.200          break;
   5.201 -    case seg_ds:
   5.202 +    case x86_seg_ds:
   5.203          ar_field = GUEST_DS_AR_BYTES;
   5.204          base_field = GUEST_DS_BASE;
   5.205          limit_field = GUEST_DS_LIMIT;
   5.206          break;
   5.207 -    case seg_es:
   5.208 +    case x86_seg_es:
   5.209          ar_field = GUEST_ES_AR_BYTES;
   5.210          base_field = GUEST_ES_BASE;
   5.211          limit_field = GUEST_ES_LIMIT;
   5.212          break;
   5.213 -    case seg_fs:
   5.214 +    case x86_seg_fs:
   5.215          ar_field = GUEST_FS_AR_BYTES;
   5.216          base_field = GUEST_FS_BASE;
   5.217          limit_field = GUEST_FS_LIMIT;
   5.218          break;
   5.219 -    case seg_gs:
   5.220 +    case x86_seg_gs:
   5.221          ar_field = GUEST_FS_AR_BYTES;
   5.222          base_field = GUEST_FS_BASE;
   5.223          limit_field = GUEST_FS_LIMIT;
   5.224          break;
   5.225 -    case seg_ss:
   5.226 +    case x86_seg_ss:
   5.227          ar_field = GUEST_GS_AR_BYTES;
   5.228          base_field = GUEST_GS_BASE;
   5.229          limit_field = GUEST_GS_LIMIT;
   5.230 @@ -1057,7 +1137,7 @@ static int vmx_check_descriptor(int long
   5.231          return 0;
   5.232      }
   5.233  
   5.234 -    if ( !long_mode || seg == seg_fs || seg == seg_gs )
   5.235 +    if ( !long_mode || seg == x86_seg_fs || seg == x86_seg_gs )
   5.236      {
   5.237          *base = __vmread(base_field);
   5.238          *limit = __vmread(limit_field);
   5.239 @@ -1127,7 +1207,7 @@ static void vmx_io_instruction(unsigned 
   5.240           * selector is null.
   5.241           */
   5.242          if ( !vmx_check_descriptor(long_mode, regs->eip, inst_len,
   5.243 -                                   dir == IOREQ_WRITE ? seg_ds : seg_es,
   5.244 +                                   dir==IOREQ_WRITE ? x86_seg_ds : x86_seg_es,
   5.245                                     &base, &limit, &ar_bytes) ) {
   5.246              if ( !long_mode ) {
   5.247                  vmx_inject_hw_exception(current, TRAP_gp_fault, 0);
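
Unlike SVM, VMX stores access rights in a 32-bit VMCS field whose AVL/L/D/G
bits sit at positions 12-15 (bits 8-11 are reserved), whereas the generic
segment_register format borrowed from the VMCB wants them at bits 8-11. That
is the purpose of the final line of vmx_get_segment_register():
reg->attr.bytes = (attr & 0xff) | ((attr >> 4) & 0xf00). A worked example
with an illustrative value:

    /* A flat 32-bit code segment typically reads back as ar = 0xc09b
     * (G and D/B at bits 15/14; type/s/dpl/p in the low byte):
     *
     *   0xc09b & 0xff          = 0x09b   (descriptor bits 40-47)
     *   (0xc09b >> 4) & 0xf00  = 0xc00   (bits 52-55 moved to 8-11)
     *   attr.bytes             = 0xc9b
     */
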
     6.1 --- a/xen/arch/x86/mm.c	Thu Nov 30 16:36:18 2006 +0000
     6.2 +++ b/xen/arch/x86/mm.c	Thu Nov 30 17:07:26 2006 +0000
     6.3 @@ -3040,7 +3040,7 @@ struct ptwr_emulate_ctxt {
     6.4  };
     6.5  
     6.6  static int ptwr_emulated_read(
     6.7 -    unsigned int seg,
     6.8 +    enum x86_segment seg,
     6.9      unsigned long offset,
    6.10      unsigned long *val,
    6.11      unsigned int bytes,
    6.12 @@ -3183,7 +3183,7 @@ static int ptwr_emulated_update(
    6.13  }
    6.14  
    6.15  static int ptwr_emulated_write(
    6.16 -    unsigned int seg,
    6.17 +    enum x86_segment seg,
    6.18      unsigned long offset,
    6.19      unsigned long val,
    6.20      unsigned int bytes,
    6.21 @@ -3195,7 +3195,7 @@ static int ptwr_emulated_write(
    6.22  }
    6.23  
    6.24  static int ptwr_emulated_cmpxchg(
    6.25 -    unsigned int seg,
    6.26 +    enum x86_segment seg,
    6.27      unsigned long offset,
    6.28      unsigned long old,
    6.29      unsigned long new,
    6.30 @@ -3208,7 +3208,7 @@ static int ptwr_emulated_cmpxchg(
    6.31  }
    6.32  
    6.33  static int ptwr_emulated_cmpxchg8b(
    6.34 -    unsigned int seg,
    6.35 +    enum x86_segment seg,
    6.36      unsigned long offset,
    6.37      unsigned long old,
    6.38      unsigned long old_hi,
     7.1 --- a/xen/arch/x86/mm/shadow/common.c	Thu Nov 30 16:36:18 2006 +0000
     7.2 +++ b/xen/arch/x86/mm/shadow/common.c	Thu Nov 30 17:07:26 2006 +0000
     7.3 @@ -69,14 +69,87 @@ int _shadow_mode_refcounts(struct domain
     7.4  /* x86 emulator support for the shadow code
     7.5   */
     7.6  
     7.7 +static int hvm_translate_linear_addr(
     7.8 +    enum x86_segment seg,
     7.9 +    unsigned long offset,
    7.10 +    unsigned int bytes,
    7.11 +    unsigned int is_write,
    7.12 +    unsigned long *paddr)
    7.13 +{
    7.14 +    struct segment_register creg, dreg;
    7.15 +    unsigned long limit, addr = offset;
    7.16 +    uint32_t last_byte;
    7.17 +
    7.18 +    hvm_get_segment_register(current, x86_seg_cs, &creg);
    7.19 +    hvm_get_segment_register(current, seg,        &dreg);
    7.20 +
    7.21 +    if ( !creg.attr.fields.l || !hvm_long_mode_enabled(current) )
    7.22 +    {
    7.23 +        /*
    7.24 +         * COMPATIBILITY MODE: Apply segment checks and add base.
    7.25 +         */
    7.26 +
    7.27 +        /* If this is a store, is the segment a writable data segment? */
    7.28 +        if ( is_write && ((dreg.attr.fields.type & 0xa) != 0x2) )
    7.29 +            goto gpf;
    7.30 +
    7.31 +        /* Calculate the segment limit, including granularity flag. */
    7.32 +        limit = dreg.limit;
    7.33 +        if ( dreg.attr.fields.g )
    7.34 +            limit = (limit << 12) | 0xfff;
    7.35 +
    7.36 +        last_byte = offset + bytes - 1;
    7.37 +
    7.38 +        /* Is this a grows-down data segment? Special limit check if so. */
    7.39 +        if ( (dreg.attr.fields.type & 0xc) == 0x4 )
    7.40 +        {
    7.41 +            /* Is upper limit 0xFFFF or 0xFFFFFFFF? */
    7.42 +            if ( !dreg.attr.fields.db )
    7.43 +                last_byte = (uint16_t)last_byte;
    7.44 +
    7.45 +            /* Check first byte and last byte against respective bounds. */
    7.46 +            if ( (offset <= limit) || (last_byte < offset) )
    7.47 +                goto gpf;
    7.48 +        }
    7.49 +        else if ( (last_byte > limit) || (last_byte < offset) )
    7.50 +            goto gpf; /* last byte is beyond limit or wraps 0xFFFFFFFF */
    7.51 +
    7.52 +        /*
    7.53 +         * Hardware truncates to 32 bits in compatibility mode.
    7.54 +         * It does not truncate to 16 bits in 16-bit address-size mode.
    7.55 +         */
    7.56 +        addr = (uint32_t)(addr + dreg.base);
    7.57 +    }
    7.58 +    else if ( (seg == x86_seg_fs) || (seg == x86_seg_gs) )
    7.59 +    {
    7.60 +        /*
    7.61 +         * LONG MODE: FS and GS add a segment base.
    7.62 +         */
    7.63 +        addr += dreg.base;
    7.64 +    }
    7.65 +
    7.66 +    *paddr = addr;
    7.67 +    return 0;    
    7.68 +
    7.69 + gpf:
    7.70 +    /* Inject #GP(0). */
    7.71 +    hvm_inject_exception(TRAP_gp_fault, 0);
    7.72 +    return X86EMUL_PROPAGATE_FAULT;
    7.73 +}
    7.74 +
    7.75  static int
    7.76 -sh_x86_emulate_read(unsigned int seg,
    7.77 +sh_x86_emulate_read(enum x86_segment seg,
    7.78                      unsigned long offset,
    7.79                      unsigned long *val,
    7.80                      unsigned int bytes,
    7.81                      struct x86_emulate_ctxt *ctxt)
    7.82  {
    7.83 -    unsigned long addr = offset;
    7.84 +    unsigned long addr;
    7.85 +    int rc;
    7.86 +
    7.87 +    rc = hvm_translate_linear_addr(seg, offset, bytes, 0, &addr);
    7.88 +    if ( rc )
    7.89 +        return rc;
    7.90  
    7.91      *val = 0;
    7.92      // XXX -- this is WRONG.
    7.93 @@ -102,14 +175,19 @@ sh_x86_emulate_read(unsigned int seg,
    7.94  }
    7.95  
    7.96  static int
    7.97 -sh_x86_emulate_write(unsigned int seg,
    7.98 +sh_x86_emulate_write(enum x86_segment seg,
    7.99                       unsigned long offset,
   7.100                       unsigned long val,
   7.101                       unsigned int bytes,
   7.102                       struct x86_emulate_ctxt *ctxt)
   7.103  {
   7.104      struct vcpu *v = current;
   7.105 -    unsigned long addr = offset;
   7.106 +    unsigned long addr;
   7.107 +    int rc;
   7.108 +
   7.109 +    rc = hvm_translate_linear_addr(seg, offset, bytes, 1, &addr);
   7.110 +    if ( rc )
   7.111 +        return rc;
   7.112  
   7.113  #if 0
   7.114      SHADOW_PRINTK("d=%u v=%u a=%#lx v=%#lx bytes=%u\n",
   7.115 @@ -119,7 +197,7 @@ sh_x86_emulate_write(unsigned int seg,
   7.116  }
   7.117  
   7.118  static int 
   7.119 -sh_x86_emulate_cmpxchg(unsigned int seg,
   7.120 +sh_x86_emulate_cmpxchg(enum x86_segment seg,
   7.121                         unsigned long offset,
   7.122                         unsigned long old,
   7.123                         unsigned long new,
   7.124 @@ -127,7 +205,12 @@ sh_x86_emulate_cmpxchg(unsigned int seg,
   7.125                         struct x86_emulate_ctxt *ctxt)
   7.126  {
   7.127      struct vcpu *v = current;
   7.128 -    unsigned long addr = offset;
   7.129 +    unsigned long addr;
   7.130 +    int rc;
   7.131 +
   7.132 +    rc = hvm_translate_linear_addr(seg, offset, bytes, 1, &addr);
   7.133 +    if ( rc )
   7.134 +        return rc;
   7.135  
   7.136  #if 0
   7.137      SHADOW_PRINTK("d=%u v=%u a=%#lx o?=%#lx n:=%#lx bytes=%u\n",
   7.138 @@ -138,7 +221,7 @@ sh_x86_emulate_cmpxchg(unsigned int seg,
   7.139  }
   7.140  
   7.141  static int 
   7.142 -sh_x86_emulate_cmpxchg8b(unsigned int seg,
   7.143 +sh_x86_emulate_cmpxchg8b(enum x86_segment seg,
   7.144                           unsigned long offset,
   7.145                           unsigned long old_lo,
   7.146                           unsigned long old_hi,
   7.147 @@ -147,7 +230,12 @@ sh_x86_emulate_cmpxchg8b(unsigned int se
   7.148                           struct x86_emulate_ctxt *ctxt)
   7.149  {
   7.150      struct vcpu *v = current;
   7.151 -    unsigned long addr = offset;
   7.152 +    unsigned long addr;
   7.153 +    int rc;
   7.154 +
   7.155 +    rc = hvm_translate_linear_addr(seg, offset, 8, 1, &addr);
   7.156 +    if ( rc )
   7.157 +        return rc;
   7.158  
   7.159  #if 0
   7.160      SHADOW_PRINTK("d=%u v=%u a=%#lx o?=%#lx:%lx n:=%#lx:%lx\n",
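
hvm_translate_linear_addr() is the heart of this changeset: emulated PTE
writes now honour segment bases, limits, writability and expand-down
semantics instead of treating the offset as a linear address. A worked
example of the compatibility-mode path, with illustrative numbers:

    /* A 4-byte write at offset 0xfffe through a writable data segment
     * with base 0x10000, limit 0xf, g=1 (page granular):
     *
     *   limit     = (0xf << 12) | 0xfff   = 0xffff
     *   last_byte = 0xfffe + 4 - 1        = 0x10001 > limit  -> #GP(0)
     *
     * The same write at offset 0xfffc stays in bounds:
     *
     *   last_byte = 0xffff <= limit, no wraparound
     *   addr      = (uint32_t)(0xfffc + 0x10000) = 0x1fffc
     */
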
     8.1 --- a/xen/arch/x86/x86_emulate.c	Thu Nov 30 16:36:18 2006 +0000
     8.2 +++ b/xen/arch/x86/x86_emulate.c	Thu Nov 30 17:07:26 2006 +0000
     8.3 @@ -199,8 +199,8 @@ struct operand {
     8.4      /* OP_REG: Pointer to register field. */
     8.5      unsigned long *reg;
     8.6      /* OP_MEM: Segment and offset. */
     8.7 -    unsigned int  mem_seg;
     8.8 -    unsigned long mem_off;
     8.9 +    enum x86_segment mem_seg;
    8.10 +    unsigned long    mem_off;
    8.11  };
    8.12  
    8.13  /* EFLAGS bit definitions. */
    8.14 @@ -375,7 +375,7 @@ do{ __asm__ __volatile__ (              
    8.15  /* Fetch next part of the instruction being emulated. */
    8.16  #define _insn_fetch(_size)                                      \
    8.17  ({ unsigned long _x;                                            \
    8.18 -   rc = ops->read(_regs.cs, _regs.eip, &_x, (_size), ctxt);     \
    8.19 +   rc = ops->read(x86_seg_cs, _regs.eip, &_x, (_size), ctxt);   \
    8.20     if ( rc != 0 )                                               \
    8.21         goto done;                                               \
    8.22     _regs.eip += (_size);                                        \
    8.23 @@ -452,7 +452,7 @@ dump_instr(
    8.24      dprintf("Instr:");
    8.25      for ( i = 0; i < 16; i++, eip++ )
    8.26      {
    8.27 -        if ( ops->read(ctxt->regs->cs, eip, &x, 1, ctxt) != 0 )
    8.28 +        if ( ops->read(x86_seg_cs, eip, &x, 1, ctxt) != 0 )
    8.29              printk(" ??");
    8.30          else
    8.31              printk(" %02x", (uint8_t)x);
    8.32 @@ -476,8 +476,8 @@ x86_emulate_memop(
    8.33      struct operand src, dst;
    8.34      int mode = ctxt->mode;
    8.35  
    8.36 -    unsigned int  ea_seg = X86_SEG_DS;
    8.37 -    unsigned long ea_off = 0;
    8.38 +    enum x86_segment ea_seg = x86_seg_ds;
    8.39 +    unsigned long    ea_off = 0;
    8.40  
    8.41      switch ( mode )
    8.42      {
    8.43 @@ -513,22 +513,22 @@ x86_emulate_memop(
    8.44                  ad_bytes ^= 6;  /* switch between 2/4 bytes */
    8.45              break;
    8.46          case 0x2e: /* CS override */
    8.47 -            ea_seg = X86_SEG_CS;
    8.48 +            ea_seg = x86_seg_cs;
    8.49              break;
    8.50          case 0x3e: /* DS override */
    8.51 -            ea_seg = X86_SEG_DS;
    8.52 +            ea_seg = x86_seg_ds;
    8.53              break;
    8.54          case 0x26: /* ES override */
    8.55 -            ea_seg = X86_SEG_ES;
    8.56 +            ea_seg = x86_seg_es;
    8.57              break;
    8.58          case 0x64: /* FS override */
    8.59 -            ea_seg = X86_SEG_FS;
    8.60 +            ea_seg = x86_seg_fs;
    8.61              break;
    8.62          case 0x65: /* GS override */
    8.63 -            ea_seg = X86_SEG_GS;
    8.64 +            ea_seg = x86_seg_gs;
    8.65              break;
    8.66          case 0x36: /* SS override */
    8.67 -            ea_seg = X86_SEG_SS;
    8.68 +            ea_seg = x86_seg_ss;
    8.69              break;
    8.70          case 0xf0: /* LOCK */
    8.71              lock_prefix = 1;
    8.72 @@ -860,7 +860,7 @@ x86_emulate_memop(
    8.73          /* 64-bit mode: POP always pops a 64-bit operand. */
    8.74          if ( mode == X86EMUL_MODE_PROT64 )
    8.75              dst.bytes = 8;
    8.76 -        if ( (rc = ops->read(X86_SEG_SS, truncate_ea(_regs.esp),
    8.77 +        if ( (rc = ops->read(x86_seg_ss, truncate_ea(_regs.esp),
    8.78                               &dst.val, dst.bytes, ctxt)) != 0 )
    8.79              goto done;
    8.80          register_address_increment(_regs.esp, dst.bytes);
    8.81 @@ -942,7 +942,7 @@ x86_emulate_memop(
    8.82                      goto done;
    8.83              }
    8.84              register_address_increment(_regs.esp, -dst.bytes);
    8.85 -            if ( (rc = ops->write(X86_SEG_SS, truncate_ea(_regs.esp),
    8.86 +            if ( (rc = ops->write(x86_seg_ss, truncate_ea(_regs.esp),
    8.87                                    dst.val, dst.bytes, ctxt)) != 0 )
    8.88                  goto done;
    8.89              dst.val = dst.orig_val; /* skanky: disable writeback */
    8.90 @@ -1024,7 +1024,7 @@ x86_emulate_memop(
    8.91      case 0xa4 ... 0xa5: /* movs */
    8.92          dst.type  = OP_MEM;
    8.93          dst.bytes = (d & ByteOp) ? 1 : op_bytes;
    8.94 -        dst.mem_seg = X86_SEG_ES;
    8.95 +        dst.mem_seg = x86_seg_es;
    8.96          dst.mem_off = truncate_ea(_regs.edi);
    8.97          if ( (rc = ops->read(ea_seg, truncate_ea(_regs.esi),
    8.98                               &dst.val, dst.bytes, ctxt)) != 0 )
    8.99 @@ -1037,7 +1037,7 @@ x86_emulate_memop(
   8.100      case 0xaa ... 0xab: /* stos */
   8.101          dst.type  = OP_MEM;
   8.102          dst.bytes = (d & ByteOp) ? 1 : op_bytes;
   8.103 -        dst.mem_seg = X86_SEG_ES;
   8.104 +        dst.mem_seg = x86_seg_es;
   8.105          dst.mem_off = truncate_ea(_regs.edi);
   8.106          dst.val   = _regs.eax;
   8.107          register_address_increment(
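
One non-mechanical detail is visible in the movs/stos cases above: a segment
override (carried in ea_seg) rebinds only the source of a string move, while
the destination is architecturally fixed to ES:EDI. As a sketch
(movs_segments is a hypothetical helper):

    static void movs_segments(enum x86_segment ea_seg,
                              enum x86_segment *src, enum x86_segment *dst)
    {
        *src = ea_seg;      /* x86_seg_ds unless a prefix overrode it */
        *dst = x86_seg_es;  /* always ES:EDI, overrides do not apply */
    }
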
     9.1 --- a/xen/include/asm-x86/hvm/hvm.h	Thu Nov 30 16:36:18 2006 +0000
     9.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Thu Nov 30 17:07:26 2006 +0000
     9.3 @@ -17,28 +17,47 @@
     9.4   * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
     9.5   * Place - Suite 330, Boston, MA 02111-1307 USA.
     9.6   */
     9.7 +
     9.8  #ifndef __ASM_X86_HVM_HVM_H__
     9.9  #define __ASM_X86_HVM_HVM_H__
    9.10  
    9.11 -enum segment {
    9.12 -    seg_cs,
    9.13 -    seg_ss,
    9.14 -    seg_ds,
    9.15 -    seg_es,
    9.16 -    seg_fs,
    9.17 -    seg_gs,
    9.18 -    seg_tr,
    9.19 -    seg_ldtr,
    9.20 -    seg_gdtr,
    9.21 -    seg_idtr
    9.22 -};
    9.23 +#include <asm/x86_emulate.h>
    9.24 +
    9.25 +/* 
    9.26 + * Attribute for segment selector. This is a copy of bit 40:47 & 52:55 of the
    9.27 + * segment descriptor. It happens to match the format of an AMD SVM VMCB.
    9.28 + */
    9.29 +typedef union segment_attributes {
    9.30 +    u16 bytes;
    9.31 +    struct
    9.32 +    {
    9.33 +        u16 type:4;    /* 0;  Bit 40-43 */
    9.34 +        u16 s:   1;    /* 4;  Bit 44 */
    9.35 +        u16 dpl: 2;    /* 5;  Bit 45-46 */
    9.36 +        u16 p:   1;    /* 7;  Bit 47 */
    9.37 +        u16 avl: 1;    /* 8;  Bit 52 */
    9.38 +        u16 l:   1;    /* 9;  Bit 53 */
    9.39 +        u16 db:  1;    /* 10; Bit 54 */
    9.40 +        u16 g:   1;    /* 11; Bit 55 */
    9.41 +    } fields;
    9.42 +} __attribute__ ((packed)) segment_attributes_t;
    9.43 +
    9.44 +/*
    9.45 + * Full state of a segment register (visible and hidden portions).
    9.46 + * Again, this happens to match the format of an AMD SVM VMCB.
    9.47 + */
    9.48 +typedef struct segment_register {
    9.49 +    u16        sel;
    9.50 +    segment_attributes_t attr;
    9.51 +    u32        limit;
    9.52 +    u64        base;
    9.53 +} __attribute__ ((packed)) segment_register_t;
    9.54  
    9.55  /*
    9.56   * The hardware virtual machine (HVM) interface abstracts away from the
    9.57   * x86/x86_64 CPU virtualization assist specifics. Currently this interface
    9.58   * supports Intel's VT-x and AMD's SVM extensions.
    9.59   */
    9.60 -
    9.61  struct hvm_function_table {
    9.62      /*
    9.63       *  Disable HVM functionality
    9.64 @@ -74,7 +93,9 @@ struct hvm_function_table {
    9.65      int (*pae_enabled)(struct vcpu *v);
    9.66      int (*guest_x86_mode)(struct vcpu *v);
    9.67      unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
    9.68 -    unsigned long (*get_segment_base)(struct vcpu *v, enum segment seg);
    9.69 +    unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
    9.70 +    void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
    9.71 +                                 struct segment_register *reg);
    9.72  
    9.73      /* 
    9.74       * Re-set the value of CR3 that Xen runs on when handling VM exits
    9.75 @@ -89,6 +110,8 @@ struct hvm_function_table {
    9.76      void (*stts)(struct vcpu *v);
    9.77      void (*set_tsc_offset)(struct vcpu *v, u64 offset);
    9.78  
    9.79 +    void (*inject_exception)(unsigned int trapnr, int errcode);
    9.80 +
    9.81      void (*init_ap_context)(struct vcpu_guest_context *ctxt,
    9.82                              int vcpuid, int trampoline_vector);
    9.83  
    9.84 @@ -172,11 +195,18 @@ hvm_get_guest_ctrl_reg(struct vcpu *v, u
    9.85  }
    9.86  
    9.87  static inline unsigned long
    9.88 -hvm_get_segment_base(struct vcpu *v, enum segment seg)
    9.89 +hvm_get_segment_base(struct vcpu *v, enum x86_segment seg)
    9.90  {
    9.91      return hvm_funcs.get_segment_base(v, seg);
    9.92  }
    9.93  
    9.94 +static inline void
    9.95 +hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
    9.96 +                         struct segment_register *reg)
    9.97 +{
    9.98 +    hvm_funcs.get_segment_register(v, seg, reg);
    9.99 +}
   9.100 +
   9.101  void hvm_stts(struct vcpu *v);
   9.102  void hvm_set_guest_time(struct vcpu *v, u64 gtime);
   9.103  void hvm_freeze_time(struct vcpu *v);
   9.104 @@ -190,6 +220,12 @@ hvm_init_ap_context(struct vcpu_guest_co
   9.105      return hvm_funcs.init_ap_context(ctxt, vcpuid, trampoline_vector);
   9.106  }
   9.107  
   9.108 +static inline void
   9.109 +hvm_inject_exception(unsigned int trapnr, int errcode)
   9.110 +{
   9.111 +    hvm_funcs.inject_exception(trapnr, errcode);
   9.112 +}
   9.113 +
   9.114  int hvm_bringup_ap(int vcpuid, int trampoline_vector);
   9.115  
   9.116  #endif /* __ASM_X86_HVM_HVM_H__ */
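
The two new hooks give common code vendor-neutral access to full segment
state and to exception injection. A typical caller, mirroring the check done
in the shadow code above (a sketch, not part of the patch):

    struct segment_register sreg;

    /* Refuse a store through a segment that is not writable data. */
    hvm_get_segment_register(current, x86_seg_ds, &sreg);
    if ( (sreg.attr.fields.type & 0xa) != 0x2 )
        hvm_inject_exception(TRAP_gp_fault, 0);
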
    10.1 --- a/xen/include/asm-x86/hvm/svm/vmcb.h	Thu Nov 30 16:36:18 2006 +0000
    10.2 +++ b/xen/include/asm-x86/hvm/svm/vmcb.h	Thu Nov 30 17:07:26 2006 +0000
    10.3 @@ -311,33 +311,9 @@ enum {
    10.4      SVM_CPU_STATE_ASSIST_ENABLED,
    10.5  };  
    10.6  
    10.7 -/* 
    10.8 - * Attribute for segment selector. This is a copy of bit 40:47 & 52:55 of the
    10.9 - * segment descriptor. 
   10.10 - */
   10.11 -typedef union
   10.12 -{
   10.13 -    u16 bytes;
   10.14 -    struct
   10.15 -    {
   10.16 -        u16 type:4;    /* 0;  Bit 40-43 */
   10.17 -        u16 s:   1;    /* 4;  Bit 44 */
   10.18 -        u16 dpl: 2;    /* 5;  Bit 45-46 */
   10.19 -        u16 p:   1;    /* 7;  Bit 47 */
   10.20 -        u16 avl: 1;    /* 8;  Bit 52 */
   10.21 -        u16 l:   1;    /* 9;  Bit 53 */
   10.22 -        u16 db:  1;    /* 10; Bit 54 */
   10.23 -        u16 g:   1;    /* 11; Bit 55 */
   10.24 -    } fields;
   10.25 -} __attribute__ ((packed)) segment_attributes_t;
   10.26 -
   10.27 -typedef struct 
   10.28 -{
   10.29 -    u16        sel;
   10.30 -    segment_attributes_t attributes;
   10.31 -    u32        limit;
   10.32 -    u64        base;
   10.33 -} __attribute__ ((packed)) segment_selector_t;
   10.34 +/* Definitions of segment state are borrowed by the generic HVM code. */
   10.35 +typedef segment_attributes_t svm_segment_attributes_t;
   10.36 +typedef segment_register_t svm_segment_register_t;
   10.37  
   10.38  typedef union 
   10.39  {
   10.40 @@ -426,16 +402,16 @@ struct vmcb_struct {
   10.41      u64 h_cr3;                  /* offset 0xB0 */
   10.42      u64 res09[105];             /* offset 0xB8 pad to save area */
   10.43  
   10.44 -    segment_selector_t es;      /* offset 1024 */
   10.45 -    segment_selector_t cs;
   10.46 -    segment_selector_t ss;
   10.47 -    segment_selector_t ds;
   10.48 -    segment_selector_t fs;
   10.49 -    segment_selector_t gs;
   10.50 -    segment_selector_t gdtr;
   10.51 -    segment_selector_t ldtr;
   10.52 -    segment_selector_t idtr;
   10.53 -    segment_selector_t tr;
   10.54 +    svm_segment_register_t es;      /* offset 1024 */
   10.55 +    svm_segment_register_t cs;
   10.56 +    svm_segment_register_t ss;
   10.57 +    svm_segment_register_t ds;
   10.58 +    svm_segment_register_t fs;
   10.59 +    svm_segment_register_t gs;
   10.60 +    svm_segment_register_t gdtr;
   10.61 +    svm_segment_register_t ldtr;
   10.62 +    svm_segment_register_t idtr;
   10.63 +    svm_segment_register_t tr;
   10.64      u64 res10[5];
   10.65      u8 res11[3];
   10.66      u8 cpl;
    11.1 --- a/xen/include/asm-x86/x86_emulate.h	Thu Nov 30 16:36:18 2006 +0000
    11.2 +++ b/xen/include/asm-x86/x86_emulate.h	Thu Nov 30 17:07:26 2006 +0000
    11.3 @@ -11,16 +11,26 @@
    11.4  
    11.5  struct x86_emulate_ctxt;
    11.6  
    11.7 -#define X86_SEG_CS 0
    11.8 -#define X86_SEG_SS 1
    11.9 -#define X86_SEG_DS 2
   11.10 -#define X86_SEG_ES 3
   11.11 -#define X86_SEG_FS 4
   11.12 -#define X86_SEG_GS 5
   11.13 +/*
   11.14 + * Comprehensive enumeration of x86 segment registers. Note that the system
   11.15 + * registers (TR, LDTR, GDTR, IDTR) are never referenced by the emulator.
   11.16 + */
   11.17 +enum x86_segment {
   11.18 +    /* General purpose. */
   11.19 +    x86_seg_cs,
   11.20 +    x86_seg_ss,
   11.21 +    x86_seg_ds,
   11.22 +    x86_seg_es,
   11.23 +    x86_seg_fs,
   11.24 +    x86_seg_gs,
   11.25 +    /* System. */
   11.26 +    x86_seg_tr,
   11.27 +    x86_seg_ldtr,
   11.28 +    x86_seg_gdtr,
   11.29 +    x86_seg_idtr
   11.30 +};
   11.31  
   11.32  /*
   11.33 - * x86_emulate_ops:
   11.34 - * 
   11.35   * These operations represent the instruction emulator's interface to memory.
   11.36   * 
   11.37   * NOTES:
   11.38 @@ -45,7 +55,7 @@ struct x86_emulate_ops
   11.39  {
   11.40      /*
   11.41       * All functions:
   11.42 -     *  @seg:   [IN ] Segment being dereferenced (specified as X86_SEG_??).
   11.43 +     *  @seg:   [IN ] Segment being dereferenced (specified as x86_seg_??).
   11.44       *  @offset [IN ] Offset within segment.
   11.45       */
   11.46  
   11.47 @@ -55,7 +65,7 @@ struct x86_emulate_ops
   11.48       *  @bytes: [IN ] Number of bytes to read from memory.
   11.49       */
   11.50      int (*read)(
   11.51 -        unsigned int seg,
   11.52 +        enum x86_segment seg,
   11.53          unsigned long offset,
   11.54          unsigned long *val,
   11.55          unsigned int bytes,
   11.56 @@ -67,7 +77,7 @@ struct x86_emulate_ops
   11.57       *  @bytes: [IN ] Number of bytes to write to memory.
   11.58       */
   11.59      int (*write)(
   11.60 -        unsigned int seg,
   11.61 +        enum x86_segment seg,
   11.62          unsigned long offset,
   11.63          unsigned long val,
   11.64          unsigned int bytes,
   11.65 @@ -80,7 +90,7 @@ struct x86_emulate_ops
   11.66       *  @bytes: [IN ] Number of bytes to access using CMPXCHG.
   11.67       */
   11.68      int (*cmpxchg)(
   11.69 -        unsigned int seg,
   11.70 +        enum x86_segment seg,
   11.71          unsigned long offset,
   11.72          unsigned long old,
   11.73          unsigned long new,
   11.74 @@ -98,7 +108,7 @@ struct x86_emulate_ops
   11.75       *     to defining a function that always returns X86EMUL_UNHANDLEABLE.
   11.76       */
   11.77      int (*cmpxchg8b)(
   11.78 -        unsigned int seg,
   11.79 +        enum x86_segment seg,
   11.80          unsigned long offset,
   11.81          unsigned long old_lo,
   11.82          unsigned long old_hi,
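
With the callbacks now taking the typed enumeration rather than a bare
unsigned int, a backend can switch on real segment names. A minimal handler
satisfying the read signature (dummy_read is hypothetical; a real backend
would translate seg:offset to a linear address, cf.
hvm_translate_linear_addr, and copy from guest memory):

    static int dummy_read(
        enum x86_segment seg,
        unsigned long offset,
        unsigned long *val,
        unsigned int bytes,
        struct x86_emulate_ctxt *ctxt)
    {
        *val = 0;
        return X86EMUL_UNHANDLEABLE;
    }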