ia64/xen-unstable

changeset 16501:32ec5dbe2978

merge with xen-unstable.hg
author Alex Williamson <alex.williamson@hp.com>
date Fri Nov 30 08:54:33 2007 -0700 (2007-11-30)
parents f9ca1d8c9e65 3057f813da14
children aa430556d33f
     1.1 --- a/extras/mini-os/arch/x86/mm.c	Thu Nov 29 12:15:43 2007 -0700
     1.2 +++ b/extras/mini-os/arch/x86/mm.c	Fri Nov 30 08:54:33 2007 -0700
     1.3 @@ -270,6 +270,9 @@ void build_pagetable(unsigned long *star
     1.4          start_address += PAGE_SIZE;
     1.5      }
     1.6  
     1.7 +    if (HYPERVISOR_update_va_mapping(0, (pte_t) {}, UVMF_INVLPG))
     1.8 +        printk("Unable to unmap page 0\n");
     1.9 +
    1.10      *start_pfn = pt_pfn;
    1.11  }
    1.12  
     2.1 --- a/extras/mini-os/gnttab.c	Thu Nov 29 12:15:43 2007 -0700
     2.2 +++ b/extras/mini-os/gnttab.c	Fri Nov 30 08:54:33 2007 -0700
     2.3 @@ -18,6 +18,7 @@
     2.4  #include <os.h>
     2.5  #include <mm.h>
     2.6  #include <gnttab.h>
     2.7 +#include <semaphore.h>
     2.8  
     2.9  #define NR_RESERVED_ENTRIES 8
    2.10  
    2.11 @@ -31,20 +32,29 @@
    2.12  
    2.13  static grant_entry_t *gnttab_table;
    2.14  static grant_ref_t gnttab_list[NR_GRANT_ENTRIES];
    2.15 +static __DECLARE_SEMAPHORE_GENERIC(gnttab_sem, NR_GRANT_ENTRIES);
    2.16  
    2.17  static void
    2.18  put_free_entry(grant_ref_t ref)
    2.19  {
    2.20 +    unsigned long flags;
    2.21 +    local_irq_save(flags);
    2.22      gnttab_list[ref] = gnttab_list[0];
    2.23      gnttab_list[0]  = ref;
    2.24 -
    2.25 +    local_irq_restore(flags);
    2.26 +    up(&gnttab_sem);
    2.27  }
    2.28  
    2.29  static grant_ref_t
    2.30  get_free_entry(void)
    2.31  {
    2.32 -    unsigned int ref = gnttab_list[0];
    2.33 +    unsigned int ref;
    2.34 +    unsigned long flags;
    2.35 +    down(&gnttab_sem);
    2.36 +    local_irq_save(flags);
    2.37 +    ref = gnttab_list[0];
    2.38      gnttab_list[0] = gnttab_list[ref];
    2.39 +    local_irq_restore(flags);
    2.40      return ref;
    2.41  }
    2.42  
     3.1 --- a/extras/mini-os/lib/string.c	Thu Nov 29 12:15:43 2007 -0700
     3.2 +++ b/extras/mini-os/lib/string.c	Fri Nov 30 08:54:33 2007 -0700
     3.3 @@ -142,7 +142,7 @@ char * strchr(const char * s, int c)
     3.4  
     3.5  char * strrchr(const char * s, int c)
     3.6  {
     3.7 -        const char *res;
     3.8 +        const char *res = NULL;
     3.9          for(; *s != '\0'; ++s)
    3.10                  if (*s == (char) c)
    3.11                          res = s;
     4.1 --- a/extras/mini-os/netfront.c	Thu Nov 29 12:15:43 2007 -0700
     4.2 +++ b/extras/mini-os/netfront.c	Fri Nov 30 08:54:33 2007 -0700
     4.3 @@ -147,6 +147,7 @@ moretodo:
     4.4          struct net_buffer* buf = &rx_buffers[id];
     4.5          void* page = buf->page;
     4.6  
     4.7 +        /* We are sure to have free gnttab entries since they got released above */
     4.8          buf->gref = req->gref = 
     4.9              gnttab_grant_access(0,virt_to_mfn(page),0);
    4.10  
    4.11 @@ -436,8 +437,9 @@ void netfront_xmit(unsigned char* data,i
    4.12      down(&tx_sem);
    4.13  
    4.14      local_irq_save(flags);
    4.15 +    id = get_id_from_freelist(tx_freelist);
    4.16 +    local_irq_restore(flags);
    4.17  
    4.18 -    id = get_id_from_freelist(tx_freelist);
    4.19      buf = &tx_buffers[id];
    4.20      page = buf->page;
    4.21  
    4.22 @@ -461,7 +463,7 @@ void netfront_xmit(unsigned char* data,i
    4.23  
    4.24      if(notify) notify_remote_via_evtchn(info->evtchn);
    4.25  
    4.26 +    local_irq_save(flags);
    4.27      network_tx_buf_gc();
    4.28 -
    4.29      local_irq_restore(flags);
    4.30  }
     5.1 --- a/extras/mini-os/xenbus/xenbus.c	Thu Nov 29 12:15:43 2007 -0700
     5.2 +++ b/extras/mini-os/xenbus/xenbus.c	Fri Nov 30 08:54:33 2007 -0700
     5.3 @@ -79,7 +79,6 @@ void wait_for_watch(void)
     5.4      schedule();
     5.5      remove_waiter(w);
     5.6      wake(current);
     5.7 -    remove_wait_queue(&w);
     5.8  }
     5.9  
    5.10  char* xenbus_wait_for_value(const char* path,const char* value)
     6.1 --- a/xen/arch/x86/domctl.c	Thu Nov 29 12:15:43 2007 -0700
     6.2 +++ b/xen/arch/x86/domctl.c	Fri Nov 30 08:54:33 2007 -0700
     6.3 @@ -709,11 +709,6 @@ long arch_do_domctl(
     6.4  
     6.5          evc = &domctl->u.ext_vcpucontext;
     6.6  
     6.7 -        ret = (evc->size < sizeof(*evc)) ? -EINVAL : 0;
     6.8 -        evc->size = sizeof(*evc);
     6.9 -        if ( ret != 0 )
    6.10 -            break;
    6.11 -
    6.12          ret = -ESRCH;
    6.13          d = rcu_lock_domain_by_id(domctl->domain);
    6.14          if ( d == NULL )
    6.15 @@ -726,6 +721,7 @@ long arch_do_domctl(
    6.16  
    6.17          if ( domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext )
    6.18          {
    6.19 +            evc->size = sizeof(*evc);
    6.20  #ifdef __x86_64__
    6.21              evc->sysenter_callback_cs      = v->arch.sysenter_callback_cs;
    6.22              evc->sysenter_callback_eip     = v->arch.sysenter_callback_eip;
    6.23 @@ -744,6 +740,9 @@ long arch_do_domctl(
    6.24          }
    6.25          else
    6.26          {
    6.27 +            ret = -EINVAL;
    6.28 +            if ( evc->size != sizeof(*evc) )
    6.29 +                goto ext_vcpucontext_out;
    6.30  #ifdef __x86_64__
    6.31              fixup_guest_code_selector(d, evc->sysenter_callback_cs);
    6.32              v->arch.sysenter_callback_cs      = evc->sysenter_callback_cs;
    6.33 @@ -755,7 +754,6 @@ long arch_do_domctl(
    6.34              v->arch.syscall32_disables_events = evc->syscall32_disables_events;
    6.35  #else
    6.36              /* We do not support syscall/syscall32/sysenter on 32-bit Xen. */
    6.37 -            ret = -EINVAL;
    6.38              if ( (evc->sysenter_callback_cs & ~3) ||
    6.39                   evc->sysenter_callback_eip ||
    6.40                   (evc->syscall32_callback_cs & ~3) ||
    6.41 @@ -768,7 +766,8 @@ long arch_do_domctl(
    6.42  
    6.43      ext_vcpucontext_out:
    6.44          rcu_unlock_domain(d);
    6.45 -        if ( copy_to_guest(u_domctl, domctl, 1) )
    6.46 +        if ( (domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext) &&
    6.47 +             copy_to_guest(u_domctl, domctl, 1) )
    6.48              ret = -EFAULT;
    6.49      }
    6.50      break;
     7.1 --- a/xen/arch/x86/hvm/hpet.c	Thu Nov 29 12:15:43 2007 -0700
     7.2 +++ b/xen/arch/x86/hvm/hpet.c	Fri Nov 30 08:54:33 2007 -0700
     7.3 @@ -170,7 +170,7 @@ static unsigned long hpet_read(
     7.4  
     7.5      result = val;
     7.6      if ( length != 8 )
     7.7 -        result = (val >> ((addr & 7) * 8)) & ((1UL << (length * 8)) - 1);
     7.8 +        result = (val >> ((addr & 7) * 8)) & ((1ULL << (length * 8)) - 1);
     7.9  
    7.10      spin_unlock(&h->lock);
    7.11  
     8.1 --- a/xen/arch/x86/hvm/platform.c	Thu Nov 29 12:15:43 2007 -0700
     8.2 +++ b/xen/arch/x86/hvm/platform.c	Fri Nov 30 08:54:33 2007 -0700
     8.3 @@ -1051,13 +1051,18 @@ void handle_mmio(unsigned long gpa)
     8.4      }
     8.5  
     8.6      if ( mmio_decode(address_bytes, inst, mmio_op, &ad_size,
     8.7 -                     &op_size, &seg_sel) == DECODE_failure ) {
     8.8 -        printk("handle_mmio: failed to decode instruction\n");
     8.9 -        printk("mmio opcode: gpa 0x%lx, len %d:", gpa, inst_len);
    8.10 +                     &op_size, &seg_sel) == DECODE_failure )
    8.11 +    {
    8.12 +        gdprintk(XENLOG_WARNING,
    8.13 +                 "handle_mmio: failed to decode instruction\n");
    8.14 +        gdprintk(XENLOG_WARNING,
    8.15 +                 "mmio opcode: gpa 0x%lx, len %d:", gpa, inst_len);
    8.16          for ( i = 0; i < inst_len; i++ )
    8.17              printk(" %02x", inst[i] & 0xFF);
    8.18          printk("\n");
    8.19 -        domain_crash_synchronous();
    8.20 +
    8.21 +        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
    8.22 +        return;
    8.23      }
    8.24  
    8.25      regs->eip += inst_len; /* advance %eip */
     9.1 --- a/xen/arch/x86/hvm/vlapic.c	Thu Nov 29 12:15:43 2007 -0700
     9.2 +++ b/xen/arch/x86/hvm/vlapic.c	Fri Nov 30 08:54:33 2007 -0700
     9.3 @@ -661,7 +661,8 @@ static void vlapic_write(struct vcpu *v,
     9.4  
     9.5      case APIC_TMICT:
     9.6      {
     9.7 -        uint64_t period = APIC_BUS_CYCLE_NS * (uint32_t)val * vlapic->hw.timer_divisor;
     9.8 +        uint64_t period = (uint64_t)APIC_BUS_CYCLE_NS *
     9.9 +                            (uint32_t)val * vlapic->hw.timer_divisor;
    9.10  
    9.11          vlapic_set_reg(vlapic, APIC_TMICT, val);
    9.12          create_periodic_time(current, &vlapic->pt, period, vlapic->pt.irq,
    9.13 @@ -820,8 +821,10 @@ static void lapic_rearm(struct vlapic *s
    9.14      unsigned long tmict;
    9.15  
    9.16      tmict = vlapic_get_reg(s, APIC_TMICT);
    9.17 -    if (tmict > 0) {
    9.18 -        uint64_t period = APIC_BUS_CYCLE_NS * (uint32_t)tmict * s->hw.timer_divisor;
    9.19 +    if ( tmict > 0 )
    9.20 +    {
    9.21 +        uint64_t period = (uint64_t)APIC_BUS_CYCLE_NS *
    9.22 +                            (uint32_t)tmict * s->hw.timer_divisor;
    9.23          uint32_t lvtt = vlapic_get_reg(s, APIC_LVTT);
    9.24  
    9.25          s->pt.irq = lvtt & APIC_VECTOR_MASK;
    9.26 @@ -830,9 +833,9 @@ static void lapic_rearm(struct vlapic *s
    9.27                               &s->timer_last_update);
    9.28  
    9.29          printk("lapic_load to rearm the actimer:"
    9.30 -                    "bus cycle is %uns, "
    9.31 -                    "saved tmict count %lu, period %"PRIu64"ns, irq=%"PRIu8"\n",
    9.32 -                    APIC_BUS_CYCLE_NS, tmict, period, s->pt.irq);
    9.33 +               "bus cycle is %uns, "
    9.34 +               "saved tmict count %lu, period %"PRIu64"ns, irq=%"PRIu8"\n",
    9.35 +               APIC_BUS_CYCLE_NS, tmict, period, s->pt.irq);
    9.36      }
    9.37  
    9.38      lapic_info(s);
    10.1 --- a/xen/arch/x86/hvm/vmx/realmode.c	Thu Nov 29 12:15:43 2007 -0700
    10.2 +++ b/xen/arch/x86/hvm/vmx/realmode.c	Fri Nov 30 08:54:33 2007 -0700
    10.3 @@ -88,12 +88,12 @@ static void realmode_deliver_exception(
    10.4  
    10.5      if ( rm_ctxt->ctxt.addr_size == 32 )
    10.6      {
    10.7 -        regs->esp -= 4;
    10.8 +        regs->esp -= 6;
    10.9          pstk = regs->esp;
   10.10      }
   10.11      else
   10.12      {
   10.13 -        pstk = (uint16_t)(regs->esp - 4);
   10.14 +        pstk = (uint16_t)(regs->esp - 6);
   10.15          regs->esp &= ~0xffff;
   10.16          regs->esp |= pstk;
   10.17      }
   10.18 @@ -237,7 +237,8 @@ realmode_emulate_cmpxchg(
   10.19      unsigned int bytes,
   10.20      struct x86_emulate_ctxt *ctxt)
   10.21  {
   10.22 -    return X86EMUL_UNHANDLEABLE;
   10.23 +    /* Fix this in case the guest is really relying on r-m-w atomicity. */
   10.24 +    return realmode_emulate_write(seg, offset, new, bytes, ctxt);
   10.25  }
   10.26  
   10.27  static int
   10.28 @@ -337,6 +338,36 @@ realmode_read_cr(
   10.29      return X86EMUL_OKAY;
   10.30  }
   10.31  
   10.32 +static int
   10.33 +realmode_write_cr(
   10.34 +    unsigned int reg,
   10.35 +    unsigned long val,
   10.36 +    struct x86_emulate_ctxt *ctxt)
   10.37 +{
   10.38 +    switch ( reg )
   10.39 +    {
   10.40 +    case 0:
   10.41 +        if ( !hvm_set_cr0(val) )
   10.42 +            return X86EMUL_UNHANDLEABLE;
   10.43 +        break;
   10.44 +    case 2:
   10.45 +        current->arch.hvm_vcpu.guest_cr[2] = val;
   10.46 +        break;
   10.47 +    case 3:
   10.48 +        if ( !hvm_set_cr3(val) )
   10.49 +            return X86EMUL_UNHANDLEABLE;
   10.50 +        break;
   10.51 +    case 4:
   10.52 +        if ( !hvm_set_cr4(val) )
   10.53 +            return X86EMUL_UNHANDLEABLE;
   10.54 +        break;
   10.55 +    default:
   10.56 +        return X86EMUL_UNHANDLEABLE;
   10.57 +    }
   10.58 +
   10.59 +    return X86EMUL_OKAY;
   10.60 +}
   10.61 +
   10.62  static int realmode_write_rflags(
   10.63      unsigned long val,
   10.64      struct x86_emulate_ctxt *ctxt)
   10.65 @@ -411,6 +442,7 @@ static struct x86_emulate_ops realmode_e
   10.66      .read_io       = realmode_read_io,
   10.67      .write_io      = realmode_write_io,
   10.68      .read_cr       = realmode_read_cr,
   10.69 +    .write_cr      = realmode_write_cr,
   10.70      .write_rflags  = realmode_write_rflags,
   10.71      .wbinvd        = realmode_wbinvd,
   10.72      .cpuid         = realmode_cpuid,
   10.73 @@ -419,12 +451,12 @@ static struct x86_emulate_ops realmode_e
   10.74      .inject_sw_interrupt = realmode_inject_sw_interrupt
   10.75  };
   10.76  
   10.77 -int vmx_realmode(struct cpu_user_regs *regs)
   10.78 +void vmx_realmode(struct cpu_user_regs *regs)
   10.79  {
   10.80      struct vcpu *curr = current;
   10.81      struct realmode_emulate_ctxt rm_ctxt;
   10.82      unsigned long intr_info;
   10.83 -    int i, rc = 0;
   10.84 +    int i, rc;
   10.85      u32 intr_shadow, new_intr_shadow;
   10.86  
   10.87      rm_ctxt.ctxt.regs = regs;
   10.88 @@ -487,29 +519,43 @@ int vmx_realmode(struct cpu_user_regs *r
   10.89              hvm_hlt(regs->eflags);
   10.90  
   10.91          if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
   10.92 -        {
   10.93 -            rc = 0;
   10.94              break;
   10.95 -        }
   10.96  
   10.97          if ( rc == X86EMUL_UNHANDLEABLE )
   10.98          {
   10.99 -            gdprintk(XENLOG_DEBUG,
  10.100 -                     "RM %04x:%08lx: %02x %02x %02x %02x %02x %02x\n",
  10.101 +            gdprintk(XENLOG_ERR,
  10.102 +                     "Real-mode emulation failed @ %04x:%08lx: "
  10.103 +                     "%02x %02x %02x %02x %02x %02x\n",
  10.104                       rm_ctxt.seg_reg[x86_seg_cs].sel, rm_ctxt.insn_buf_eip,
  10.105                       rm_ctxt.insn_buf[0], rm_ctxt.insn_buf[1],
  10.106                       rm_ctxt.insn_buf[2], rm_ctxt.insn_buf[3],
  10.107                       rm_ctxt.insn_buf[4], rm_ctxt.insn_buf[5]);
  10.108 -            gdprintk(XENLOG_ERR, "Emulation failed\n");
  10.109 -            rc = -EINVAL;
  10.110 -            break;
  10.111 +            domain_crash_synchronous();
  10.112          }
  10.113      }
  10.114  
  10.115 +    /*
  10.116 +     * Cannot enter protected mode with bogus selector RPLs and DPLs. Hence we
  10.117 +     * fix up as best we can, even though this deviates from native execution
  10.118 +     */
  10.119 +    if  ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
  10.120 +    {
  10.121 +        /* CS.RPL == SS.RPL == SS.DPL == 0. */
  10.122 +        rm_ctxt.seg_reg[x86_seg_cs].sel &= ~3;
  10.123 +        rm_ctxt.seg_reg[x86_seg_ss].sel &= ~3;
  10.124 +        /* DS,ES,FS,GS: The most uninvasive trick is to set DPL == RPL. */
  10.125 +        rm_ctxt.seg_reg[x86_seg_ds].attr.fields.dpl =
  10.126 +            rm_ctxt.seg_reg[x86_seg_ds].sel & 3;
  10.127 +        rm_ctxt.seg_reg[x86_seg_es].attr.fields.dpl =
  10.128 +            rm_ctxt.seg_reg[x86_seg_es].sel & 3;
  10.129 +        rm_ctxt.seg_reg[x86_seg_fs].attr.fields.dpl =
  10.130 +            rm_ctxt.seg_reg[x86_seg_fs].sel & 3;
  10.131 +        rm_ctxt.seg_reg[x86_seg_gs].attr.fields.dpl =
  10.132 +            rm_ctxt.seg_reg[x86_seg_gs].sel & 3;
  10.133 +    }
  10.134 +
  10.135      for ( i = 0; i < 10; i++ )
  10.136          hvm_set_segment_register(curr, i, &rm_ctxt.seg_reg[i]);
  10.137 -
  10.138 -    return rc;
  10.139  }
  10.140  
  10.141  int vmx_realmode_io_complete(void)
  10.142 @@ -520,12 +566,6 @@ int vmx_realmode_io_complete(void)
  10.143      if ( !curr->arch.hvm_vmx.real_mode_io_in_progress )
  10.144          return 0;
  10.145  
  10.146 -#if 0
  10.147 -    gdprintk(XENLOG_DEBUG, "RM I/O %d %c bytes=%d addr=%lx data=%lx\n",
  10.148 -             p->type, p->dir ? 'R' : 'W',
  10.149 -             (int)p->size, (long)p->addr, (long)p->data);
  10.150 -#endif
  10.151 -
  10.152      curr->arch.hvm_vmx.real_mode_io_in_progress = 0;
  10.153      if ( p->dir == IOREQ_READ )
  10.154      {
    11.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Thu Nov 29 12:15:43 2007 -0700
    11.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Fri Nov 30 08:54:33 2007 -0700
    11.3 @@ -819,10 +819,16 @@ static unsigned long vmr(unsigned long f
    11.4      return rc ? 0 : val;
    11.5  }
    11.6  
    11.7 -void vmcs_dump_vcpu(void)
    11.8 +void vmcs_dump_vcpu(struct vcpu *v)
    11.9  {
   11.10 +    struct cpu_user_regs *regs = &v->arch.guest_context.user_regs;
   11.11      unsigned long long x;
   11.12  
   11.13 +    if ( v == current )
   11.14 +        regs = guest_cpu_user_regs();
   11.15 +
   11.16 +    vmx_vmcs_enter(v);
   11.17 +
   11.18      printk("*** Guest State ***\n");
   11.19      printk("CR0: actual=0x%016llx, shadow=0x%016llx, gh_mask=%016llx\n",
   11.20             (unsigned long long)vmr(GUEST_CR0),
   11.21 @@ -841,11 +847,14 @@ void vmcs_dump_vcpu(void)
   11.22      printk("     target2=%016llx, target3=%016llx\n",
   11.23             (unsigned long long)vmr(CR3_TARGET_VALUE2),
   11.24             (unsigned long long)vmr(CR3_TARGET_VALUE3));
   11.25 -    printk("RSP = 0x%016llx  RIP = 0x%016llx\n", 
   11.26 +    printk("RSP = 0x%016llx (0x%016llx)  RIP = 0x%016llx (0x%016llx)\n", 
   11.27             (unsigned long long)vmr(GUEST_RSP),
   11.28 -           (unsigned long long)vmr(GUEST_RIP));
   11.29 -    printk("RFLAGS=0x%016llx  DR7 = 0x%016llx\n", 
   11.30 +           (unsigned long long)regs->esp,
   11.31 +           (unsigned long long)vmr(GUEST_RIP),
   11.32 +           (unsigned long long)regs->eip);
   11.33 +    printk("RFLAGS=0x%016llx (0x%016llx)  DR7 = 0x%016llx\n", 
   11.34             (unsigned long long)vmr(GUEST_RFLAGS),
   11.35 +           (unsigned long long)regs->eflags,
   11.36             (unsigned long long)vmr(GUEST_DR7));
   11.37      printk("Sysenter RSP=%016llx CS:RIP=%04x:%016llx\n",
   11.38             (unsigned long long)vmr(GUEST_SYSENTER_ESP),
   11.39 @@ -926,6 +935,8 @@ void vmcs_dump_vcpu(void)
   11.40             (uint32_t)vmr(IDT_VECTORING_ERROR_CODE));
   11.41      printk("TPR Threshold = 0x%02x\n",
   11.42             (uint32_t)vmr(TPR_THRESHOLD));
   11.43 +
   11.44 +    vmx_vmcs_exit(v);
   11.45  }
   11.46  
   11.47  static void vmcs_dump(unsigned char ch)
   11.48 @@ -945,9 +956,7 @@ static void vmcs_dump(unsigned char ch)
   11.49          for_each_vcpu ( d, v )
   11.50          {
   11.51              printk("\tVCPU %d\n", v->vcpu_id);
   11.52 -            vmx_vmcs_enter(v);
   11.53 -            vmcs_dump_vcpu();
   11.54 -            vmx_vmcs_exit(v);
   11.55 +            vmcs_dump_vcpu(v);
   11.56          }
   11.57      }
   11.58  
    12.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Nov 29 12:15:43 2007 -0700
    12.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Fri Nov 30 08:54:33 2007 -0700
    12.3 @@ -2722,6 +2722,7 @@ static void vmx_failed_vmentry(unsigned 
    12.4  {
    12.5      unsigned int failed_vmentry_reason = (uint16_t)exit_reason;
    12.6      unsigned long exit_qualification = __vmread(EXIT_QUALIFICATION);
    12.7 +    struct vcpu *curr = current;
    12.8  
    12.9      printk("Failed vm entry (exit reason 0x%x) ", exit_reason);
   12.10      switch ( failed_vmentry_reason )
   12.11 @@ -2734,7 +2735,7 @@ static void vmx_failed_vmentry(unsigned 
   12.12          break;
   12.13      case EXIT_REASON_MACHINE_CHECK:
   12.14          printk("caused by machine check.\n");
   12.15 -        HVMTRACE_0D(MCE, current);
   12.16 +        HVMTRACE_0D(MCE, curr);
   12.17          do_machine_check(regs);
   12.18          break;
   12.19      default:
   12.20 @@ -2743,10 +2744,10 @@ static void vmx_failed_vmentry(unsigned 
   12.21      }
   12.22  
   12.23      printk("************* VMCS Area **************\n");
   12.24 -    vmcs_dump_vcpu();
   12.25 +    vmcs_dump_vcpu(curr);
   12.26      printk("**************************************\n");
   12.27  
   12.28 -    domain_crash(current->domain);
   12.29 +    domain_crash(curr->domain);
   12.30  }
   12.31  
   12.32  asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
    13.1 --- a/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c	Thu Nov 29 12:15:43 2007 -0700
    13.2 +++ b/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c	Fri Nov 30 08:54:33 2007 -0700
    13.3 @@ -276,6 +276,9 @@ static int __iommu_flush_context(
    13.4      unsigned long flag;
    13.5      unsigned long start_time;
    13.6  
    13.7 +    /* Domain id in context is 1 based */
    13.8 +    did++;
    13.9 +
   13.10      /*
   13.11       * In the non-present entry flush case, if hardware doesn't cache
   13.12       * non-present entry we do nothing and if hardware cache non-present
   13.13 @@ -360,6 +363,9 @@ static int __iommu_flush_iotlb(struct io
   13.14      unsigned long flag;
   13.15      unsigned long start_time;
   13.16  
   13.17 +    /* Domain id in context is 1 based */
   13.18 +    did++;
   13.19 +
   13.20      /*
   13.21       * In the non-present entry flush case, if hardware doesn't cache
   13.22       * non-present entry we do nothing and if hardware cache non-present
   13.23 @@ -1037,6 +1043,18 @@ static int domain_context_mapping_one(
   13.24          context_set_translation_type(*context, CONTEXT_TT_PASS_THRU);
   13.25      else
   13.26      {
   13.27 +        if ( !hd->pgd )
   13.28 +        {
   13.29 +            struct dma_pte *pgd = (struct dma_pte *)alloc_xenheap_page();
   13.30 +            if ( !pgd )
   13.31 +            {
   13.32 +                spin_unlock_irqrestore(&hd->mapping_lock, flags);
   13.33 +                return -ENOMEM;
   13.34 +            }
   13.35 +            memset(pgd, 0, PAGE_SIZE);
   13.36 +            hd->pgd = pgd;
   13.37 +        }
   13.38 + 
   13.39          context_set_address_root(*context, virt_to_maddr(hd->pgd));
   13.40          context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
   13.41      }
   13.42 @@ -1429,7 +1447,7 @@ void iommu_domain_teardown(struct domain
   13.43      return_devices_to_dom0(d);
   13.44  }
   13.45  
   13.46 -static int domain_context_mapped(struct domain *domain, struct pci_dev *pdev)
   13.47 +static int domain_context_mapped(struct pci_dev *pdev)
   13.48  {
   13.49      struct acpi_drhd_unit *drhd;
   13.50      struct iommu *iommu;
   13.51 @@ -1589,7 +1607,7 @@ static int iommu_prepare_rmrr_dev(
   13.52      if ( ret )
   13.53          return ret;
   13.54  
   13.55 -    if ( domain_context_mapped(d, pdev) == 0 )
   13.56 +    if ( domain_context_mapped(pdev) == 0 )
   13.57      {
   13.58          drhd = acpi_find_matched_drhd_unit(pdev);
   13.59          ret = domain_context_mapping(d, drhd->iommu, pdev);
    14.1 --- a/xen/arch/x86/x86_32/xen.lds.S	Thu Nov 29 12:15:43 2007 -0700
    14.2 +++ b/xen/arch/x86/x86_32/xen.lds.S	Fri Nov 30 08:54:33 2007 -0700
    14.3 @@ -63,8 +63,9 @@ SECTIONS
    14.4    __initcall_start = .;
    14.5    .initcall.init : { *(.initcall1.init) } :text
    14.6    __initcall_end = .;
    14.7 -   .xsm_initcall.init : { __xsm_initcall_start = .; 
    14.8 -   *(.xsm_initcall.init) __xsm_initcall_end = .; }
    14.9 +  __xsm_initcall_start = .; 
   14.10 +  .xsm_initcall.init : { *(.xsm_initcall.init) } :text
   14.11 +  __xsm_initcall_end = .;
   14.12    . = ALIGN(PAGE_SIZE);
   14.13    __init_end = .;
   14.14  
    15.1 --- a/xen/arch/x86/x86_64/xen.lds.S	Thu Nov 29 12:15:43 2007 -0700
    15.2 +++ b/xen/arch/x86/x86_64/xen.lds.S	Fri Nov 30 08:54:33 2007 -0700
    15.3 @@ -61,6 +61,9 @@ SECTIONS
    15.4    __initcall_start = .;
    15.5    .initcall.init : { *(.initcall1.init) } :text
    15.6    __initcall_end = .;
    15.7 +  __xsm_initcall_start = .; 
    15.8 +  .xsm_initcall.init : { *(.xsm_initcall.init) } :text
    15.9 +  __xsm_initcall_end = .;
   15.10    . = ALIGN(PAGE_SIZE);
   15.11    __init_end = .;
   15.12  
    16.1 --- a/xen/arch/x86/x86_emulate.c	Thu Nov 29 12:15:43 2007 -0700
    16.2 +++ b/xen/arch/x86/x86_emulate.c	Fri Nov 30 08:54:33 2007 -0700
    16.3 @@ -152,7 +152,8 @@ static uint8_t opcode_table[256] = {
    16.4      DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov,
    16.5      ByteOp|DstMem|SrcImm|ModRM|Mov, DstMem|SrcImm|ModRM|Mov,
    16.6      /* 0xC8 - 0xCF */
    16.7 -    0, 0, 0, 0, ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
    16.8 +    ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
    16.9 +    ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
   16.10      /* 0xD0 - 0xD7 */
   16.11      ByteOp|DstMem|SrcImplicit|ModRM, DstMem|SrcImplicit|ModRM, 
   16.12      ByteOp|DstMem|SrcImplicit|ModRM, DstMem|SrcImplicit|ModRM, 
   16.13 @@ -190,7 +191,7 @@ static uint8_t twobyte_table[256] = {
   16.14      /* 0x28 - 0x2F */
   16.15      0, 0, 0, 0, 0, 0, 0, 0,
   16.16      /* 0x30 - 0x37 */
   16.17 -    ImplicitOps, 0, ImplicitOps, 0, 0, 0, 0, 0,
   16.18 +    ImplicitOps, ImplicitOps, ImplicitOps, 0, 0, 0, 0, 0,
   16.19      /* 0x38 - 0x3F */
   16.20      0, 0, 0, 0, 0, 0, 0, 0,
   16.21      /* 0x40 - 0x47 */
   16.22 @@ -227,10 +228,10 @@ static uint8_t twobyte_table[256] = {
   16.23      ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov,
   16.24      /* 0xA0 - 0xA7 */
   16.25      ImplicitOps, ImplicitOps, ImplicitOps, DstBitBase|SrcReg|ModRM,
   16.26 -    0, 0, 0, 0, 
   16.27 +    DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 0, 0, 
   16.28      /* 0xA8 - 0xAF */
   16.29      ImplicitOps, ImplicitOps, 0, DstBitBase|SrcReg|ModRM,
   16.30 -    0, 0, 0, DstReg|SrcMem|ModRM,
   16.31 +    DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 0, DstReg|SrcMem|ModRM,
   16.32      /* 0xB0 - 0xB7 */
   16.33      ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM,
   16.34      DstReg|SrcMem|ModRM|Mov, DstBitBase|SrcReg|ModRM,
   16.35 @@ -270,6 +271,13 @@ struct operand {
   16.36      };
   16.37  };
   16.38  
   16.39 +/* MSRs. */
   16.40 +#define MSR_TSC   0x10
   16.41 +
   16.42 +/* Control register flags. */
   16.43 +#define CR0_PE    (1<<0)
   16.44 +#define CR4_TSD   (1<<2)
   16.45 +
   16.46  /* EFLAGS bit definitions. */
   16.47  #define EFLG_VIP  (1<<20)
   16.48  #define EFLG_VIF  (1<<19)
   16.49 @@ -476,13 +484,13 @@ do{ asm volatile (                      
   16.50  })
   16.51  #define insn_fetch_type(_type) ((_type)insn_fetch_bytes(sizeof(_type)))
   16.52  
   16.53 -#define _truncate_ea(ea, byte_width)            \
   16.54 +#define truncate_word(ea, byte_width)           \
   16.55  ({  unsigned long __ea = (ea);                  \
   16.56      unsigned int _width = (byte_width);         \
   16.57      ((_width == sizeof(unsigned long)) ? __ea : \
   16.58       (__ea & ((1UL << (_width << 3)) - 1)));    \
   16.59  })
   16.60 -#define truncate_ea(ea) _truncate_ea((ea), ad_bytes)
   16.61 +#define truncate_ea(ea) truncate_word((ea), ad_bytes)
   16.62  
   16.63  #define mode_64bit() (def_ad_bytes == 8)
   16.64  
   16.65 @@ -500,12 +508,11 @@ do {                                    
   16.66      }                                                                   \
   16.67  })
   16.68  
   16.69 -/* Given byte has even parity (even number of 1s)? */
   16.70 -static int even_parity(uint8_t v)
   16.71 +/* Given longword has even parity (even number of 1s)? */
   16.72 +static int even_parity(unsigned long v)
   16.73  {
   16.74 -    asm ( "test %%al,%%al; setp %%al"
   16.75 -              : "=a" (v) : "0" (v) );
   16.76 -    return v;
   16.77 +    asm ( "test %0,%0; setp %b0" : "=a" (v) : "0" (v) );
   16.78 +    return (uint8_t)v;
   16.79  }
   16.80  
   16.81  /* Update address held in a register, based on addressing mode. */
   16.82 @@ -526,10 +533,10 @@ do {                                    
   16.83  
   16.84  #define sp_pre_dec(dec) ({                                              \
   16.85      _register_address_increment(_regs.esp, -(dec), ctxt->sp_size/8);    \
   16.86 -    _truncate_ea(_regs.esp, ctxt->sp_size/8);                           \
   16.87 +    truncate_word(_regs.esp, ctxt->sp_size/8);                          \
   16.88  })
   16.89  #define sp_post_inc(inc) ({                                             \
   16.90 -    unsigned long __esp = _truncate_ea(_regs.esp, ctxt->sp_size/8);     \
   16.91 +    unsigned long __esp = truncate_word(_regs.esp, ctxt->sp_size/8);    \
   16.92      _register_address_increment(_regs.esp, (inc), ctxt->sp_size/8);     \
   16.93      __esp;                                                              \
   16.94  })
   16.95 @@ -738,7 +745,7 @@ in_realmode(
   16.96          return 0;
   16.97  
   16.98      rc = ops->read_cr(0, &cr0, ctxt);
   16.99 -    return (!rc && !(cr0 & 1));
  16.100 +    return (!rc && !(cr0 & CR0_PE));
  16.101  }
  16.102  
  16.103  static int
  16.104 @@ -1477,7 +1484,7 @@ x86_emulate(
  16.105      case 0xc4: /* les */ {
  16.106          unsigned long sel;
  16.107          dst.val = x86_seg_es;
  16.108 -    les:
  16.109 +    les: /* dst.val identifies the segment */
  16.110          generate_exception_if(src.type != OP_MEM, EXC_UD);
  16.111          if ( (rc = ops->read(src.mem.seg, src.mem.off + src.bytes,
  16.112                               &sel, 2, ctxt)) != 0 )
  16.113 @@ -1716,7 +1723,8 @@ x86_emulate(
  16.114              break;
  16.115          case 2: /* call (near) */
  16.116          case 4: /* jmp (near) */
  16.117 -            if ( ((op_bytes = dst.bytes) != 8) && mode_64bit() )
  16.118 +            dst.type = OP_NONE;
  16.119 +            if ( (dst.bytes != 8) && mode_64bit() )
  16.120              {
  16.121                  dst.bytes = op_bytes = 8;
  16.122                  if ( dst.type == OP_REG )
  16.123 @@ -1732,7 +1740,7 @@ x86_emulate(
  16.124              break;
  16.125          case 3: /* call (far, absolute indirect) */
  16.126          case 5: /* jmp (far, absolute indirect) */ {
  16.127 -            unsigned long sel, eip = dst.val;
  16.128 +            unsigned long sel;
  16.129  
  16.130              if ( (rc = ops->read(dst.mem.seg, dst.mem.off+dst.bytes,
  16.131                                   &sel, 2, ctxt)) )
  16.132 @@ -1752,7 +1760,7 @@ x86_emulate(
  16.133  
  16.134              if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 )
  16.135                  goto done;
  16.136 -            _regs.eip = eip;
  16.137 +            _regs.eip = dst.val;
  16.138  
  16.139              dst.type = OP_NONE;
  16.140              break;
  16.141 @@ -1859,7 +1867,7 @@ x86_emulate(
  16.142          src.val = x86_seg_es;
  16.143      pop_seg:
  16.144          fail_if(ops->write_segment == NULL);
  16.145 -        /* 64-bit mode: PUSH defaults to a 64-bit operand. */
  16.146 +        /* 64-bit mode: POP defaults to a 64-bit operand. */
  16.147          if ( mode_64bit() && (op_bytes == 4) )
  16.148              op_bytes = 8;
  16.149          if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
  16.150 @@ -1907,7 +1915,7 @@ x86_emulate(
  16.151          _regs.eflags &= ~(EFLG_SF|EFLG_ZF|EFLG_PF);
  16.152          _regs.eflags |= ((uint8_t)_regs.eax == 0) ? EFLG_ZF : 0;
  16.153          _regs.eflags |= (( int8_t)_regs.eax <  0) ? EFLG_SF : 0;
  16.154 -        _regs.eflags |= even_parity(_regs.eax) ? EFLG_PF : 0;
  16.155 +        _regs.eflags |= even_parity((uint8_t)_regs.eax) ? EFLG_PF : 0;
  16.156          break;
  16.157      }
  16.158  
  16.159 @@ -1931,7 +1939,7 @@ x86_emulate(
  16.160          _regs.eflags &= ~(EFLG_SF|EFLG_ZF|EFLG_PF);
  16.161          _regs.eflags |= ((uint8_t)_regs.eax == 0) ? EFLG_ZF : 0;
  16.162          _regs.eflags |= (( int8_t)_regs.eax <  0) ? EFLG_SF : 0;
  16.163 -        _regs.eflags |= even_parity(_regs.eax) ? EFLG_PF : 0;
  16.164 +        _regs.eflags |= even_parity((uint8_t)_regs.eax) ? EFLG_PF : 0;
  16.165          break;
  16.166      }
  16.167  
  16.168 @@ -1998,9 +2006,18 @@ x86_emulate(
  16.169              (unsigned long *)&_regs.ecx, (unsigned long *)&_regs.eax };
  16.170          generate_exception_if(mode_64bit(), EXC_UD);
  16.171          for ( i = 0; i < 8; i++ )
  16.172 +        {
  16.173              if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
  16.174 -                                 regs[i], op_bytes, ctxt)) != 0 )
  16.175 -            goto done;
  16.176 +                                 &dst.val, op_bytes, ctxt)) != 0 )
  16.177 +                goto done;
  16.178 +            switch ( op_bytes )
  16.179 +            {
  16.180 +            case 1: *(uint8_t  *)regs[i] = (uint8_t)dst.val; break;
  16.181 +            case 2: *(uint16_t *)regs[i] = (uint16_t)dst.val; break;
  16.182 +            case 4: *regs[i] = (uint32_t)dst.val; break; /* 64b: zero-ext */
  16.183 +            case 8: *regs[i] = dst.val; break;
  16.184 +            }
  16.185 +        }
  16.186          break;
  16.187      }
  16.188  
  16.189 @@ -2262,6 +2279,77 @@ x86_emulate(
  16.190          break;
  16.191      }
  16.192  
  16.193 +    case 0xc8: /* enter imm16,imm8 */ {
  16.194 +        uint16_t size = insn_fetch_type(uint16_t);
  16.195 +        uint8_t depth = insn_fetch_type(uint8_t) & 31;
  16.196 +        int i;
  16.197 +
  16.198 +        dst.type = OP_REG;
  16.199 +        dst.bytes = (mode_64bit() && (op_bytes == 4)) ? 8 : op_bytes;
  16.200 +        dst.reg = (unsigned long *)&_regs.ebp;
  16.201 +        if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
  16.202 +                              _regs.ebp, dst.bytes, ctxt)) )
  16.203 +            goto done;
  16.204 +        dst.val = _regs.esp;
  16.205 +
  16.206 +        if ( depth > 0 )
  16.207 +        {
  16.208 +            for ( i = 1; i < depth; i++ )
  16.209 +            {
  16.210 +                unsigned long ebp, temp_data;
  16.211 +                ebp = truncate_word(_regs.ebp - i*dst.bytes, ctxt->sp_size/8);
  16.212 +                if ( (rc = ops->read(x86_seg_ss, ebp,
  16.213 +                                     &temp_data, dst.bytes, ctxt)) ||
  16.214 +                     (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
  16.215 +                                      temp_data, dst.bytes, ctxt)) )
  16.216 +                    goto done;
  16.217 +            }
  16.218 +            if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
  16.219 +                                  dst.val, dst.bytes, ctxt)) )
  16.220 +                goto done;
  16.221 +        }
  16.222 +
  16.223 +        sp_pre_dec(size);
  16.224 +        break;
  16.225 +    }
  16.226 +
  16.227 +    case 0xc9: /* leave */
  16.228 +        /* First writeback, to %%esp. */
  16.229 +        dst.type = OP_REG;
  16.230 +        dst.bytes = (mode_64bit() && (op_bytes == 4)) ? 8 : op_bytes;
  16.231 +        dst.reg = (unsigned long *)&_regs.esp;
  16.232 +        dst.val = _regs.ebp;
  16.233 +
  16.234 +        /* Flush first writeback, since there is a second. */
  16.235 +        switch ( dst.bytes )
  16.236 +        {
  16.237 +        case 1: *(uint8_t  *)dst.reg = (uint8_t)dst.val; break;
  16.238 +        case 2: *(uint16_t *)dst.reg = (uint16_t)dst.val; break;
  16.239 +        case 4: *dst.reg = (uint32_t)dst.val; break; /* 64b: zero-ext */
  16.240 +        case 8: *dst.reg = dst.val; break;
  16.241 +        }
  16.242 +
  16.243 +        /* Second writeback, to %%ebp. */
  16.244 +        dst.reg = (unsigned long *)&_regs.ebp;
  16.245 +        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(dst.bytes),
  16.246 +                             &dst.val, dst.bytes, ctxt)) )
  16.247 +            goto done;
  16.248 +        break;
  16.249 +
  16.250 +    case 0xca: /* ret imm16 (far) */
  16.251 +    case 0xcb: /* ret (far) */ {
  16.252 +        int offset = (b == 0xca) ? insn_fetch_type(uint16_t) : 0;
  16.253 +        op_bytes = mode_64bit() ? 8 : op_bytes;
  16.254 +        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
  16.255 +                             &dst.val, op_bytes, ctxt)) || 
  16.256 +             (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes + offset),
  16.257 +                             &src.val, op_bytes, ctxt)) ||
  16.258 +             (rc = load_seg(x86_seg_cs, (uint16_t)src.val, ctxt, ops)) )
  16.259 +            goto done;
  16.260 +        _regs.eip = dst.val;
  16.261 +        break;
  16.262 +    }
  16.263 +
  16.264      case 0xcc: /* int3 */
  16.265          src.val = EXC_BP;
  16.266          goto swint;
  16.267 @@ -2317,7 +2405,7 @@ x86_emulate(
  16.268          _regs.eflags &= ~(EFLG_SF|EFLG_ZF|EFLG_PF);
  16.269          _regs.eflags |= ((uint8_t)_regs.eax == 0) ? EFLG_ZF : 0;
  16.270          _regs.eflags |= (( int8_t)_regs.eax <  0) ? EFLG_SF : 0;
  16.271 -        _regs.eflags |= even_parity(_regs.eax) ? EFLG_PF : 0;
  16.272 +        _regs.eflags |= even_parity((uint8_t)_regs.eax) ? EFLG_PF : 0;
  16.273          break;
  16.274      }
  16.275  
  16.276 @@ -2329,7 +2417,7 @@ x86_emulate(
  16.277          _regs.eflags &= ~(EFLG_SF|EFLG_ZF|EFLG_PF);
  16.278          _regs.eflags |= ((uint8_t)_regs.eax == 0) ? EFLG_ZF : 0;
  16.279          _regs.eflags |= (( int8_t)_regs.eax <  0) ? EFLG_SF : 0;
  16.280 -        _regs.eflags |= even_parity(_regs.eax) ? EFLG_PF : 0;
  16.281 +        _regs.eflags |= even_parity((uint8_t)_regs.eax) ? EFLG_PF : 0;
  16.282          break;
  16.283      }
  16.284  
  16.285 @@ -2528,6 +2616,35 @@ x86_emulate(
  16.286          emulate_2op_SrcV_nobyte("bt", src, dst, _regs.eflags);
  16.287          break;
  16.288  
  16.289 +    case 0xa4: /* shld imm8,r,r/m */
  16.290 +    case 0xa5: /* shld %%cl,r,r/m */
  16.291 +    case 0xac: /* shrd imm8,r,r/m */
  16.292 +    case 0xad: /* shrd %%cl,r,r/m */ {
  16.293 +        uint8_t shift, width = dst.bytes << 3;
  16.294 +        shift = (b & 1) ? (uint8_t)_regs.ecx : insn_fetch_type(uint8_t);
  16.295 +        if ( (shift &= width - 1) == 0 )
  16.296 +            break;
  16.297 +        dst.orig_val = truncate_word(dst.orig_val, dst.bytes);
  16.298 +        dst.val = ((shift == width) ? src.val :
  16.299 +                   (b & 8) ?
  16.300 +                   /* shrd */
  16.301 +                   ((dst.orig_val >> shift) |
  16.302 +                    truncate_word(src.val << (width - shift), dst.bytes)) :
  16.303 +                   /* shld */
  16.304 +                   ((dst.orig_val << shift) |
  16.305 +                    ((src.val >> (width - shift)) & ((1ull << shift) - 1))));
  16.306 +        dst.val = truncate_word(dst.val, dst.bytes);
  16.307 +        _regs.eflags &= ~(EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_PF|EFLG_CF);
  16.308 +        if ( (dst.val >> ((b & 8) ? (shift - 1) : (width - shift))) & 1 )
  16.309 +            _regs.eflags |= EFLG_CF;
  16.310 +        if ( ((dst.val ^ dst.orig_val) >> (width - 1)) & 1 )
  16.311 +            _regs.eflags |= EFLG_OF;
  16.312 +        _regs.eflags |= ((dst.val >> (width - 1)) & 1) ? EFLG_SF : 0;
  16.313 +        _regs.eflags |= (dst.val == 0) ? EFLG_ZF : 0;
  16.314 +        _regs.eflags |= even_parity(dst.val) ? EFLG_PF : 0;
  16.315 +        break;
  16.316 +    }
  16.317 +
  16.318      case 0xb3: btr: /* btr */
  16.319          emulate_2op_SrcV_nobyte("btr", src, dst, _regs.eflags);
  16.320          break;
  16.321 @@ -2651,6 +2768,7 @@ x86_emulate(
  16.322      {
  16.323      case 0x01: /* Grp7 */ {
  16.324          struct segment_register reg;
  16.325 +        unsigned long base, limit, cr0, cr0w;
  16.326  
  16.327          switch ( modrm_reg & 7 )
  16.328          {
  16.329 @@ -2676,11 +2794,12 @@ x86_emulate(
  16.330              fail_if(ops->write_segment == NULL);
  16.331              memset(&reg, 0, sizeof(reg));
  16.332              if ( (rc = ops->read(ea.mem.seg, ea.mem.off+0,
  16.333 -                                 (unsigned long *)&reg.limit, 2, ctxt)) ||
  16.334 +                                 &limit, 2, ctxt)) ||
  16.335                   (rc = ops->read(ea.mem.seg, ea.mem.off+2,
  16.336 -                                 (unsigned long *)&reg.base,
  16.337 -                                 mode_64bit() ? 8 : 4, ctxt)) )
  16.338 +                                 &base, mode_64bit() ? 8 : 4, ctxt)) )
  16.339                  goto done;
  16.340 +            reg.base = base;
  16.341 +            reg.limit = limit;
  16.342              if ( op_bytes == 2 )
  16.343                  reg.base &= 0xffffff;
  16.344              if ( (rc = ops->write_segment((modrm_reg & 1) ?
  16.345 @@ -2688,6 +2807,29 @@ x86_emulate(
  16.346                                            &reg, ctxt)) )
  16.347                  goto done;
  16.348              break;
  16.349 +        case 4: /* smsw */
  16.350 +            ea.bytes = 2;
  16.351 +            dst = ea;
  16.352 +            fail_if(ops->read_cr == NULL);
  16.353 +            if ( (rc = ops->read_cr(0, &dst.val, ctxt)) )
  16.354 +                goto done;
  16.355 +            d |= Mov; /* force writeback */
  16.356 +            break;
  16.357 +        case 6: /* lmsw */
  16.358 +            fail_if(ops->read_cr == NULL);
  16.359 +            fail_if(ops->write_cr == NULL);
  16.360 +            if ( (rc = ops->read_cr(0, &cr0, ctxt)) )
  16.361 +                goto done;
  16.362 +            if ( ea.type == OP_REG )
  16.363 +                cr0w = *ea.reg;
  16.364 +            else if ( (rc = ops->read(ea.mem.seg, ea.mem.off,
  16.365 +                                      &cr0w, 2, ctxt)) )
  16.366 +                goto done;
  16.367 +            cr0 &= 0xffff0000;
  16.368 +            cr0 |= (uint16_t)cr0w;
  16.369 +            if ( (rc = ops->write_cr(0, cr0, ctxt)) )
  16.370 +                goto done;
  16.371 +            break;
  16.372          default:
  16.373              goto cannot_emulate;
  16.374          }
  16.375 @@ -2763,6 +2905,21 @@ x86_emulate(
  16.376          break;
  16.377      }
  16.378  
  16.379 +    case 0x31: /* rdtsc */ {
  16.380 +        unsigned long cr4;
  16.381 +        uint64_t val;
  16.382 +        fail_if(ops->read_cr == NULL);
  16.383 +        if ( (rc = ops->read_cr(4, &cr4, ctxt)) )
  16.384 +            goto done;
  16.385 +        generate_exception_if((cr4 & CR4_TSD) && !mode_ring0(), EXC_GP);
  16.386 +        fail_if(ops->read_msr == NULL);
  16.387 +        if ( (rc = ops->read_msr(MSR_TSC, &val, ctxt)) != 0 )
  16.388 +            goto done;
  16.389 +        _regs.edx = (uint32_t)(val >> 32);
  16.390 +        _regs.eax = (uint32_t)(val >>  0);
  16.391 +        break;
  16.392 +    }
  16.393 +
  16.394      case 0x32: /* rdmsr */ {
  16.395          uint64_t val;
  16.396          generate_exception_if(!mode_ring0(), EXC_GP);
    17.1 --- a/xen/drivers/char/console.c	Thu Nov 29 12:15:43 2007 -0700
    17.2 +++ b/xen/drivers/char/console.c	Fri Nov 30 08:54:33 2007 -0700
    17.3 @@ -835,7 +835,7 @@ void debugtrace_printk(const char *fmt, 
    17.4      snprintf(buf, sizeof(buf), "%u ", ++count);
    17.5  
    17.6      va_start(args, fmt);
    17.7 -    (void)vsnprintf(buf + strlen(buf), sizeof(buf), fmt, args);
    17.8 +    (void)vsnprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), fmt, args);
    17.9      va_end(args);
   17.10  
   17.11      if ( debugtrace_send_to_console )
    18.1 --- a/xen/include/asm-x86/hvm/vmx/intel-iommu.h	Thu Nov 29 12:15:43 2007 -0700
    18.2 +++ b/xen/include/asm-x86/hvm/vmx/intel-iommu.h	Fri Nov 30 08:54:33 2007 -0700
    18.3 @@ -89,7 +89,8 @@
    18.4  #define cap_plmr(c)        (((c) >> 5) & 1)
    18.5  #define cap_rwbf(c)        (((c) >> 4) & 1)
    18.6  #define cap_afl(c)        (((c) >> 3) & 1)
    18.7 -#define cap_ndoms(c)        (2 ^ (4 + 2 * ((c) & 0x7)))
    18.8 +#define cap_ndoms(c)        (1 << (4 + 2 * ((c) & 0x7)))
    18.9 +
   18.10  /*
   18.11   * Extended Capability Register
   18.12   */
    19.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Thu Nov 29 12:15:43 2007 -0700
    19.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Fri Nov 30 08:54:33 2007 -0700
    19.3 @@ -28,7 +28,7 @@
    19.4  #endif
    19.5  
    19.6  extern void start_vmx(void);
    19.7 -extern void vmcs_dump_vcpu(void);
    19.8 +extern void vmcs_dump_vcpu(struct vcpu *v);
    19.9  extern void setup_vmcs_dump(void);
   19.10  extern int  vmx_cpu_up(void);
   19.11  extern void vmx_cpu_down(void);
    20.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Thu Nov 29 12:15:43 2007 -0700
    20.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Fri Nov 30 08:54:33 2007 -0700
    20.3 @@ -37,7 +37,7 @@ void vmx_cpuid_intercept(
    20.4      unsigned int *eax, unsigned int *ebx,
    20.5      unsigned int *ecx, unsigned int *edx);
    20.6  void vmx_wbinvd_intercept(void);
    20.7 -int vmx_realmode(struct cpu_user_regs *regs);
    20.8 +void vmx_realmode(struct cpu_user_regs *regs);
    20.9  int vmx_realmode_io_complete(void);
   20.10  
   20.11  /*