ia64/xen-unstable

changeset 16028:9eff4c97053b

hvm: Re-jig event delivery logic to better integrate TPR management.
Signed-off-by: Keir Fraser <keir@xensource.com>
author Keir Fraser <keir@xensource.com>
date Mon Oct 01 14:11:15 2007 +0100 (2007-10-01)
parents 69a74ac976cb
children 772674585a1a
files xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/irq.c xen/arch/x86/hvm/svm/intr.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c xen/arch/x86/hvm/vlapic.c xen/arch/x86/hvm/vmx/intr.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/hvm/vpic.c xen/arch/x86/hvm/vpt.c xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/irq.h xen/include/asm-x86/hvm/vlapic.h xen/include/asm-x86/hvm/vpic.h xen/include/asm-x86/hvm/vpt.h
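
The shape of the change, before the per-file diffs: the scalar enum hvm_intack becomes a two-field struct hvm_intack carrying both the interrupt source and its vector, and the boolean interrupts_enabled() hook becomes interrupt_blocked(), which reports why delivery is blocked (shadow, RFLAGS.IE, TPR, NMI-IRET). A minimal user-space model of the new type and its construction idiom, mirroring the hvm.h hunk below (an illustration, not the Xen header itself):

    #include <stdint.h>
    #include <stdio.h>

    enum hvm_intsrc { hvm_intsrc_none, hvm_intsrc_pic, hvm_intsrc_lapic, hvm_intsrc_nmi };
    struct hvm_intack {
        uint8_t source;  /* holds an enum hvm_intsrc value */
        uint8_t vector;
    };
    #define hvm_intack_none       ((struct hvm_intack){ hvm_intsrc_none,  0 })
    #define hvm_intack_lapic(vec) ((struct hvm_intack){ hvm_intsrc_lapic, (vec) })

    int main(void)
    {
        struct hvm_intack intack = hvm_intack_lapic(0x51);
        /* Structs cannot be compared with ==, so callers test the source field. */
        if (intack.source != hvm_intsrc_none)
            printf("deliver vector %#x from source %u\n", intack.vector, intack.source);
        return 0;
    }
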
line diff
     1.1 --- a/xen/arch/x86/hvm/hvm.c	Mon Oct 01 13:18:19 2007 +0100
     1.2 +++ b/xen/arch/x86/hvm/hvm.c	Mon Oct 01 14:11:15 2007 +0100
     1.3 @@ -737,7 +737,7 @@ int hvm_set_cr4(unsigned long value)
     1.4      old_cr = v->arch.hvm_vcpu.guest_cr[4];
     1.5      v->arch.hvm_vcpu.guest_cr[4] = value;
     1.6      hvm_update_guest_cr(v, 4);
     1.7 -  
     1.8 +
     1.9      /* Modifying CR4.{PSE,PAE,PGE} invalidates all TLB entries, inc. Global. */
    1.10      if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
    1.11          paging_update_paging_modes(v);
     2.1 --- a/xen/arch/x86/hvm/irq.c	Mon Oct 01 13:18:19 2007 +0100
     2.2 +++ b/xen/arch/x86/hvm/irq.c	Mon Oct 01 14:11:15 2007 +0100
     2.3 @@ -285,49 +285,63 @@ void hvm_set_callback_via(struct domain 
     2.4      }
     2.5  }
     2.6  
     2.7 -enum hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
     2.8 +struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
     2.9  {
    2.10      struct hvm_domain *plat = &v->domain->arch.hvm_domain;
    2.11 +    int vector;
    2.12  
    2.13      if ( unlikely(v->nmi_pending) )
    2.14          return hvm_intack_nmi;
    2.15  
    2.16 -    if ( vlapic_has_interrupt(v) != -1 )
    2.17 -        return hvm_intack_lapic;
    2.18 +    if ( vlapic_accept_pic_intr(v) && plat->vpic[0].int_output )
    2.19 +        return hvm_intack_pic(0);
    2.20  
    2.21 -    if ( !vlapic_accept_pic_intr(v) )
    2.22 -        return hvm_intack_none;
    2.23 +    vector = vlapic_has_pending_irq(v);
    2.24 +    if ( vector != -1 )
    2.25 +        return hvm_intack_lapic(vector);
    2.26  
    2.27 -    return plat->vpic[0].int_output ? hvm_intack_pic : hvm_intack_none;
    2.28 +    return hvm_intack_none;
    2.29  }
    2.30  
    2.31 -int hvm_vcpu_ack_pending_irq(struct vcpu *v, enum hvm_intack type, int *vector)
    2.32 +struct hvm_intack hvm_vcpu_ack_pending_irq(
    2.33 +    struct vcpu *v, struct hvm_intack intack)
    2.34  {
    2.35 -    switch ( type )
    2.36 +    int vector;
    2.37 +
    2.38 +    switch ( intack.source )
    2.39      {
    2.40 -    case hvm_intack_nmi:
    2.41 -        return test_and_clear_bool(v->nmi_pending);
    2.42 -    case hvm_intack_lapic:
    2.43 -        return ((*vector = cpu_get_apic_interrupt(v)) != -1);
    2.44 -    case hvm_intack_pic:
    2.45 +    case hvm_intsrc_nmi:
    2.46 +        if ( !test_and_clear_bool(v->nmi_pending) )
    2.47 +            intack = hvm_intack_none;
    2.48 +        break;
    2.49 +    case hvm_intsrc_pic:
    2.50          ASSERT(v->vcpu_id == 0);
    2.51 -        return ((*vector = cpu_get_pic_interrupt(v)) != -1);
    2.52 +        if ( (vector = vpic_ack_pending_irq(v)) == -1 )
    2.53 +            intack = hvm_intack_none;
    2.54 +        else
    2.55 +            intack.vector = (uint8_t)vector;
    2.56 +        break;
    2.57 +    case hvm_intsrc_lapic:
    2.58 +        if ( !vlapic_ack_pending_irq(v, intack.vector) )
    2.59 +            intack = hvm_intack_none;
    2.60 +        break;
    2.61      default:
    2.62 +        intack = hvm_intack_none;
    2.63          break;
    2.64      }
    2.65  
    2.66 -    return 0;
    2.67 +    return intack;
    2.68  }
    2.69  
    2.70 -int get_isa_irq_vector(struct vcpu *v, int isa_irq, enum hvm_intack src)
    2.71 +int get_isa_irq_vector(struct vcpu *v, int isa_irq, enum hvm_intsrc src)
    2.72  {
    2.73      unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);
    2.74  
    2.75 -    if ( src == hvm_intack_pic )
    2.76 +    if ( src == hvm_intsrc_pic )
    2.77          return (v->domain->arch.hvm_domain.vpic[isa_irq >> 3].irq_base
    2.78                  + (isa_irq & 7));
    2.79  
    2.80 -    ASSERT(src == hvm_intack_lapic);
    2.81 +    ASSERT(src == hvm_intsrc_lapic);
    2.82      return domain_vioapic(v->domain)->redirtbl[gsi].fields.vector;
    2.83  }
    2.84  
    2.85 @@ -345,18 +359,18 @@ int is_isa_irq_masked(struct vcpu *v, in
    2.86  
    2.87  int hvm_local_events_need_delivery(struct vcpu *v)
    2.88  {
    2.89 -    enum hvm_intack type;
    2.90 +    struct hvm_intack intack;
    2.91  
    2.92      /* TODO: Get rid of event-channel special case. */
    2.93      if ( vcpu_info(v, evtchn_upcall_pending) )
    2.94 -        type = hvm_intack_pic;
    2.95 +        intack = hvm_intack_pic(0);
    2.96      else
    2.97 -        type = hvm_vcpu_has_pending_irq(v);
    2.98 +        intack = hvm_vcpu_has_pending_irq(v);
    2.99  
   2.100 -    if ( likely(type == hvm_intack_none) )
   2.101 +    if ( likely(intack.source == hvm_intsrc_none) )
   2.102          return 0;
   2.103  
   2.104 -    return hvm_interrupts_enabled(v, type);
   2.105 +    return !hvm_interrupt_blocked(v, intack);
   2.106  }
   2.107  
   2.108  #if 0 /* Keep for debugging */
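
hvm_vcpu_ack_pending_irq() now returns the (possibly downgraded) intack instead of a boolean plus an out-parameter vector, so callers simply re-run the check/ack loop until an ack sticks or nothing is pending. A self-contained toy version of that loop (peek/ack are stand-ins for the real functions; the genuine callers are in svm/intr.c and vmx/intr.c below):

    #include <stdint.h>
    #include <stdio.h>

    enum hvm_intsrc { hvm_intsrc_none, hvm_intsrc_pic, hvm_intsrc_lapic, hvm_intsrc_nmi };
    struct hvm_intack { uint8_t source, vector; };

    /* Toy stand-ins for the real check/ack pair; an ack can fail (e.g. the
     * interrupt was retracted), in which case it returns a 'none' intack. */
    static int pending = 1;

    static struct hvm_intack peek(void)
    {
        struct hvm_intack i = { pending ? hvm_intsrc_lapic : hvm_intsrc_none, 0x51 };
        return i;
    }

    static struct hvm_intack ack(struct hvm_intack i)
    {
        pending = 0;
        return i;
    }

    int main(void)
    {
        struct hvm_intack intack;

        do {
            intack = peek();                  /* hvm_vcpu_has_pending_irq()  */
            if (intack.source == hvm_intsrc_none)
                return 0;                     /* nothing to inject           */
            intack = ack(intack);             /* hvm_vcpu_ack_pending_irq()  */
        } while (intack.source == hvm_intsrc_none);

        printf("inject vector %#x\n", intack.vector);
        return 0;
    }
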
     3.1 --- a/xen/arch/x86/hvm/svm/intr.c	Mon Oct 01 13:18:19 2007 +0100
     3.2 +++ b/xen/arch/x86/hvm/svm/intr.c	Mon Oct 01 14:11:15 2007 +0100
     3.3 @@ -39,19 +39,6 @@
     3.4  #include <xen/domain_page.h>
     3.5  #include <asm/hvm/trace.h>
     3.6  
     3.7 -static void svm_inject_dummy_vintr(struct vcpu *v)
     3.8 -{
     3.9 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    3.10 -    vintr_t intr = vmcb->vintr;
    3.11 -
    3.12 -    intr.fields.irq = 1;
    3.13 -    intr.fields.intr_masking = 1;
    3.14 -    intr.fields.vector = 0;
    3.15 -    intr.fields.prio = 0xF;
    3.16 -    intr.fields.ign_tpr = 1;
    3.17 -    vmcb->vintr = intr;
    3.18 -}
    3.19 -    
    3.20  static void svm_inject_nmi(struct vcpu *v)
    3.21  {
    3.22      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    3.23 @@ -80,11 +67,14 @@ static void svm_inject_extint(struct vcp
    3.24      vmcb->eventinj = event;
    3.25  }
    3.26      
    3.27 -static void enable_intr_window(struct vcpu *v, enum hvm_intack intr_source)
    3.28 +static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
    3.29  {
    3.30      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    3.31 +    vintr_t intr;
    3.32  
    3.33 -    ASSERT(intr_source != hvm_intack_none);
    3.34 +    ASSERT(intack.source != hvm_intsrc_none);
    3.35 +
    3.36 +    HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
    3.37  
    3.38      /*
    3.39       * Create a dummy virtual interrupt to intercept as soon as the
    3.40 @@ -95,53 +85,29 @@ static void enable_intr_window(struct vc
    3.41       * track 'NMI blocking' from NMI injection until IRET. This can be done
    3.42       * quite easily in software by intercepting the unblocking IRET.
    3.43       */
    3.44 +    intr = vmcb->vintr;
    3.45 +    intr.fields.irq     = 1;
    3.46 +    intr.fields.vector  = 0;
    3.47 +    intr.fields.prio    = intack.vector >> 4;
    3.48 +    intr.fields.ign_tpr = (intack.source != hvm_intsrc_lapic);
    3.49 +    vmcb->vintr = intr;
    3.50      vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
    3.51 -    HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
    3.52 -    svm_inject_dummy_vintr(v);
    3.53 -}
    3.54 -
    3.55 -static void update_cr8_intercept(
    3.56 -    struct vcpu *v, enum hvm_intack masked_intr_source)
    3.57 -{
    3.58 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    3.59 -    struct vlapic *vlapic = vcpu_vlapic(v);
    3.60 -    int max_irr;
    3.61 -
    3.62 -    vmcb->cr_intercepts &= ~CR_INTERCEPT_CR8_WRITE;
    3.63 -
    3.64 -    /*
    3.65 -     * If ExtInts are masked then that dominates the TPR --- the 'interrupt
    3.66 -     * window' has already been enabled in this case.
    3.67 -     */
    3.68 -    if ( (masked_intr_source == hvm_intack_lapic) ||
    3.69 -         (masked_intr_source == hvm_intack_pic) )
    3.70 -        return;
    3.71 -
    3.72 -    /* Is there an interrupt pending at the LAPIC? Nothing to do if not. */
    3.73 -    if ( !vlapic_enabled(vlapic) || 
    3.74 -         ((max_irr = vlapic_find_highest_irr(vlapic)) == -1) )
    3.75 -        return;
    3.76 -
    3.77 -    /* Highest-priority pending interrupt is masked by the TPR? */
    3.78 -    if ( (vmcb->vintr.fields.tpr & 0xf) >= (max_irr >> 4) )
    3.79 -        vmcb->cr_intercepts |= CR_INTERCEPT_CR8_WRITE;
    3.80  }
    3.81  
    3.82  asmlinkage void svm_intr_assist(void) 
    3.83  {
    3.84      struct vcpu *v = current;
    3.85      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    3.86 -    enum hvm_intack intr_source;
    3.87 -    int intr_vector;
    3.88 +    struct hvm_intack intack;
    3.89  
    3.90      /* Crank the handle on interrupt state. */
    3.91      pt_update_irq(v);
    3.92      hvm_set_callback_irq_level();
    3.93  
    3.94      do {
    3.95 -        intr_source = hvm_vcpu_has_pending_irq(v);
    3.96 -        if ( likely(intr_source == hvm_intack_none) )
    3.97 -            goto out;
    3.98 +        intack = hvm_vcpu_has_pending_irq(v);
    3.99 +        if ( likely(intack.source == hvm_intsrc_none) )
   3.100 +            return;
   3.101  
   3.102          /*
   3.103           * Pending IRQs must be delayed if:
   3.104 @@ -158,31 +124,30 @@ asmlinkage void svm_intr_assist(void)
   3.105           * 2. The IRQ is masked.
   3.106           */
   3.107          if ( unlikely(vmcb->eventinj.fields.v) ||
   3.108 -             !hvm_interrupts_enabled(v, intr_source) )
   3.109 +             hvm_interrupt_blocked(v, intack) )
   3.110          {
   3.111 -            enable_intr_window(v, intr_source);
   3.112 -            goto out;
   3.113 +            enable_intr_window(v, intack);
   3.114 +            return;
   3.115          }
   3.116 -    } while ( !hvm_vcpu_ack_pending_irq(v, intr_source, &intr_vector) );
   3.117  
   3.118 -    if ( intr_source == hvm_intack_nmi )
   3.119 +        intack = hvm_vcpu_ack_pending_irq(v, intack);
   3.120 +    } while ( intack.source == hvm_intsrc_none );
   3.121 +
   3.122 +    if ( intack.source == hvm_intsrc_nmi )
   3.123      {
   3.124          svm_inject_nmi(v);
   3.125      }
   3.126      else
   3.127      {
   3.128 -        HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
   3.129 -        svm_inject_extint(v, intr_vector);
   3.130 -        pt_intr_post(v, intr_vector, intr_source);
   3.131 +        HVMTRACE_2D(INJ_VIRQ, v, intack.vector, /*fake=*/ 0);
   3.132 +        svm_inject_extint(v, intack.vector);
   3.133 +        pt_intr_post(v, intack);
   3.134      }
   3.135  
   3.136      /* Is there another IRQ to queue up behind this one? */
   3.137 -    intr_source = hvm_vcpu_has_pending_irq(v);
   3.138 -    if ( unlikely(intr_source != hvm_intack_none) )
   3.139 -        enable_intr_window(v, intr_source);
   3.140 -
   3.141 - out:
   3.142 -    update_cr8_intercept(v, intr_source);
   3.143 +    intack = hvm_vcpu_has_pending_irq(v);
   3.144 +    if ( unlikely(intack.source != hvm_intsrc_none) )
   3.145 +        enable_intr_window(v, intack);
   3.146  }
   3.147  
   3.148  /*
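
The dummy VINTR used to open an interrupt window is no longer injected at fixed priority 0xF with ign_tpr set; it now carries the pending vector's priority class (intack.vector >> 4) and respects the TPR for LAPIC-sourced interrupts. A TPR-masked LAPIC interrupt therefore surfaces as a VINTR intercept exactly when the guest lowers its TPR far enough, replacing the CR8-write intercept that update_cr8_intercept() used to manage. A toy calculation of when the window opens (arbitrary example values, not Xen code):

    #include <stdio.h>

    /* Priority class = top nibble of the vector, as in the APIC's TPR scheme.
     * The virtual interrupt fires only once the guest TPR class drops below
     * the pending vector's class. */
    static int vintr_fires(unsigned vector, unsigned tpr_class)
    {
        return (vector >> 4) > tpr_class;
    }

    int main(void)
    {
        printf("vector 0x51, TPR class 6: %s\n", vintr_fires(0x51, 6) ? "fires" : "held");
        printf("vector 0x51, TPR class 4: %s\n", vintr_fires(0x51, 4) ? "fires" : "held");
        return 0;
    }
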
     4.1 --- a/xen/arch/x86/hvm/svm/svm.c	Mon Oct 01 13:18:19 2007 +0100
     4.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Mon Oct 01 14:11:15 2007 +0100
     4.3 @@ -425,16 +425,28 @@ static void svm_restore_dr(struct vcpu *
     4.4          __restore_debug_registers(v);
     4.5  }
     4.6  
     4.7 -static int svm_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
     4.8 +static enum hvm_intblk svm_interrupt_blocked(
     4.9 +    struct vcpu *v, struct hvm_intack intack)
    4.10  {
    4.11      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    4.12  
    4.13 -    if ( type == hvm_intack_nmi )
    4.14 -        return !vmcb->interrupt_shadow;
    4.15 +    if ( vmcb->interrupt_shadow )
    4.16 +        return hvm_intblk_shadow;
    4.17 +
    4.18 +    if ( intack.source == hvm_intsrc_nmi )
    4.19 +        return hvm_intblk_none;
    4.20 +
    4.21 +    ASSERT((intack.source == hvm_intsrc_pic) ||
    4.22 +           (intack.source == hvm_intsrc_lapic));
    4.23  
    4.24 -    ASSERT((type == hvm_intack_pic) || (type == hvm_intack_lapic));
    4.25 -    return (!irq_masked(guest_cpu_user_regs()->eflags) &&
    4.26 -            !vmcb->interrupt_shadow);
    4.27 +    if ( irq_masked(guest_cpu_user_regs()->eflags) )
    4.28 +        return hvm_intblk_rflags_ie;
    4.29 +
    4.30 +    if ( (intack.source == hvm_intsrc_lapic) &&
    4.31 +         ((vmcb->vintr.fields.tpr & 0xf) >= (intack.vector >> 4)) )
    4.32 +        return hvm_intblk_tpr;
    4.33 +
    4.34 +    return hvm_intblk_none;
    4.35  }
    4.36  
    4.37  static int svm_guest_x86_mode(struct vcpu *v)
    4.38 @@ -855,7 +867,7 @@ static struct hvm_function_table svm_fun
    4.39      .vcpu_destroy         = svm_vcpu_destroy,
    4.40      .save_cpu_ctxt        = svm_save_vmcb_ctxt,
    4.41      .load_cpu_ctxt        = svm_load_vmcb_ctxt,
    4.42 -    .interrupts_enabled   = svm_interrupts_enabled,
    4.43 +    .interrupt_blocked    = svm_interrupt_blocked,
    4.44      .guest_x86_mode       = svm_guest_x86_mode,
    4.45      .get_segment_base     = svm_get_segment_base,
    4.46      .get_segment_register = svm_get_segment_register,
    4.47 @@ -1552,7 +1564,6 @@ static void mov_from_cr(int cr, int gp, 
    4.48  {
    4.49      unsigned long value = 0;
    4.50      struct vcpu *v = current;
    4.51 -    struct vlapic *vlapic = vcpu_vlapic(v);
    4.52      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    4.53  
    4.54      switch ( cr )
    4.55 @@ -1560,21 +1571,14 @@ static void mov_from_cr(int cr, int gp, 
    4.56      case 0:
    4.57          value = v->arch.hvm_vcpu.guest_cr[0];
    4.58          break;
    4.59 -    case 2:
    4.60 -        value = vmcb->cr2;
    4.61 -        break;
    4.62      case 3:
    4.63          value = (unsigned long)v->arch.hvm_vcpu.guest_cr[3];
    4.64          break;
    4.65      case 4:
    4.66          value = (unsigned long)v->arch.hvm_vcpu.guest_cr[4];
    4.67          break;
    4.68 -    case 8:
    4.69 -        value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
    4.70 -        value = (value & 0xF0) >> 4;
    4.71 -        break;
    4.72 -        
    4.73      default:
    4.74 +        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
    4.75          domain_crash(v->domain);
    4.76          return;
    4.77      }
    4.78 @@ -1590,7 +1594,6 @@ static int mov_to_cr(int gpreg, int cr, 
    4.79  {
    4.80      unsigned long value;
    4.81      struct vcpu *v = current;
    4.82 -    struct vlapic *vlapic = vcpu_vlapic(v);
    4.83      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    4.84  
    4.85      value = get_reg(gpreg, regs, vmcb);
    4.86 @@ -1604,18 +1607,10 @@ static int mov_to_cr(int gpreg, int cr, 
    4.87      {
    4.88      case 0: 
    4.89          return svm_set_cr0(value);
    4.90 -
    4.91      case 3:
    4.92          return hvm_set_cr3(value);
    4.93 -
    4.94      case 4:
    4.95          return hvm_set_cr4(value);
    4.96 -
    4.97 -    case 8:
    4.98 -        vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
    4.99 -        vmcb->vintr.fields.tpr = value & 0x0F;
   4.100 -        break;
   4.101 -
   4.102      default:
   4.103          gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
   4.104          domain_crash(v->domain);
   4.105 @@ -1894,13 +1889,14 @@ static void svm_do_msr_access(
   4.106  static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb,
   4.107                                struct cpu_user_regs *regs)
   4.108  {
   4.109 -    enum hvm_intack type = hvm_vcpu_has_pending_irq(current);
   4.110 +    struct hvm_intack intack = hvm_vcpu_has_pending_irq(current);
   4.111  
   4.112      __update_guest_eip(regs, 1);
   4.113  
   4.114      /* Check for interrupt not handled or new interrupt. */
   4.115      if ( vmcb->eventinj.fields.v ||
   4.116 -         ((type != hvm_intack_none) && svm_interrupts_enabled(current, type)) )
   4.117 +         ((intack.source != hvm_intsrc_none) &&
   4.118 +          !svm_interrupt_blocked(current, intack)) )
   4.119      {
   4.120          HVMTRACE_1D(HLT, current, /*int pending=*/ 1);
   4.121          return;
   4.122 @@ -2080,13 +2076,11 @@ asmlinkage void svm_vmexit_handler(struc
   4.123  
   4.124      /*
   4.125       * Before doing anything else, we need to sync up the VLAPIC's TPR with
   4.126 -     * SVM's vTPR if CR8 writes are currently disabled.  It's OK if the 
   4.127 -     * guest doesn't touch the CR8 (e.g. 32-bit Windows) because we update
   4.128 -     * the vTPR on MMIO writes to the TPR
   4.129 +     * SVM's vTPR. It's OK if the guest doesn't touch CR8 (e.g. 32-bit Windows)
   4.130 +     * because we update the vTPR on MMIO writes to the TPR.
   4.131       */
   4.132 -    if ( !(vmcb->cr_intercepts & CR_INTERCEPT_CR8_WRITE) )
   4.133 -        vlapic_set_reg(vcpu_vlapic(v), APIC_TASKPRI,
   4.134 -                       (vmcb->vintr.fields.tpr & 0x0F) << 4);
   4.135 +    vlapic_set_reg(vcpu_vlapic(v), APIC_TASKPRI,
   4.136 +                   (vmcb->vintr.fields.tpr & 0x0F) << 4);
   4.137  
   4.138      exit_reason = vmcb->exitcode;
   4.139  
   4.140 @@ -2222,45 +2216,14 @@ asmlinkage void svm_vmexit_handler(struc
   4.141          }
   4.142          break;
   4.143  
   4.144 -    case VMEXIT_CR0_READ:
   4.145 -        svm_cr_access(v, 0, TYPE_MOV_FROM_CR, regs);
   4.146 -        break;
   4.147 -
   4.148 -    case VMEXIT_CR2_READ:
   4.149 -        svm_cr_access(v, 2, TYPE_MOV_FROM_CR, regs);
   4.150 -        break;
   4.151 -
   4.152 -    case VMEXIT_CR3_READ:
   4.153 -        svm_cr_access(v, 3, TYPE_MOV_FROM_CR, regs);
   4.154 -        break;
   4.155 -
   4.156 -    case VMEXIT_CR4_READ:
   4.157 -        svm_cr_access(v, 4, TYPE_MOV_FROM_CR, regs);
   4.158 -        break;
   4.159 -
   4.160 -    case VMEXIT_CR8_READ:
   4.161 -        svm_cr_access(v, 8, TYPE_MOV_FROM_CR, regs);
   4.162 +    case VMEXIT_CR0_READ ... VMEXIT_CR15_READ:
   4.163 +        svm_cr_access(v, exit_reason - VMEXIT_CR0_READ,
   4.164 +                      TYPE_MOV_FROM_CR, regs);
   4.165          break;
   4.166  
   4.167 -    case VMEXIT_CR0_WRITE:
   4.168 -        svm_cr_access(v, 0, TYPE_MOV_TO_CR, regs);
   4.169 -        break;
   4.170 -
   4.171 -    case VMEXIT_CR2_WRITE:
   4.172 -        svm_cr_access(v, 2, TYPE_MOV_TO_CR, regs);
   4.173 -        break;
   4.174 -
   4.175 -    case VMEXIT_CR3_WRITE:
   4.176 -        svm_cr_access(v, 3, TYPE_MOV_TO_CR, regs);
   4.177 -        local_flush_tlb();
   4.178 -        break;
   4.179 -
   4.180 -    case VMEXIT_CR4_WRITE:
   4.181 -        svm_cr_access(v, 4, TYPE_MOV_TO_CR, regs);
   4.182 -        break;
   4.183 -
   4.184 -    case VMEXIT_CR8_WRITE:
   4.185 -        svm_cr_access(v, 8, TYPE_MOV_TO_CR, regs);
   4.186 +    case VMEXIT_CR0_WRITE ... VMEXIT_CR15_WRITE:
   4.187 +        svm_cr_access(v, exit_reason - VMEXIT_CR0_WRITE,
   4.188 +                      TYPE_MOV_TO_CR, regs);
   4.189          break;
   4.190  
   4.191      case VMEXIT_DR0_WRITE ... VMEXIT_DR7_WRITE:
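
With CR8 accesses no longer intercepted (see the vmcb.c hunk below), the VM-exit path unconditionally refreshes the vLAPIC's TASKPRI from the VMCB's 4-bit vTPR. The two registers keep the priority class in different nibbles, hence the shifts; a standalone illustration with an arbitrary value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Example values: the VMCB vTPR keeps the priority class in bits 3:0,
         * the APIC TASKPRI register keeps it in bits 7:4. */
        uint8_t vtpr = 0x05;
        uint8_t taskpri = (vtpr & 0x0F) << 4;      /* VM-exit direction    */
        uint8_t back    = (taskpri >> 4) & 0x0F;   /* MMIO-write direction */
        printf("vTPR %#x -> TASKPRI %#x -> vTPR %#x\n", vtpr, taskpri, back);
        return 0;
    }
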
     5.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Mon Oct 01 13:18:19 2007 +0100
     5.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Mon Oct 01 14:11:15 2007 +0100
     5.3 @@ -130,14 +130,11 @@ static int construct_vmcb(struct vcpu *v
     5.4      /* Intercept all debug-register writes. */
     5.5      vmcb->dr_intercepts = DR_INTERCEPT_ALL_WRITES;
     5.6  
     5.7 -    /*
     5.8 -     * Intercept all control-register accesses except for CR2 reads/writes
     5.9 -     * and CR8 reads (and actually CR8 writes, but that's a special case
    5.10 -     * that's handled in svm/intr.c). 
    5.11 -     */
    5.12 +    /* Intercept all control-register accesses except for CR2 and CR8. */
    5.13      vmcb->cr_intercepts = ~(CR_INTERCEPT_CR2_READ |
    5.14                              CR_INTERCEPT_CR2_WRITE |
    5.15 -                            CR_INTERCEPT_CR8_READ);
    5.16 +                            CR_INTERCEPT_CR8_READ |
    5.17 +                            CR_INTERCEPT_CR8_WRITE);
    5.18  
    5.19      /* I/O and MSR permission bitmaps. */
    5.20      arch_svm->msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE));
     6.1 --- a/xen/arch/x86/hvm/vlapic.c	Mon Oct 01 13:18:19 2007 +0100
     6.2 +++ b/xen/arch/x86/hvm/vlapic.c	Mon Oct 01 14:11:15 2007 +0100
     6.3 @@ -732,33 +732,34 @@ int vlapic_accept_pic_intr(struct vcpu *
     6.4               vlapic_hw_disabled(vlapic)));
     6.5  }
     6.6  
     6.7 -int vlapic_has_interrupt(struct vcpu *v)
     6.8 +int vlapic_has_pending_irq(struct vcpu *v)
     6.9  {
    6.10      struct vlapic *vlapic = vcpu_vlapic(v);
    6.11 -    int highest_irr;
    6.12 +    int irr, isr;
    6.13  
    6.14      if ( !vlapic_enabled(vlapic) )
    6.15          return -1;
    6.16  
    6.17 -    highest_irr = vlapic_find_highest_irr(vlapic);
    6.18 -    if ( (highest_irr == -1) ||
    6.19 -         ((highest_irr & 0xF0) <= vlapic_get_ppr(vlapic)) )
    6.20 +    irr = vlapic_find_highest_irr(vlapic);
    6.21 +    if ( irr == -1 )
    6.22          return -1;
    6.23  
    6.24 -    return highest_irr;
    6.25 +    isr = vlapic_find_highest_isr(vlapic);
    6.26 +    isr = (isr != -1) ? isr : 0;
    6.27 +    if ( (isr & 0xf0) >= (irr & 0xf0) )
    6.28 +        return -1;
    6.29 +
    6.30 +    return irr;
    6.31  }
    6.32  
    6.33 -int cpu_get_apic_interrupt(struct vcpu *v)
    6.34 +int vlapic_ack_pending_irq(struct vcpu *v, int vector)
    6.35  {
    6.36 -    int vector = vlapic_has_interrupt(v);
    6.37      struct vlapic *vlapic = vcpu_vlapic(v);
    6.38  
    6.39 -    if ( vector == -1 )
    6.40 -        return -1;
    6.41 - 
    6.42      vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
    6.43      vlapic_clear_irr(vector, vlapic);
    6.44 -    return vector;
    6.45 +
    6.46 +    return 1;
    6.47  }
    6.48  
     6.49  /* Reset the VLAPIC back to its power-on/reset state. */
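
vlapic_has_pending_irq() now compares the highest IRR vector's priority class against the highest in-service (ISR) class, rather than against the processor priority register; TPR masking moves out to the vendor interrupt_blocked() hooks. A runnable model of the new test:

    #include <stdio.h>

    /* Model of the new test: -1 means nothing deliverable. The highest IRR
     * vector is deliverable only if its priority class beats the highest
     * in-service (ISR) class; TPR masking is now the vendor code's problem. */
    static int has_pending(int irr, int isr)
    {
        if (irr == -1)
            return -1;
        if (isr == -1)
            isr = 0;
        return ((isr & 0xf0) >= (irr & 0xf0)) ? -1 : irr;
    }

    int main(void)
    {
        printf("%d\n", has_pending(0x51, 0x40)); /* 0x51: class 5 beats class 4  */
        printf("%d\n", has_pending(0x43, 0x40)); /* -1: class 4 vs class 4 waits */
        printf("%d\n", has_pending(0x51,   -1)); /* 0x51: nothing in service     */
        return 0;
    }
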
     7.1 --- a/xen/arch/x86/hvm/vmx/intr.c	Mon Oct 01 13:18:19 2007 +0100
     7.2 +++ b/xen/arch/x86/hvm/vmx/intr.c	Mon Oct 01 14:11:15 2007 +0100
     7.3 @@ -71,14 +71,14 @@
     7.4   * the effect is cleared. (i.e., MOV-SS-blocking 'dominates' STI-blocking).
     7.5   */
     7.6  
     7.7 -static void enable_intr_window(struct vcpu *v, enum hvm_intack intr_source)
     7.8 +static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
     7.9  {
    7.10      u32 *cpu_exec_control = &v->arch.hvm_vmx.exec_control;
    7.11      u32 ctl = CPU_BASED_VIRTUAL_INTR_PENDING;
    7.12  
    7.13 -    ASSERT(intr_source != hvm_intack_none);
    7.14 +    ASSERT(intack.source != hvm_intsrc_none);
    7.15  
    7.16 -    if ( (intr_source == hvm_intack_nmi) && cpu_has_vmx_vnmi )
    7.17 +    if ( (intack.source == hvm_intsrc_nmi) && cpu_has_vmx_vnmi )
    7.18      {
    7.19          /*
    7.20           * We set MOV-SS blocking in lieu of STI blocking when delivering an
    7.21 @@ -107,37 +107,6 @@ static void enable_intr_window(struct vc
    7.22      }
    7.23  }
    7.24  
    7.25 -static void update_tpr_threshold(
    7.26 -    struct vcpu *v, enum hvm_intack masked_intr_source)
    7.27 -{
    7.28 -    struct vlapic *vlapic = vcpu_vlapic(v);
    7.29 -    int max_irr, tpr, threshold = 0;
    7.30 -
    7.31 -    if ( !cpu_has_vmx_tpr_shadow )
    7.32 -        return;
    7.33 -
    7.34 -    /*
    7.35 -     * If ExtInts are masked then that dominates the TPR --- the 'interrupt
    7.36 -     * window' has already been enabled in this case.
    7.37 -     */
    7.38 -    if ( (masked_intr_source == hvm_intack_lapic) ||
    7.39 -         (masked_intr_source == hvm_intack_pic) )
    7.40 -        goto out;
    7.41 -
    7.42 -    /* Is there an interrupt pending at the LAPIC? Nothing to do if not. */
    7.43 -    if ( !vlapic_enabled(vlapic) || 
    7.44 -         ((max_irr = vlapic_find_highest_irr(vlapic)) == -1) )
    7.45 -        goto out;
    7.46 -
    7.47 -    /* Highest-priority pending interrupt is masked by the TPR? */
    7.48 -    tpr = vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xF0;
    7.49 -    if ( (tpr >> 4) >= (max_irr >> 4) )
    7.50 -        threshold = max_irr >> 4;
    7.51 -
    7.52 - out:
    7.53 -    __vmwrite(TPR_THRESHOLD, threshold);
    7.54 -}
    7.55 -
    7.56  static void vmx_dirq_assist(struct domain *d)
    7.57  {
    7.58      unsigned int irq;
    7.59 @@ -157,10 +126,10 @@ static void vmx_dirq_assist(struct domai
    7.60  
    7.61  asmlinkage void vmx_intr_assist(void)
    7.62  {
    7.63 -    int intr_vector;
    7.64 -    enum hvm_intack intr_source;
    7.65 +    struct hvm_intack intack;
    7.66      struct vcpu *v = current;
    7.67 -    unsigned int intr_info;
    7.68 +    unsigned int tpr_threshold = 0;
    7.69 +    enum hvm_intblk intblk;
    7.70  
    7.71      /* Crank the handle on interrupt state. */
    7.72      pt_update_irq(v);
    7.73 @@ -171,41 +140,48 @@ asmlinkage void vmx_intr_assist(void)
    7.74      hvm_set_callback_irq_level();
    7.75  
    7.76      do {
    7.77 -        intr_source = hvm_vcpu_has_pending_irq(v);
    7.78 -        if ( likely(intr_source == hvm_intack_none) )
    7.79 +        intack = hvm_vcpu_has_pending_irq(v);
    7.80 +        if ( likely(intack.source == hvm_intsrc_none) )
    7.81              goto out;
    7.82  
    7.83 -        /*
    7.84 -         * An event is already pending or the pending interrupt is masked?
    7.85 -         * Then the pending interrupt must be delayed.
    7.86 -         */
    7.87 -        intr_info = __vmread(VM_ENTRY_INTR_INFO);
    7.88 -        if ( unlikely(intr_info & INTR_INFO_VALID_MASK) ||
    7.89 -             !hvm_interrupts_enabled(v, intr_source) )
    7.90 +        intblk = hvm_interrupt_blocked(v, intack);
    7.91 +        if ( intblk == hvm_intblk_tpr )
    7.92          {
    7.93 -            enable_intr_window(v, intr_source);
    7.94 +            ASSERT(vlapic_enabled(vcpu_vlapic(v)));
    7.95 +            ASSERT(intack.source == hvm_intsrc_lapic);
    7.96 +            tpr_threshold = intack.vector >> 4;
    7.97              goto out;
    7.98          }
    7.99 -    } while ( !hvm_vcpu_ack_pending_irq(v, intr_source, &intr_vector) );
   7.100  
   7.101 -    if ( intr_source == hvm_intack_nmi )
   7.102 +        if ( (intblk != hvm_intblk_none) ||
   7.103 +             (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK) )
   7.104 +        {
   7.105 +            enable_intr_window(v, intack);
   7.106 +            goto out;
   7.107 +        }
   7.108 +
   7.109 +        intack = hvm_vcpu_ack_pending_irq(v, intack);
   7.110 +    } while ( intack.source == hvm_intsrc_none );
   7.111 +
   7.112 +    if ( intack.source == hvm_intsrc_nmi )
   7.113      {
   7.114          vmx_inject_nmi(v);
   7.115      }
   7.116      else
   7.117      {
   7.118 -        HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
   7.119 -        vmx_inject_extint(v, intr_vector);
   7.120 -        pt_intr_post(v, intr_vector, intr_source);
   7.121 +        HVMTRACE_2D(INJ_VIRQ, v, intack.vector, /*fake=*/ 0);
   7.122 +        vmx_inject_extint(v, intack.vector);
   7.123 +        pt_intr_post(v, intack);
   7.124      }
   7.125  
   7.126      /* Is there another IRQ to queue up behind this one? */
   7.127 -    intr_source = hvm_vcpu_has_pending_irq(v);
   7.128 -    if ( unlikely(intr_source != hvm_intack_none) )
   7.129 -        enable_intr_window(v, intr_source);
   7.130 +    intack = hvm_vcpu_has_pending_irq(v);
   7.131 +    if ( unlikely(intack.source != hvm_intsrc_none) )
   7.132 +        enable_intr_window(v, intack);
   7.133  
   7.134   out:
   7.135 -    update_tpr_threshold(v, intr_source);
   7.136 +    if ( cpu_has_vmx_tpr_shadow )
   7.137 +        __vmwrite(TPR_THRESHOLD, tpr_threshold);
   7.138  }
   7.139  
   7.140  /*
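
The open-coded update_tpr_threshold() disappears: when hvm_interrupt_blocked() reports hvm_intblk_tpr, the TPR threshold is programmed to the pending vector's priority class, and to zero otherwise. A toy demonstration of why that value is the right one (assuming the usual VMX TPR-shadow exit rule):

    #include <stdio.h>

    int main(void)
    {
        /* Pending LAPIC vector 0x51 masked by the TPR: program the threshold
         * to its priority class. Per the VMX TPR-shadow rules, a VM exit then
         * fires when the guest lowers its TPR class below the threshold,
         * which is exactly when the interrupt becomes deliverable. */
        unsigned threshold = 0x51 >> 4;

        for (int tpr_class = 6; tpr_class >= 3; tpr_class--)
            printf("guest TPR class %d: %s\n", tpr_class,
                   ((unsigned)tpr_class < threshold) ? "VM exit, retry injection"
                                                     : "no exit");
        return 0;
    }
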
     8.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Mon Oct 01 13:18:19 2007 +0100
     8.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Mon Oct 01 14:11:15 2007 +0100
     8.3 @@ -975,20 +975,34 @@ static void vmx_init_hypercall_page(stru
     8.4      *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
     8.5  }
     8.6  
     8.7 -static int vmx_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
     8.8 +static enum hvm_intblk vmx_interrupt_blocked(
     8.9 +    struct vcpu *v, struct hvm_intack intack)
    8.10  {
    8.11      unsigned long intr_shadow;
    8.12  
    8.13      intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
    8.14  
    8.15 -    if ( type == hvm_intack_nmi )
    8.16 -        return !(intr_shadow & (VMX_INTR_SHADOW_STI|
    8.17 -                                VMX_INTR_SHADOW_MOV_SS|
    8.18 -                                VMX_INTR_SHADOW_NMI));
    8.19 -
    8.20 -    ASSERT((type == hvm_intack_pic) || (type == hvm_intack_lapic));
    8.21 -    return (!irq_masked(guest_cpu_user_regs()->eflags) &&
    8.22 -            !(intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS)));
    8.23 +    if ( intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) )
    8.24 +        return hvm_intblk_shadow;
    8.25 +
    8.26 +    if ( intack.source == hvm_intsrc_nmi )
    8.27 +        return ((intr_shadow & VMX_INTR_SHADOW_NMI) ?
    8.28 +                hvm_intblk_nmi_iret : hvm_intblk_none);
    8.29 +
    8.30 +    ASSERT((intack.source == hvm_intsrc_pic) ||
    8.31 +           (intack.source == hvm_intsrc_lapic));
    8.32 +
    8.33 +    if ( irq_masked(guest_cpu_user_regs()->eflags) )
    8.34 +        return hvm_intblk_rflags_ie;
    8.35 +
    8.36 +    if ( intack.source == hvm_intsrc_lapic )
    8.37 +    {
    8.38 +        uint32_t tpr = vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) & 0xF0;
    8.39 +        if ( (tpr >> 4) >= (intack.vector >> 4) )
    8.40 +            return hvm_intblk_tpr;
    8.41 +    }
    8.42 +
    8.43 +    return hvm_intblk_none;
    8.44  }
    8.45  
    8.46  static void vmx_update_host_cr3(struct vcpu *v)
    8.47 @@ -1112,7 +1126,7 @@ static struct hvm_function_table vmx_fun
    8.48      .vcpu_destroy         = vmx_vcpu_destroy,
    8.49      .save_cpu_ctxt        = vmx_save_vmcs_ctxt,
    8.50      .load_cpu_ctxt        = vmx_load_vmcs_ctxt,
    8.51 -    .interrupts_enabled   = vmx_interrupts_enabled,
    8.52 +    .interrupt_blocked    = vmx_interrupt_blocked,
    8.53      .guest_x86_mode       = vmx_guest_x86_mode,
    8.54      .get_segment_base     = vmx_get_segment_base,
    8.55      .get_segment_register = vmx_get_segment_register,
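
vmx_interrupt_blocked() checks the blocking reasons in a fixed order: interruptibility shadow first (it gates even NMIs), then NMI-blocked-until-IRET, then RFLAGS.IF, then the TPR. A toy restatement of that precedence for external interrupts (illustrative only; the real code reads GUEST_INTERRUPTIBILITY_INFO and the vLAPIC's TASKPRI register):

    #include <stdio.h>

    enum hvm_intblk { hvm_intblk_none, hvm_intblk_shadow, hvm_intblk_rflags_ie,
                      hvm_intblk_tpr, hvm_intblk_nmi_iret };

    static enum hvm_intblk ext_blocked(int in_shadow, int if_clear,
                                       int tpr_class, int vector)
    {
        if (in_shadow)
            return hvm_intblk_shadow;        /* STI/MOV-SS shadow dominates */
        if (if_clear)
            return hvm_intblk_rflags_ie;
        if (tpr_class >= (vector >> 4))
            return hvm_intblk_tpr;           /* LAPIC sources only */
        return hvm_intblk_none;
    }

    int main(void)
    {
        printf("%d\n", ext_blocked(1, 1, 6, 0x51)); /* 1: shadow, not rflags */
        printf("%d\n", ext_blocked(0, 0, 6, 0x51)); /* 3: TPR masks class 5  */
        printf("%d\n", ext_blocked(0, 0, 4, 0x51)); /* 0: deliverable        */
        return 0;
    }
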
     9.1 --- a/xen/arch/x86/hvm/vpic.c	Mon Oct 01 13:18:19 2007 +0100
     9.2 +++ b/xen/arch/x86/hvm/vpic.c	Mon Oct 01 14:11:15 2007 +0100
     9.3 @@ -503,7 +503,7 @@ void vpic_irq_negative_edge(struct domai
     9.4          vpic_update_int_output(vpic);
     9.5  }
     9.6  
     9.7 -int cpu_get_pic_interrupt(struct vcpu *v)
     9.8 +int vpic_ack_pending_irq(struct vcpu *v)
     9.9  {
    9.10      int irq, vector;
    9.11      struct hvm_hw_vpic *vpic = &v->domain->arch.hvm_domain.vpic[0];
    10.1 --- a/xen/arch/x86/hvm/vpt.c	Mon Oct 01 13:18:19 2007 +0100
    10.2 +++ b/xen/arch/x86/hvm/vpt.c	Mon Oct 01 14:11:15 2007 +0100
    10.3 @@ -165,12 +165,12 @@ void pt_update_irq(struct vcpu *v)
    10.4  }
    10.5  
    10.6  static struct periodic_time *is_pt_irq(
    10.7 -    struct vcpu *v, int vector, enum hvm_intack src)
    10.8 +    struct vcpu *v, struct hvm_intack intack)
    10.9  {
   10.10      struct list_head *head = &v->arch.hvm_vcpu.tm_list;
   10.11      struct periodic_time *pt;
   10.12      struct RTCState *rtc = &v->domain->arch.hvm_domain.pl_time.vrtc;
   10.13 -    int vec;
   10.14 +    int vector;
   10.15  
   10.16      list_for_each_entry ( pt, head, list )
   10.17      {
   10.18 @@ -179,15 +179,16 @@ static struct periodic_time *is_pt_irq(
   10.19  
   10.20          if ( is_lvtt(v, pt->irq) )
   10.21          {
   10.22 -            if ( pt->irq != vector )
   10.23 +            if ( pt->irq != intack.vector )
   10.24                  continue;
   10.25              return pt;
   10.26          }
   10.27  
   10.28 -        vec = get_isa_irq_vector(v, pt->irq, src);
   10.29 +        vector = get_isa_irq_vector(v, pt->irq, intack.source);
   10.30  
    10.31          /* RTC irqs need special care */
   10.32 -        if ( (vector != vec) || (pt->irq == 8 && !is_rtc_periodic_irq(rtc)) )
   10.33 +        if ( (intack.vector != vector) ||
   10.34 +             ((pt->irq == 8) && !is_rtc_periodic_irq(rtc)) )
   10.35              continue;
   10.36  
   10.37          return pt;
   10.38 @@ -196,7 +197,7 @@ static struct periodic_time *is_pt_irq(
   10.39      return NULL;
   10.40  }
   10.41  
   10.42 -void pt_intr_post(struct vcpu *v, int vector, enum hvm_intack src)
   10.43 +void pt_intr_post(struct vcpu *v, struct hvm_intack intack)
   10.44  {
   10.45      struct periodic_time *pt;
   10.46      time_cb *cb;
   10.47 @@ -204,7 +205,7 @@ void pt_intr_post(struct vcpu *v, int ve
   10.48  
   10.49      spin_lock(&v->arch.hvm_vcpu.tm_lock);
   10.50  
   10.51 -    pt = is_pt_irq(v, vector, src);
   10.52 +    pt = is_pt_irq(v, intack);
   10.53      if ( pt == NULL )
   10.54      {
   10.55          spin_unlock(&v->arch.hvm_vcpu.tm_lock);
    11.1 --- a/xen/include/asm-x86/hvm/hvm.h	Mon Oct 01 13:18:19 2007 +0100
    11.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Mon Oct 01 14:11:15 2007 +0100
    11.3 @@ -57,11 +57,26 @@ typedef struct segment_register {
    11.4  } __attribute__ ((packed)) segment_register_t;
    11.5  
    11.6  /* Interrupt acknowledgement sources. */
    11.7 -enum hvm_intack {
    11.8 -    hvm_intack_none,
    11.9 -    hvm_intack_pic,
   11.10 -    hvm_intack_lapic,
   11.11 -    hvm_intack_nmi
   11.12 +enum hvm_intsrc {
   11.13 +    hvm_intsrc_none,
   11.14 +    hvm_intsrc_pic,
   11.15 +    hvm_intsrc_lapic,
   11.16 +    hvm_intsrc_nmi
   11.17 +};
   11.18 +struct hvm_intack {
   11.19 +    uint8_t source; /* enum hvm_intsrc */
   11.20 +    uint8_t vector;
   11.21 +};
   11.22 +#define hvm_intack_none       ( (struct hvm_intack) { hvm_intsrc_none,  0 } )
   11.23 +#define hvm_intack_pic(vec)   ( (struct hvm_intack) { hvm_intsrc_pic,   vec } )
   11.24 +#define hvm_intack_lapic(vec) ( (struct hvm_intack) { hvm_intsrc_lapic, vec } )
   11.25 +#define hvm_intack_nmi        ( (struct hvm_intack) { hvm_intsrc_nmi,   2 } )
   11.26 +enum hvm_intblk {
   11.27 +    hvm_intblk_none,      /* not blocked (deliverable) */
   11.28 +    hvm_intblk_shadow,    /* MOV-SS or STI shadow */
   11.29 +    hvm_intblk_rflags_ie, /* RFLAGS.IE == 0 */
   11.30 +    hvm_intblk_tpr,       /* LAPIC TPR too high */
   11.31 +    hvm_intblk_nmi_iret   /* NMI blocked until IRET */
   11.32  };
   11.33  
   11.34  /*
   11.35 @@ -94,7 +109,7 @@ struct hvm_function_table {
   11.36       * 3) return the current guest segment descriptor base
   11.37       * 4) return the current guest segment descriptor
   11.38       */
   11.39 -    int (*interrupts_enabled)(struct vcpu *v, enum hvm_intack);
   11.40 +    enum hvm_intblk (*interrupt_blocked)(struct vcpu *v, struct hvm_intack);
   11.41      int (*guest_x86_mode)(struct vcpu *v);
   11.42      unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
   11.43      void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
   11.44 @@ -177,11 +192,11 @@ u64 hvm_get_guest_time(struct vcpu *v);
   11.45  #define hvm_long_mode_enabled(v) (v,0)
   11.46  #endif
   11.47  
   11.48 -static inline int
   11.49 -hvm_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
   11.50 +static inline enum hvm_intblk
   11.51 +hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack)
   11.52  {
   11.53      ASSERT(v == current);
   11.54 -    return hvm_funcs.interrupts_enabled(v, type);
   11.55 +    return hvm_funcs.interrupt_blocked(v, intack);
   11.56  }
   11.57  
   11.58  static inline int
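
One detail worth noting in the new header: hvm_intack_nmi bakes in vector 2, the architecturally fixed NMI vector, so consumers can switch on intack.source and still have a meaningful vector field. A small usage sketch (illustrative, not Xen code):

    #include <stdint.h>
    #include <stdio.h>

    enum hvm_intsrc { hvm_intsrc_none, hvm_intsrc_pic, hvm_intsrc_lapic, hvm_intsrc_nmi };
    struct hvm_intack { uint8_t source, vector; };
    #define hvm_intack_nmi ((struct hvm_intack){ hvm_intsrc_nmi, 2 })

    int main(void)
    {
        struct hvm_intack intack = hvm_intack_nmi;

        switch (intack.source) {
        case hvm_intsrc_nmi:
            /* NMI always uses the architecturally fixed vector 2, hence the
             * constant baked into the hvm_intack_nmi macro. */
            printf("inject NMI (vector %u)\n", intack.vector);
            break;
        default:
            printf("inject external interrupt, vector %u\n", intack.vector);
            break;
        }
        return 0;
    }
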
    12.1 --- a/xen/include/asm-x86/hvm/irq.h	Mon Oct 01 13:18:19 2007 +0100
    12.2 +++ b/xen/include/asm-x86/hvm/irq.h	Mon Oct 01 14:11:15 2007 +0100
    12.3 @@ -135,11 +135,11 @@ void hvm_set_callback_irq_level(void);
    12.4  void hvm_set_callback_via(struct domain *d, uint64_t via);
    12.5  
    12.6  /* Check/Acknowledge next pending interrupt. */
    12.7 -enum hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v);
    12.8 -int hvm_vcpu_ack_pending_irq(
    12.9 -    struct vcpu *v, enum hvm_intack type, int *vector);
   12.10 +struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v);
   12.11 +struct hvm_intack hvm_vcpu_ack_pending_irq(struct vcpu *v,
   12.12 +                                           struct hvm_intack intack);
   12.13  
   12.14 -int get_isa_irq_vector(struct vcpu *vcpu, int irq, enum hvm_intack src);
   12.15 +int get_isa_irq_vector(struct vcpu *vcpu, int irq, enum hvm_intsrc src);
   12.16  int is_isa_irq_masked(struct vcpu *v, int isa_irq);
   12.17  
   12.18  #endif /* __ASM_X86_HVM_IRQ_H__ */
    13.1 --- a/xen/include/asm-x86/hvm/vlapic.h	Mon Oct 01 13:18:19 2007 +0100
    13.2 +++ b/xen/include/asm-x86/hvm/vlapic.h	Mon Oct 01 14:11:15 2007 +0100
    13.3 @@ -75,8 +75,8 @@ int vlapic_set_irq(struct vlapic *vlapic
    13.4  
    13.5  int vlapic_find_highest_irr(struct vlapic *vlapic);
    13.6  
    13.7 -int vlapic_has_interrupt(struct vcpu *v);
    13.8 -int cpu_get_apic_interrupt(struct vcpu *v);
    13.9 +int vlapic_has_pending_irq(struct vcpu *v);
   13.10 +int vlapic_ack_pending_irq(struct vcpu *v, int vector);
   13.11  
   13.12  int  vlapic_init(struct vcpu *v);
   13.13  void vlapic_destroy(struct vcpu *v);
    14.1 --- a/xen/include/asm-x86/hvm/vpic.h	Mon Oct 01 13:18:19 2007 +0100
    14.2 +++ b/xen/include/asm-x86/hvm/vpic.h	Mon Oct 01 14:11:15 2007 +0100
    14.3 @@ -32,7 +32,7 @@
    14.4  void vpic_irq_positive_edge(struct domain *d, int irq);
    14.5  void vpic_irq_negative_edge(struct domain *d, int irq);
    14.6  void vpic_init(struct domain *d);
    14.7 -int cpu_get_pic_interrupt(struct vcpu *v);
    14.8 +int vpic_ack_pending_irq(struct vcpu *v);
    14.9  int is_periodic_irq(struct vcpu *v, int irq, int type);
   14.10  
   14.11  #endif  /* __ASM_X86_HVM_VPIC_H__ */  
    15.1 --- a/xen/include/asm-x86/hvm/vpt.h	Mon Oct 01 13:18:19 2007 +0100
    15.2 +++ b/xen/include/asm-x86/hvm/vpt.h	Mon Oct 01 14:11:15 2007 +0100
    15.3 @@ -120,7 +120,7 @@ struct pl_time {    /* platform time */
    15.4  void pt_freeze_time(struct vcpu *v);
    15.5  void pt_thaw_time(struct vcpu *v);
    15.6  void pt_update_irq(struct vcpu *v);
    15.7 -void pt_intr_post(struct vcpu *v, int vector, enum hvm_intack src);
    15.8 +void pt_intr_post(struct vcpu *v, struct hvm_intack intack);
    15.9  void pt_reset(struct vcpu *v);
   15.10  void pt_migrate(struct vcpu *v);
   15.11  void create_periodic_time(