ia64/xen-unstable

changeset 16973:e4edc310e949

hvm: FPU management cleanups.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Sun Feb 03 10:22:08 2008 +0000 (2008-02-03)
parents aecbf98aa709
children 90844659c458
files xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/hvm.h
line diff
     1.1 --- a/xen/arch/x86/hvm/hvm.c	Sun Feb 03 09:30:59 2008 +0000
     1.2 +++ b/xen/arch/x86/hvm/hvm.c	Sun Feb 03 10:22:08 2008 +0000
     1.3 @@ -124,9 +124,6 @@ void hvm_do_resume(struct vcpu *v)
     1.4  {
     1.5      ioreq_t *p;
     1.6  
     1.7 -    if ( !v->fpu_dirtied )
     1.8 -        hvm_funcs.stts(v);
     1.9 -
    1.10      pt_restore_timer(v);
    1.11  
    1.12      /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
     2.1 --- a/xen/arch/x86/hvm/svm/svm.c	Sun Feb 03 09:30:59 2008 +0000
     2.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Sun Feb 03 10:22:08 2008 +0000
     2.3 @@ -426,6 +426,34 @@ static int svm_load_vmcb_ctxt(struct vcp
     2.4      return 0;
     2.5  }
     2.6  
     2.7 +static void svm_fpu_enter(struct vcpu *v)
     2.8 +{
     2.9 +    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    2.10 +
    2.11 +    setup_fpu(v);
    2.12 +    vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
    2.13 +}
    2.14 +
    2.15 +static void svm_fpu_leave(struct vcpu *v)
    2.16 +{
    2.17 +    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    2.18 +
    2.19 +    ASSERT(!v->fpu_dirtied);
    2.20 +    ASSERT(read_cr0() & X86_CR0_TS);
    2.21 +
    2.22 +    /*
     2.23 +     * If the guest does not have TS enabled then we must cause and handle an
     2.24 +     * exception on first use of the FPU. If the guest *does* have TS enabled
     2.25 +     * then this is not necessary: no FPU activity can occur until the guest
    2.26 +     * clears CR0.TS, and we will initialise the FPU when that happens.
    2.27 +     */
    2.28 +    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
    2.29 +    {
     2.30 +        vmcb->exception_intercepts |= 1U << TRAP_no_device;
    2.31 +        vmcb->cr0 |= X86_CR0_TS;
    2.32 +    }
    2.33 +}
    2.34 +
    2.35  static enum hvm_intblk svm_interrupt_blocked(
    2.36      struct vcpu *v, struct hvm_intack intack)
    2.37  {
    2.38 @@ -470,19 +498,22 @@ static void svm_update_guest_cr(struct v
    2.39  
    2.40      switch ( cr )
    2.41      {
    2.42 -    case 0:
    2.43 -        /* TS cleared? Then initialise FPU now. */
    2.44 -        if ( (v == current) && !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) &&
    2.45 -             (vmcb->cr0 & X86_CR0_TS) )
    2.46 +    case 0: {
    2.47 +        unsigned long hw_cr0_mask = 0;
    2.48 +
    2.49 +        if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
    2.50          {
    2.51 -            setup_fpu(v);
    2.52 -            vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
    2.53 +            if ( v != current )
    2.54 +                hw_cr0_mask |= X86_CR0_TS;
    2.55 +            else if ( vmcb->cr0 & X86_CR0_TS )
    2.56 +                svm_fpu_enter(v);
    2.57          }
    2.58  
    2.59 -        vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
    2.60 +        vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0] | hw_cr0_mask;
    2.61          if ( !paging_mode_hap(v->domain) )
    2.62              vmcb->cr0 |= X86_CR0_PG | X86_CR0_WP;
    2.63          break;
    2.64 +    }
    2.65      case 2:
    2.66          vmcb->cr2 = v->arch.hvm_vcpu.guest_cr[2];
    2.67          break;
    2.68 @@ -664,24 +695,6 @@ static void svm_set_segment_register(str
    2.69          svm_vmload(vmcb);
    2.70  }
    2.71  
    2.72 -/* Make sure that xen intercepts any FP accesses from current */
    2.73 -static void svm_stts(struct vcpu *v) 
    2.74 -{
    2.75 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    2.76 -
    2.77 -    /*
    2.78 -     * If the guest does not have TS enabled then we must cause and handle an 
    2.79 -     * exception on first use of the FPU. If the guest *does* have TS enabled 
    2.80 -     * then this is not necessary: no FPU activity can occur until the guest 
    2.81 -     * clears CR0.TS, and we will initialise the FPU when that happens.
    2.82 -     */
    2.83 -    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
    2.84 -    {
    2.85 -        v->arch.hvm_svm.vmcb->exception_intercepts |= 1U << TRAP_no_device;
    2.86 -        vmcb->cr0 |= X86_CR0_TS;
    2.87 -    }
    2.88 -}
    2.89 -
    2.90  static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
    2.91  {
    2.92      v->arch.hvm_svm.vmcb->tsc_offset = offset;
    2.93 @@ -711,6 +724,8 @@ static void svm_ctxt_switch_from(struct 
    2.94  {
    2.95      int cpu = smp_processor_id();
    2.96  
    2.97 +    svm_fpu_leave(v);
    2.98 +
    2.99      svm_save_dr(v);
   2.100  
   2.101      svm_sync_vmcb(v);
   2.102 @@ -883,7 +898,6 @@ static struct hvm_function_table svm_fun
   2.103      .update_guest_cr      = svm_update_guest_cr,
   2.104      .update_guest_efer    = svm_update_guest_efer,
   2.105      .flush_guest_tlbs     = svm_flush_guest_tlbs,
   2.106 -    .stts                 = svm_stts,
   2.107      .set_tsc_offset       = svm_set_tsc_offset,
   2.108      .inject_exception     = svm_inject_exception,
   2.109      .init_hypercall_page  = svm_init_hypercall_page,
   2.110 @@ -964,12 +978,11 @@ static void svm_do_nested_pgfault(paddr_
   2.111  
   2.112  static void svm_do_no_device_fault(struct vmcb_struct *vmcb)
   2.113  {
   2.114 -    struct vcpu *v = current;
   2.115 +    struct vcpu *curr = current;
   2.116  
   2.117 -    setup_fpu(v);    
   2.118 -    vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
   2.119 +    svm_fpu_enter(curr);
   2.120  
   2.121 -    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
   2.122 +    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
   2.123          vmcb->cr0 &= ~X86_CR0_TS;
   2.124  }
   2.125  
   2.126 @@ -1647,11 +1660,8 @@ static void svm_cr_access(
   2.127          break;
   2.128  
   2.129      case INSTR_CLTS:
   2.130 -        /* TS being cleared means that it's time to restore fpu state. */
   2.131 -        setup_fpu(current);
   2.132 -        vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
   2.133 -        vmcb->cr0 &= ~X86_CR0_TS; /* clear TS */
   2.134 -        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; /* clear TS */
   2.135 +        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
   2.136 +        svm_update_guest_cr(v, 0);
   2.137          HVMTRACE_0D(CLTS, current);
   2.138          break;
   2.139  
     3.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Sun Feb 03 09:30:59 2008 +0000
     3.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Sun Feb 03 10:22:08 2008 +0000
     3.3 @@ -212,7 +212,7 @@ static int construct_vmcb(struct vcpu *v
     3.4      vmcb->tr.base = 0;
     3.5      vmcb->tr.limit = 0xff;
     3.6  
     3.7 -    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_TS;
     3.8 +    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
     3.9      hvm_update_guest_cr(v, 0);
    3.10  
    3.11      v->arch.hvm_vcpu.guest_cr[4] = 0;
    3.12 @@ -220,12 +220,13 @@ static int construct_vmcb(struct vcpu *v
    3.13  
    3.14      paging_update_paging_modes(v);
    3.15  
    3.16 +    vmcb->exception_intercepts = HVM_TRAP_MASK | (1U << TRAP_no_device);
    3.17 +
    3.18      if ( paging_mode_hap(v->domain) )
    3.19      {
    3.20          vmcb->np_enable = 1; /* enable nested paging */
    3.21          vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
    3.22          vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
    3.23 -        vmcb->exception_intercepts = HVM_TRAP_MASK;
    3.24  
    3.25          /*
    3.26           * No point in intercepting CR3 reads, because the hardware will return
    3.27 @@ -241,7 +242,7 @@ static int construct_vmcb(struct vcpu *v
    3.28      }
    3.29      else
    3.30      {
    3.31 -        vmcb->exception_intercepts = HVM_TRAP_MASK | (1U << TRAP_page_fault);
    3.32 +        vmcb->exception_intercepts |= (1U << TRAP_page_fault);
    3.33      }
    3.34  
    3.35      return 0;
     4.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Sun Feb 03 09:30:59 2008 +0000
     4.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Sun Feb 03 10:22:08 2008 +0000
     4.3 @@ -570,7 +570,9 @@ static int construct_vmcs(struct vcpu *v
     4.4      __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
     4.5  #endif
     4.6  
     4.7 -    __vmwrite(EXCEPTION_BITMAP, HVM_TRAP_MASK | (1U << TRAP_page_fault));
     4.8 +    __vmwrite(EXCEPTION_BITMAP, (HVM_TRAP_MASK |
     4.9 +                                 (1U << TRAP_page_fault) |
    4.10 +                                 (1U << TRAP_no_device)));
    4.11  
    4.12      v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
    4.13      hvm_update_guest_cr(v, 0);
     5.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Sun Feb 03 09:30:59 2008 +0000
     5.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Sun Feb 03 10:22:08 2008 +0000
     5.3 @@ -740,15 +740,42 @@ static int vmx_load_vmcs_ctxt(struct vcp
     5.4      return 0;
     5.5  }
     5.6  
     5.7 -static void vmx_ctxt_switch_from(struct vcpu *v)
     5.8 +static void vmx_fpu_enter(struct vcpu *v)
     5.9  {
    5.10 +    setup_fpu(v);
    5.11 +    __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
    5.12 +    v->arch.hvm_vmx.host_cr0 &= ~X86_CR0_TS;
    5.13 +    __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
    5.14 +}
    5.15 +
    5.16 +static void vmx_fpu_leave(struct vcpu *v)
    5.17 +{
    5.18 +    ASSERT(!v->fpu_dirtied);
    5.19      ASSERT(read_cr0() & X86_CR0_TS);
    5.20 +
    5.21      if ( !(v->arch.hvm_vmx.host_cr0 & X86_CR0_TS) )
    5.22      {
    5.23          v->arch.hvm_vmx.host_cr0 |= X86_CR0_TS;
    5.24          __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
    5.25      }
    5.26  
    5.27 +    /*
    5.28 +     * If the guest does not have TS enabled then we must cause and handle an
    5.29 +     * exception on first use of the FPU. If the guest *does* have TS enabled
    5.30 +     * then this is not necessary: no FPU activity can occur until the guest
    5.31 +     * clears CR0.TS, and we will initialise the FPU when that happens.
    5.32 +     */
    5.33 +    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
    5.34 +    {
    5.35 +        v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_TS;
    5.36 +        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
    5.37 +        __vm_set_bit(EXCEPTION_BITMAP, TRAP_no_device);
    5.38 +    }
    5.39 +}
    5.40 +
    5.41 +static void vmx_ctxt_switch_from(struct vcpu *v)
    5.42 +{
    5.43 +    vmx_fpu_leave(v);
    5.44      vmx_save_guest_msrs(v);
    5.45      vmx_restore_host_msrs();
    5.46      vmx_save_dr(v);
    5.47 @@ -951,26 +978,6 @@ static void vmx_set_segment_register(str
    5.48      vmx_vmcs_exit(v);
    5.49  }
    5.50  
    5.51 -/* Make sure that xen intercepts any FP accesses from current */
    5.52 -static void vmx_stts(struct vcpu *v)
    5.53 -{
    5.54 -    /* VMX depends on operating on the current vcpu */
    5.55 -    ASSERT(v == current);
    5.56 -
    5.57 -    /*
    5.58 -     * If the guest does not have TS enabled then we must cause and handle an
    5.59 -     * exception on first use of the FPU. If the guest *does* have TS enabled
    5.60 -     * then this is not necessary: no FPU activity can occur until the guest
    5.61 -     * clears CR0.TS, and we will initialise the FPU when that happens.
    5.62 -     */
    5.63 -    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
    5.64 -    {
    5.65 -        v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_TS;
    5.66 -        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
    5.67 -        __vm_set_bit(EXCEPTION_BITMAP, TRAP_no_device);
    5.68 -    }
    5.69 -}
    5.70 -
    5.71  static void vmx_set_tsc_offset(struct vcpu *v, u64 offset)
    5.72  {
    5.73      vmx_vmcs_enter(v);
    5.74 @@ -1042,21 +1049,24 @@ static void vmx_update_guest_cr(struct v
    5.75  
    5.76      switch ( cr )
    5.77      {
    5.78 -    case 0:
    5.79 -        /* TS cleared? Then initialise FPU now. */
    5.80 -        if ( (v == current) && !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) &&
    5.81 -             (v->arch.hvm_vcpu.hw_cr[0] & X86_CR0_TS) )
    5.82 +    case 0: {
    5.83 +        unsigned long hw_cr0_mask =
    5.84 +            X86_CR0_NE | X86_CR0_PG | X86_CR0_WP | X86_CR0_PE;
    5.85 +
    5.86 +        if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
    5.87          {
    5.88 -            setup_fpu(v);
    5.89 -            __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
    5.90 +            if ( v != current )
    5.91 +                hw_cr0_mask |= X86_CR0_TS;
    5.92 +            else if ( v->arch.hvm_vcpu.hw_cr[0] & X86_CR0_TS )
    5.93 +                vmx_fpu_enter(v);
    5.94          }
    5.95  
    5.96          v->arch.hvm_vcpu.hw_cr[0] =
    5.97 -            v->arch.hvm_vcpu.guest_cr[0] |
    5.98 -            X86_CR0_NE | X86_CR0_PG | X86_CR0_WP | X86_CR0_PE;
    5.99 +            v->arch.hvm_vcpu.guest_cr[0] | hw_cr0_mask;
   5.100          __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
   5.101          __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
   5.102          break;
   5.103 +    }
   5.104      case 2:
   5.105          /* CR2 is updated in exit stub. */
   5.106          break;
   5.107 @@ -1153,7 +1163,6 @@ static struct hvm_function_table vmx_fun
   5.108      .update_guest_cr      = vmx_update_guest_cr,
   5.109      .update_guest_efer    = vmx_update_guest_efer,
   5.110      .flush_guest_tlbs     = vmx_flush_guest_tlbs,
   5.111 -    .stts                 = vmx_stts,
   5.112      .set_tsc_offset       = vmx_set_tsc_offset,
   5.113      .inject_exception     = vmx_inject_exception,
   5.114      .init_hypercall_page  = vmx_init_hypercall_page,
   5.115 @@ -1234,20 +1243,15 @@ static void __update_guest_eip(unsigned 
   5.116  
   5.117  void vmx_do_no_device_fault(void)
   5.118  {
   5.119 -    struct vcpu *v = current;
   5.120 -
   5.121 -    setup_fpu(current);
   5.122 -    __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
   5.123 -
   5.124 -    ASSERT(v->arch.hvm_vmx.host_cr0 & X86_CR0_TS);
   5.125 -    v->arch.hvm_vmx.host_cr0 &= ~X86_CR0_TS;
   5.126 -    __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
   5.127 +    struct vcpu *curr = current;
   5.128 +
   5.129 +    vmx_fpu_enter(curr);
   5.130  
   5.131      /* Disable TS in guest CR0 unless the guest wants the exception too. */
   5.132 -    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
   5.133 +    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
   5.134      {
   5.135 -        v->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS;
   5.136 -        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
   5.137 +        curr->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS;
   5.138 +        __vmwrite(GUEST_CR0, curr->arch.hvm_vcpu.hw_cr[0]);
   5.139      }
   5.140  }
   5.141  
   5.142 @@ -2226,15 +2230,8 @@ static int vmx_cr_access(unsigned long e
   5.143          mov_from_cr(cr, gp, regs);
   5.144          break;
   5.145      case TYPE_CLTS:
   5.146 -        /* We initialise the FPU now, to avoid needing another vmexit. */
   5.147 -        setup_fpu(v);
   5.148 -        __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
   5.149 -
   5.150 -        v->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS; /* clear TS */
   5.151 -        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
   5.152 -
   5.153 -        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; /* clear TS */
   5.154 -        __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
   5.155 +        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
   5.156 +        vmx_update_guest_cr(v, 0);
   5.157          HVMTRACE_0D(CLTS, current);
   5.158          break;
   5.159      case TYPE_LMSW:
     6.1 --- a/xen/include/asm-x86/hvm/hvm.h	Sun Feb 03 09:30:59 2008 +0000
     6.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Sun Feb 03 10:22:08 2008 +0000
     6.3 @@ -105,12 +105,6 @@ struct hvm_function_table {
     6.4       */
     6.5      void (*flush_guest_tlbs)(void);
     6.6  
     6.7 -    /*
     6.8 -     * Update specifics of the guest state:
     6.9 -     * 1) TS bit in guest cr0 
    6.10 -     * 2) TSC offset in guest
    6.11 -     */
    6.12 -    void (*stts)(struct vcpu *v);
    6.13      void (*set_tsc_offset)(struct vcpu *v, u64 offset);
    6.14  
    6.15      void (*inject_exception)(unsigned int trapnr, int errcode,