ia64/xen-unstable

changeset 9416:b7facd6aa72e

SVM patch to fix guest time, including 64-bit MSR code — allowing 64-bit
Linux guests to enable the APIC (i.e. apic=1 now works in the guest config file).

Signed-off-by: Tom Woller <thomas.woller@amd.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Mar 23 10:50:34 2006 +0100 (2006-03-23)
parents 72b469303d6d
children 799957f5092c
files xen/arch/x86/hvm/svm/intr.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c xen/include/asm-x86/hvm/svm/svm.h
line diff
     1.1 --- a/xen/arch/x86/hvm/svm/intr.c	Thu Mar 23 10:47:44 2006 +0100
     1.2 +++ b/xen/arch/x86/hvm/svm/intr.c	Thu Mar 23 10:50:34 2006 +0100
     1.3 @@ -44,6 +44,58 @@
     1.4   */
     1.5  #define BSP_CPU(v)    (!(v->vcpu_id))
     1.6  
     1.7 +u64 svm_get_guest_time(struct vcpu *v)
     1.8 +{
     1.9 +    struct hvm_virpit *vpit = &(v->domain->arch.hvm_domain.vpit);
    1.10 +    u64    host_tsc;
    1.11 +    
    1.12 +    rdtscll(host_tsc);
    1.13 +    return host_tsc + vpit->cache_tsc_offset;
    1.14 +}
    1.15 +
    1.16 +void svm_set_guest_time(struct vcpu *v, u64 gtime)
    1.17 +{
    1.18 +    struct hvm_virpit *vpit = &(v->domain->arch.hvm_domain.vpit);
    1.19 +    u64    host_tsc;
    1.20 +   
    1.21 +    rdtscll(host_tsc);
    1.22 +    
    1.23 +    vpit->cache_tsc_offset = gtime - host_tsc;
    1.24 +    v->arch.hvm_svm.vmcb->tsc_offset = vpit->cache_tsc_offset;
    1.25 +}
    1.26 +
    1.27 +static inline void
    1.28 +interrupt_post_injection(struct vcpu * v, int vector, int type)
    1.29 +{
    1.30 +    struct hvm_virpit *vpit = &(v->domain->arch.hvm_domain.vpit);
    1.31 +
    1.32 +    if ( is_pit_irq(v, vector, type) ) {
    1.33 +        if ( !vpit->first_injected ) {
    1.34 +            vpit->pending_intr_nr = 0;
    1.35 +            vpit->last_pit_gtime = svm_get_guest_time(v);
    1.36 +            vpit->scheduled = NOW() + vpit->period;
    1.37 +            set_timer(&vpit->pit_timer, vpit->scheduled);
    1.38 +            vpit->first_injected = 1;
    1.39 +        } else {
    1.40 +            vpit->pending_intr_nr--;
    1.41 +        }
    1.42 +        vpit->inject_point = NOW();
    1.43 +
    1.44 +        vpit->last_pit_gtime += vpit->period;
    1.45 +        svm_set_guest_time(v, vpit->last_pit_gtime);
    1.46 +    }
    1.47 +
    1.48 +    switch(type)
    1.49 +    {
    1.50 +    case VLAPIC_DELIV_MODE_EXT:
    1.51 +        break;
    1.52 +
    1.53 +    default:
    1.54 +        vlapic_post_injection(v, vector, type);
    1.55 +        break;
    1.56 +    }
    1.57 +}
    1.58 +
    1.59  static inline int svm_inject_extint(struct vcpu *v, int trap, int error_code)
    1.60  {
    1.61      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    1.62 @@ -64,45 +116,6 @@ static inline int svm_inject_extint(stru
    1.63      return 0;
    1.64  }
    1.65  
    1.66 -void svm_set_tsc_shift(struct vcpu *v, struct hvm_virpit *vpit)
    1.67 -{
    1.68 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    1.69 -    u64    drift;
    1.70 -
    1.71 -    if ( vpit->first_injected )
    1.72 -        drift = vpit->period_cycles * vpit->pending_intr_nr;
    1.73 -    else
    1.74 -        drift = 0;
    1.75 -    vmcb->tsc_offset = ( 0 - drift );
    1.76 -}
    1.77 -
    1.78 -static inline void
    1.79 -interrupt_post_injection(struct vcpu * v, int vector, int type)
    1.80 -{
    1.81 -    struct hvm_virpit *vpit = &(v->domain->arch.hvm_domain.vpit);
    1.82 -
    1.83 -    if ( is_pit_irq(v, vector, type) ) {
    1.84 -            if ( !vpit->first_injected ) {
    1.85 -                vpit->first_injected = 1;
    1.86 -                vpit->pending_intr_nr = 0;
    1.87 -            }
    1.88 -            else if (vpit->pending_intr_nr) {
    1.89 -                --vpit->pending_intr_nr;
    1.90 -            }
    1.91 -            vpit->inject_point = NOW();
    1.92 -            svm_set_tsc_shift (v, vpit);
    1.93 -    }
    1.94 -
    1.95 -    switch(type)
    1.96 -    {
    1.97 -    case VLAPIC_DELIV_MODE_EXT:
    1.98 -        break;
    1.99 -
   1.100 -    default:
   1.101 -        vlapic_post_injection(v, vector, type);
   1.102 -    }
   1.103 -}
   1.104 -
   1.105  asmlinkage void svm_intr_assist(void) 
   1.106  {
   1.107      struct vcpu *v = current;
     2.1 --- a/xen/arch/x86/hvm/svm/svm.c	Thu Mar 23 10:47:44 2006 +0100
     2.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Thu Mar 23 10:50:34 2006 +0100
     2.3 @@ -670,8 +670,18 @@ static void arch_svm_do_launch(struct vc
     2.4      reset_stack_and_jump(svm_asm_do_launch);
     2.5  }
     2.6  
     2.7 +static void svm_freeze_time(struct vcpu *v)
     2.8 +{
     2.9 +    struct hvm_virpit *vpit = &v->domain->arch.hvm_domain.vpit;
    2.10 +    
    2.11 +    v->domain->arch.hvm_domain.guest_time = svm_get_guest_time(v);
    2.12 +    if ( vpit->first_injected )
    2.13 +        stop_timer(&(vpit->pit_timer));
    2.14 +}
    2.15 +
    2.16  static void svm_ctxt_switch_from(struct vcpu *v)
    2.17  {
    2.18 +    svm_freeze_time(v);
    2.19  }
    2.20  
    2.21  static void svm_ctxt_switch_to(struct vcpu *v)
    2.22 @@ -911,7 +921,7 @@ static void svm_vmexit_do_cpuid(struct v
    2.23  
    2.24      if (input == 1)
    2.25      {
    2.26 -        if ( hvm_apic_support(v->domain) &&
    2.27 +        if ( !hvm_apic_support(v->domain) ||
    2.28                  !vlapic_global_enabled((VLAPIC(v))) )
    2.29              clear_bit(X86_FEATURE_APIC, &edx);
    2.30  	    
    2.31 @@ -1693,7 +1703,7 @@ static inline void svm_do_msr_access(str
    2.32  {
    2.33      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    2.34      int  inst_len;
    2.35 -    int64_t tsc_sum;
    2.36 +    u64 msr_content=0;
    2.37  
    2.38      ASSERT(vmcb);
    2.39  
    2.40 @@ -1708,24 +1718,27 @@ static inline void svm_do_msr_access(str
    2.41          inst_len = __get_instruction_length(vmcb, INSTR_RDMSR, NULL);
    2.42  
    2.43          regs->edx = 0;
    2.44 -        switch (regs->ecx)
    2.45 +        switch (regs->ecx) {
    2.46 +        case MSR_IA32_TIME_STAMP_COUNTER:
    2.47          {
    2.48 +            struct hvm_virpit *vpit;
    2.49 +
    2.50 +            rdtscll(msr_content);
    2.51 +            vpit = &(v->domain->arch.hvm_domain.vpit);
    2.52 +            msr_content += vpit->cache_tsc_offset;
    2.53 +            break;
    2.54 +        }
    2.55          case MSR_IA32_SYSENTER_CS:
    2.56 -            regs->eax = vmcb->sysenter_cs;
    2.57 +            msr_content = vmcb->sysenter_cs;
    2.58              break;
    2.59          case MSR_IA32_SYSENTER_ESP: 
    2.60 -            regs->eax = vmcb->sysenter_esp;
    2.61 +            msr_content = vmcb->sysenter_esp;
    2.62              break;
    2.63          case MSR_IA32_SYSENTER_EIP:     
    2.64 -            regs->eax = vmcb->sysenter_eip;
    2.65 +            msr_content = vmcb->sysenter_eip;
    2.66              break;
    2.67 -        case MSR_IA32_TIME_STAMP_COUNTER:
    2.68 -            __asm__ __volatile__("rdtsc" : "=a" (regs->eax), "=d" (regs->edx));
    2.69 -            tsc_sum = regs->edx;
    2.70 -            tsc_sum = (tsc_sum << 32) + regs->eax;
    2.71 -            tsc_sum += (int64_t) vmcb->tsc_offset;
    2.72 -            regs->eax = tsc_sum & 0xFFFFFFFF;
    2.73 -            regs->edx = (tsc_sum >> 32) & 0xFFFFFFFF;
    2.74 +        case MSR_IA32_APICBASE:
    2.75 +            msr_content = VLAPIC(v) ? VLAPIC(v)->apic_base_msr : 0;
    2.76              break;
    2.77          default:
    2.78              if (long_mode_do_msr_read(regs))
    2.79 @@ -1733,21 +1746,30 @@ static inline void svm_do_msr_access(str
    2.80              rdmsr_safe(regs->ecx, regs->eax, regs->edx);
    2.81              break;
    2.82          }
    2.83 +        regs->eax = msr_content & 0xFFFFFFFF;
    2.84 +        regs->edx = msr_content >> 32;
    2.85      }
    2.86      else
    2.87      {
    2.88          inst_len = __get_instruction_length(vmcb, INSTR_WRMSR, NULL);
    2.89 +        msr_content = (regs->eax & 0xFFFFFFFF) | ((u64)regs->edx << 32);
    2.90  
    2.91          switch (regs->ecx)
    2.92          {
    2.93 +        case MSR_IA32_TIME_STAMP_COUNTER:
    2.94 +            svm_set_guest_time(v, msr_content);
    2.95 +            break;
    2.96          case MSR_IA32_SYSENTER_CS:
    2.97 -            vmcb->sysenter_cs = regs->eax;
    2.98 +            vmcb->sysenter_cs = msr_content;
    2.99              break;
   2.100          case MSR_IA32_SYSENTER_ESP: 
   2.101 -            vmcb->sysenter_esp = regs->eax;
   2.102 +            vmcb->sysenter_esp = msr_content;
   2.103              break;
   2.104          case MSR_IA32_SYSENTER_EIP:     
   2.105 -            vmcb->sysenter_eip = regs->eax;
   2.106 +            vmcb->sysenter_eip = msr_content;
   2.107 +            break;
   2.108 +        case MSR_IA32_APICBASE:
   2.109 +            vlapic_msr_set(VLAPIC(v), msr_content);
   2.110              break;
   2.111          default:
   2.112              long_mode_do_msr_write(regs);
     3.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Thu Mar 23 10:47:44 2006 +0100
     3.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Thu Mar 23 10:50:34 2006 +0100
     3.3 @@ -467,6 +467,8 @@ void svm_do_launch(struct vcpu *v)
     3.4      v->arch.hvm_svm.injecting_event  = 0;
     3.5      v->arch.hvm_svm.saved_irq_vector = -1;
     3.6  
     3.7 +    svm_set_guest_time(v, 0);
     3.8 +	
     3.9      if (svm_dbg_on)
    3.10          svm_dump_vmcb(__func__, vmcb);
    3.11  }
    3.12 @@ -494,16 +496,17 @@ void svm_do_resume(struct vcpu *v)
    3.13      struct hvm_virpit *vpit = &d->arch.hvm_domain.vpit;
    3.14  
    3.15      svm_stts(v);
    3.16 +    
    3.17 +    /* pick up the elapsed PIT ticks and re-enable pit_timer */
    3.18 +    if ( vpit->first_injected) {
    3.19 +        svm_set_guest_time(v, v->domain->arch.hvm_domain.guest_time);
    3.20 +        pickup_deactive_ticks(vpit);
    3.21 +    }
    3.22  
    3.23      if ( test_bit(iopacket_port(v), &d->shared_info->evtchn_pending[0]) ||
    3.24           test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
    3.25          hvm_wait_io();
    3.26  
    3.27 -    /* pick up the elapsed PIT ticks and re-enable pit_timer */
    3.28 -    if ( vpit->first_injected )
    3.29 -        pickup_deactive_ticks(vpit);
    3.30 -    svm_set_tsc_shift(v, vpit);
    3.31 -
    3.32      /* We can't resume the guest if we're waiting on I/O */
    3.33      ASSERT(!test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags));
    3.34  }
     4.1 --- a/xen/include/asm-x86/hvm/svm/svm.h	Thu Mar 23 10:47:44 2006 +0100
     4.2 +++ b/xen/include/asm-x86/hvm/svm/svm.h	Thu Mar 23 10:50:34 2006 +0100
     4.3 @@ -48,6 +48,8 @@ extern void svm_dump_vmcb(const char *fr
     4.4  extern void svm_stts(struct vcpu *v); 
     4.5  extern void svm_do_launch(struct vcpu *v);
     4.6  extern void svm_do_resume(struct vcpu *v);
     4.7 +extern void svm_set_guest_time(struct vcpu *v, u64 gtime);
     4.8 +extern u64 svm_get_guest_time(struct vcpu *v);
     4.9  extern void arch_svm_do_resume(struct vcpu *v);
    4.10  extern int load_vmcb(struct arch_svm_struct *arch_svm, u64 phys_hsa);
    4.11  /* For debugging. Remove when no longer needed. */