ia64/xen-unstable

changeset 11164:e2e7f4c17b77

[HVM] Provide common support function for HLT emulation: hvm_hlt().
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Wed Aug 16 14:27:30 2006 +0100 (2006-08-16)
parents ebd289e3d205
children c757ebffd500
files xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/support.h xen/include/asm-x86/hvm/svm/vmcb.h xen/include/asm-x86/hvm/vcpu.h xen/include/asm-x86/hvm/vmx/vmcs.h
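The change consolidates the HLT idling path that was previously duplicated in the SVM and VMX exit handlers into a single hvm_hlt(): a vcpu-0 HLT with interrupts disabled is treated as a shutdown request; otherwise the next PIT/APIC wakeup is computed, the per-vcpu hlt_timer (now in struct hvm_vcpu) is armed, and the vcpu blocks. A minimal sketch of the resulting vendor-side call pattern, condensed from the VMX hunk below:

    void vmx_vmexit_do_hlt(void)
    {
        unsigned long rflags;
        __vmread(GUEST_RFLAGS, &rflags);  /* guest RFLAGS at the point of HLT */
        hvm_hlt(rflags);                  /* common HLT emulation */
    }

The SVM handler does the same with vmcb->rflags, after first returning early if interrupts are enabled and an interrupt is already pending.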
line diff
     1.1 --- a/xen/arch/x86/hvm/hvm.c	Wed Aug 16 14:26:59 2006 +0100
     1.2 +++ b/xen/arch/x86/hvm/hvm.c	Wed Aug 16 14:27:30 2006 +0100
     1.3 @@ -345,6 +345,36 @@ int cpu_get_interrupt(struct vcpu *v, in
     1.4      return -1;
     1.5  }
     1.6  
     1.7 +void hvm_hlt(unsigned long rflags)
     1.8 +{
     1.9 +    struct vcpu *v = current;
    1.10 +    struct periodic_time *pt = &v->domain->arch.hvm_domain.pl_time.periodic_tm;
    1.11 +    s_time_t next_pit = -1, next_wakeup;
    1.12 +
    1.13 +    /*
    1.14 +     * Detect machine shutdown.  Only do this for vcpu 0, to avoid potentially 
    1.15 +     * shutting down the domain early. If we halt with interrupts disabled, 
    1.16 +     * that's a pretty sure sign that we want to shut down.  In a real 
    1.17 +     * processor, NMIs are the only way to break out of this.
    1.18 +     */
    1.19 +    if ( (v->vcpu_id == 0) && !(rflags & X86_EFLAGS_IF) )
    1.20 +    {
     1.21 +        printk("D%d: HLT with interrupts disabled -- shutting down.\n",
    1.22 +               current->domain->domain_id);
    1.23 +        domain_shutdown(current->domain, SHUTDOWN_poweroff);
    1.24 +        return;
    1.25 +    }
    1.26 +
    1.27 +    if ( !v->vcpu_id )
    1.28 +        next_pit = get_scheduled(v, pt->irq, pt);
    1.29 +    next_wakeup = get_apictime_scheduled(v);
    1.30 +    if ( (next_pit != -1 && next_pit < next_wakeup) || next_wakeup == -1 )
    1.31 +        next_wakeup = next_pit;
     1.32 +    if ( next_wakeup != -1 )
    1.33 +        set_timer(&current->arch.hvm_vcpu.hlt_timer, next_wakeup);
    1.34 +    do_sched_op_compat(SCHEDOP_block, 0);
    1.35 +}
    1.36 +
    1.37  /*
    1.38   * Copy from/to guest virtual.
    1.39   */
     2.1 --- a/xen/arch/x86/hvm/svm/svm.c	Wed Aug 16 14:26:59 2006 +0100
     2.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Wed Aug 16 14:27:30 2006 +0100
     2.3 @@ -835,7 +835,7 @@ static void svm_relinquish_guest_resourc
     2.4  
     2.5          destroy_vmcb(&v->arch.hvm_svm);
     2.6          free_monitor_pagetable(v);
     2.7 -        kill_timer(&v->arch.hvm_svm.hlt_timer);
     2.8 +        kill_timer(&v->arch.hvm_vcpu.hlt_timer);
     2.9          if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) ) 
    2.10          {
    2.11              kill_timer( &(VLAPIC(v)->vlapic_timer) );
    2.12 @@ -863,7 +863,7 @@ static void svm_migrate_timers(struct vc
    2.13  
    2.14      if ( pt->enabled ) {
    2.15          migrate_timer( &pt->timer, v->processor );
    2.16 -        migrate_timer( &v->arch.hvm_svm.hlt_timer, v->processor );
    2.17 +        migrate_timer( &v->arch.hvm_vcpu.hlt_timer, v->processor );
    2.18      }
    2.19      if ( hvm_apic_support(v->domain) && VLAPIC( v ))
    2.20          migrate_timer( &(VLAPIC(v)->vlapic_timer ), v->processor );
    2.21 @@ -2144,47 +2144,16 @@ done:
    2.22  }
    2.23  
    2.24  
    2.25 -/*
    2.26 - * Need to use this exit to reschedule
    2.27 - */
    2.28  static inline void svm_vmexit_do_hlt(struct vmcb_struct *vmcb)
    2.29  {
    2.30 -    struct vcpu *v = current;
    2.31 -    struct periodic_time *pt = 
    2.32 -        &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
    2.33 -    s_time_t next_pit = -1, next_wakeup;
    2.34 -
    2.35      __update_guest_eip(vmcb, 1);
    2.36  
    2.37 -    /* check for interrupt not handled or new interrupt */
    2.38 -    if ( vmcb->vintr.fields.irq || cpu_has_pending_irq(v) )
    2.39 +    /* Check for interrupt not handled or new interrupt. */
    2.40 +    if ( (vmcb->rflags & X86_EFLAGS_IF) &&
    2.41 +         (vmcb->vintr.fields.irq || cpu_has_pending_irq(current)) )
    2.42         return;
    2.43  
    2.44 -    /* Detect machine shutdown.  Only do this for vcpu 0, to avoid
    2.45 -       potentially shutting down the domain early. */
    2.46 -    if (v->vcpu_id == 0) {
    2.47 -        unsigned long rflags = vmcb->rflags; 
    2.48 -        /* If we halt with interrupts disabled, that's a pretty sure
    2.49 -           sign that we want to shut down.  In a real processor, NMIs
    2.50 -           are the only way to break out of this.  Our SVM code won't
    2.51 -           deliver interrupts, but will wake it up whenever one is
    2.52 -           pending... */
    2.53 -        if(!(rflags & X86_EFLAGS_IF)) {
    2.54 -            printk("D%d: HLT with interrupts enabled @0x%lx  Shutting down.\n",
    2.55 -                   current->domain->domain_id, (unsigned long)vmcb->rip);
    2.56 -            domain_shutdown(current->domain, SHUTDOWN_poweroff);
    2.57 -            return;
    2.58 -        }
    2.59 -    }
    2.60 -
    2.61 -    if ( !v->vcpu_id )
    2.62 -        next_pit = get_scheduled(v, pt->irq, pt);
    2.63 -    next_wakeup = get_apictime_scheduled(v);
    2.64 -    if ( (next_pit != -1 && next_pit < next_wakeup) || next_wakeup == -1 )
    2.65 -        next_wakeup = next_pit;
    2.66 -    if ( next_wakeup != - 1 )
    2.67 -        set_timer(&current->arch.hvm_svm.hlt_timer, next_wakeup);
    2.68 -    do_sched_op_compat(SCHEDOP_block, 0);
    2.69 +    hvm_hlt(vmcb->rflags);
    2.70  }
    2.71  
    2.72  
     3.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Wed Aug 16 14:26:59 2006 +0100
     3.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Wed Aug 16 14:27:30 2006 +0100
     3.3 @@ -360,8 +360,7 @@ void svm_do_launch(struct vcpu *v)
     3.4  
     3.5      if (hvm_apic_support(v->domain))
     3.6          vlapic_init(v);
     3.7 -    init_timer(&v->arch.hvm_svm.hlt_timer,
     3.8 -				hlt_timer_fn, v, v->processor);
     3.9 +    init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
    3.10  
    3.11      vmcb->ldtr.sel = 0;
    3.12      vmcb->ldtr.base = 0;
     4.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Wed Aug 16 14:26:59 2006 +0100
     4.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Wed Aug 16 14:27:30 2006 +0100
     4.3 @@ -267,7 +267,7 @@ static void vmx_do_launch(struct vcpu *v
     4.4          vlapic_init(v);
     4.5  
     4.6      vmx_set_host_env(v);
     4.7 -    init_timer(&v->arch.hvm_vmx.hlt_timer, hlt_timer_fn, v, v->processor);
     4.8 +    init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
     4.9  
    4.10      error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
    4.11      error |= __vmwrite(GUEST_LDTR_BASE, 0);
     5.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Aug 16 14:26:59 2006 +0100
     5.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Aug 16 14:27:30 2006 +0100
     5.3 @@ -134,7 +134,7 @@ static void vmx_relinquish_guest_resourc
     5.4          if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
     5.5              continue;
     5.6          free_monitor_pagetable(v);
     5.7 -        kill_timer(&v->arch.hvm_vmx.hlt_timer);
     5.8 +        kill_timer(&v->arch.hvm_vcpu.hlt_timer);
     5.9          if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) )
    5.10          {
    5.11              kill_timer(&VLAPIC(v)->vlapic_timer);
    5.12 @@ -496,7 +496,7 @@ void vmx_migrate_timers(struct vcpu *v)
    5.13  
    5.14      if ( pt->enabled ) {
    5.15          migrate_timer(&pt->timer, v->processor);
    5.16 -        migrate_timer(&v->arch.hvm_vmx.hlt_timer, v->processor);
    5.17 +        migrate_timer(&v->arch.hvm_vcpu.hlt_timer, v->processor);
    5.18      }
    5.19      if ( hvm_apic_support(v->domain) && VLAPIC(v))
    5.20          migrate_timer(&(VLAPIC(v)->vlapic_timer), v->processor);
    5.21 @@ -2049,46 +2049,11 @@ static inline void vmx_do_msr_write(stru
    5.22                  (unsigned long)regs->edx);
    5.23  }
    5.24  
    5.25 -/*
    5.26 - * Need to use this exit to reschedule
    5.27 - */
    5.28  void vmx_vmexit_do_hlt(void)
    5.29  {
    5.30 -    struct vcpu *v = current;
    5.31 -    struct periodic_time *pt = 
    5.32 -        &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
    5.33 -    s_time_t next_pit = -1, next_wakeup;
    5.34 -
    5.35 -
    5.36 -    /* Detect machine shutdown.  Only do this for vcpu 0, to avoid
    5.37 -       potentially shutting down the domain early. */
    5.38 -    if (v->vcpu_id == 0) {
    5.39 -        unsigned long rflags;
    5.40 -        
    5.41 -        __vmread(GUEST_RFLAGS, &rflags);
    5.42 -        /* If we halt with interrupts disabled, that's a pretty sure
    5.43 -           sign that we want to shut down.  In a real processor, NMIs
    5.44 -           are the only way to break out of this.  Our VMX code won't
    5.45 -           deliver interrupts, but will wake it up whenever one is
    5.46 -           pending... */
    5.47 -        if(!(rflags & X86_EFLAGS_IF)) {
    5.48 -            unsigned long rip;
    5.49 -            __vmread(GUEST_RIP, &rip);
    5.50 -            printk("D%d: HLT with interrupts enabled @0x%lx  Shutting down.\n",
    5.51 -                   current->domain->domain_id, rip);
    5.52 -            domain_shutdown(current->domain, SHUTDOWN_poweroff);
    5.53 -            return;
    5.54 -        }
    5.55 -    }
    5.56 -
    5.57 -    if ( !v->vcpu_id )
    5.58 -        next_pit = get_scheduled(v, pt->irq, pt);
    5.59 -    next_wakeup = get_apictime_scheduled(v);
    5.60 -    if ( (next_pit != -1 && next_pit < next_wakeup) || next_wakeup == -1 )
    5.61 -        next_wakeup = next_pit;
    5.62 -    if ( next_wakeup != - 1 ) 
    5.63 -        set_timer(&current->arch.hvm_vmx.hlt_timer, next_wakeup);
    5.64 -    do_sched_op_compat(SCHEDOP_block, 0);
    5.65 +    unsigned long rflags;
    5.66 +    __vmread(GUEST_RFLAGS, &rflags);
    5.67 +    hvm_hlt(rflags);
    5.68  }
    5.69  
    5.70  static inline void vmx_vmexit_do_extint(struct cpu_user_regs *regs)
     6.1 --- a/xen/include/asm-x86/hvm/support.h	Wed Aug 16 14:26:59 2006 +0100
     6.2 +++ b/xen/include/asm-x86/hvm/support.h	Wed Aug 16 14:27:30 2006 +0100
     6.3 @@ -148,4 +148,6 @@ void hvm_do_hypercall(struct cpu_user_re
     6.4  
     6.5  void hvm_prod_vcpu(struct vcpu *v);
     6.6  
     6.7 +void hvm_hlt(unsigned long rflags);
     6.8 +
     6.9  #endif /* __ASM_X86_HVM_SUPPORT_H__ */
     7.1 --- a/xen/include/asm-x86/hvm/svm/vmcb.h	Wed Aug 16 14:26:59 2006 +0100
     7.2 +++ b/xen/include/asm-x86/hvm/svm/vmcb.h	Wed Aug 16 14:27:30 2006 +0100
     7.3 @@ -448,7 +448,6 @@ struct arch_svm_struct {
     7.4      unsigned long       cpu_cr2;
     7.5      unsigned long       cpu_cr3;
     7.6      unsigned long       cpu_state;
     7.7 -    struct timer        hlt_timer;  /* hlt ins emulation wakeup timer */
     7.8  };
     7.9  
    7.10  extern struct vmcb_struct *alloc_vmcb(void);
     8.1 --- a/xen/include/asm-x86/hvm/vcpu.h	Wed Aug 16 14:26:59 2006 +0100
     8.2 +++ b/xen/include/asm-x86/hvm/vcpu.h	Wed Aug 16 14:27:30 2006 +0100
     8.3 @@ -43,15 +43,18 @@ struct hvm_vcpu {
     8.4      /* Flags */
     8.5      int                 flag_dr_dirty;
     8.6  
     8.7 +    /* hlt ins emulation wakeup timer */
     8.8 +    struct timer        hlt_timer;
     8.9 +
    8.10      union {
    8.11          struct arch_vmx_struct vmx;
    8.12          struct arch_svm_struct svm;
    8.13      } u;
    8.14  };
    8.15  
    8.16 -#define ARCH_HVM_IO_WAIT            1   /* Waiting for I/O completion */
    8.17 +#define ARCH_HVM_IO_WAIT         1   /* Waiting for I/O completion */
    8.18  
    8.19 -#define HVM_CONTEXT_STACK_BYTES     (offsetof(struct cpu_user_regs, error_code))
    8.20 +#define HVM_CONTEXT_STACK_BYTES  (offsetof(struct cpu_user_regs, error_code))
    8.21  
    8.22  #endif /* __ASM_X86_HVM_VCPU_H__ */
    8.23  
     9.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed Aug 16 14:26:59 2006 +0100
     9.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed Aug 16 14:27:30 2006 +0100
     9.3 @@ -93,7 +93,6 @@ struct arch_vmx_struct {
     9.4      unsigned long        cpu_based_exec_control;
     9.5      struct vmx_msr_state msr_content;
     9.6      void                *io_bitmap_a, *io_bitmap_b;
     9.7 -    struct timer         hlt_timer;  /* hlt ins emulation wakeup timer */
     9.8  };
     9.9  
    9.10  #define vmx_schedule_tail(next)         \
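Note on the data-structure move: the hlt_timer field leaves the vendor-specific arch_vmx_struct and arch_svm_struct and becomes a common field of struct hvm_vcpu, so the shared hvm_hlt() can arm it without vendor-specific accessors. The timer's lifecycle, condensed from the hunks above (hlt_timer_fn is the existing wakeup callback):

    init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);  /* at launch */
    migrate_timer(&v->arch.hvm_vcpu.hlt_timer, v->processor);                /* when the vcpu moves CPU */
    kill_timer(&v->arch.hvm_vcpu.hlt_timer);                                 /* on guest teardown */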